mkdir -p bin/vg.metacompiler-linux-x86_64-zig-cc/ &&
taskset -c 0-1 zig cc -I. -I./include/ -fsanitize=address -lasan -Wall -Wno-unused-function -O0 -ggdb \
-std=c11 -D_DEFAULT_SOURCE \
- -include "common_api.h" \
+ -include "types.h" \
source/foundation/options.c \
source/foundation/logging.c \
source/foundation/allocator_heap.c \
--- /dev/null
+include include/
+include source/
-include include/
-include source/
-
event
{
name OPTIONS
name main
queue_size_m 40
flag MAIN
+ flag OPENGL
}
thread
{
name async
queue_size_m 40
+ flag ASYNC
}
thread
{
EVENT_CALL( OPTIONS ); \
_options_check_end();
-/* Types
- * ------------------------------------------------------------------------------------------------------------------ */
-typedef unsigned char u8;
-typedef char c8;
-typedef unsigned short int u16;
-typedef unsigned int u32;
-typedef unsigned long int u64;
-typedef char i8;
-typedef signed short int i16;
-typedef signed int i32;
-typedef signed long int i64;
-typedef float f32;
-typedef double f64;
-typedef unsigned char bool;
-
-#define NULL 0
-
#define BYTES_KB( X ) ((X)*1024)
#define BYTES_MB( X ) ((X)*1024*1024)
#define BYTES_GB( X ) ((X)*1024*1024*1024)
static inline f32 f32_max( f32 a, f32 b ){ return a > b? a: b; }
static inline i32 i32_min( i32 a, i32 b ){ return a < b? a: b; }
static inline i32 i32_max( i32 a, i32 b ){ return a > b? a: b; }
+static inline u32 u32_min( u32 a, u32 b ){ return a < b? a: b; }
+static inline u32 u32_max( u32 a, u32 b ){ return a > b? a: b; }
static inline i16 i16_min( i16 a, i16 b ){ return a < b? a: b; }
static inline i16 i16_max( i16 a, i16 b ){ return a > b? a: b; }
static inline f32 f32_clamp( f32 a, f32 min, f32 max ) { return f32_min( max, f32_max( a, min ) ); }
bool compare_buffers( const void *buffer_a, i32 a_max, const void *buffer_b, i32 b_max );
i32 buffer_first_index( const void *buffer, u8 match, i32 max_length );
i32 buffer_last_index( const void *buffer, u8 match, i32 max_length );
-void buffer_copy( const void *buffer_src, u32 src_length, void *buffer_dest, u32 dest_length );
+bool buffer_copy( const void *buffer_src, u32 src_length, void *buffer_dest, u32 dest_length );
struct stretchy_allocator
{
#define ASYNC_CRITICAL 0x1
#define ASYNC_NONBLOCKING 0x2
+// TODO THIS SHOULD BE PART OF THE METACOMPILER
+#define THREAD_FLAG_MAIN 0x1
+#define THREAD_FLAG_ASYNC 0x2
+#define THREAD_FLAG_OPENGL 0x4
+
+// TODO THIS SHOULD BE PART OF THE METACOMPILER
+#define ASYNC_GROUP_OPENGL 0x1
+#define ASYNC_GROUP_FIRST_LOAD 0x2
+
void _set_thread_id( enum thread_id id );
enum thread_id _get_thread_id(void);
bool _thread_has_flags( enum thread_id id, u32 flags );
}
else return INFINITY;
}
+
+
+
+/*
+
+	Time of collision (out t) of line a0 -> a0+b, vs segment b0 -> b0+d
+
+	a0: line start
+	b : line trace direction ( non-normalized )
+	b0: test segment start
+	d : test segment direction ( non-normalized )
+	one_sided: one-way or double-sided colliders
+
+	Returns 1/0 hit
+
+*/
+static inline bool segment_segment_time_2d( f32 a0[2], f32 b[2], f32 b0[2], f32 d[2], f32 *t, bool one_sided )
+{
+ const f32 k_epsilon = 0.00001f;
+
+ f32 c[2];
+ f32 det, u;
+
+ /* Interior test */
+ det = v2_cross( b, d );
+	if( det == 0.0f || (det < 0.0f && one_sided) ) /* parallel: no unique intersection time */
+		return 0;
+
+ v2_sub( b0, a0, c );
+
+ /* Second edge */
+ u = v2_cross( c, b ) / det;
+ if( u < -k_epsilon || u > 1.0f+k_epsilon )
+ return 0;
+
+ /* First edge */
+ *t = v2_cross( c, d ) / det;
+ if( *t < -k_epsilon || *t > 1.0f+k_epsilon )
+ return 0;
+
+ return 1;
+}
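+
+/*
+	Derivation sketch: the intersection solves a0 + t*b = b0 + u*d. Crossing both sides
+	with d eliminates u, crossing with b eliminates t:
+
+		t = v2_cross( b0-a0, d ) / v2_cross( b, d )
+		u = v2_cross( b0-a0, b ) / v2_cross( b, d )
+
+	which is what segment_segment_time_2d computes, with det = v2_cross(b,d) and c = b0-a0.
+*/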
+
+/*
+
+	Positional collision HELPER. Don't use inside a loop; calculate b/d once outside and
+	call segment_segment_time_2d directly.
+
+	a0: line start
+	a1: line end
+	b0: second line start
+	b1: second line end
+	dest: collision location
+	one_sided: one-way or double-sided colliders
+
+	returns: 1/0 hit
+
+*/
+static inline bool segment_segment_intersect_2d( f32 a0[2], f32 a1[2], f32 b0[2], f32 b1[2], f32 dest[2], bool one_sided )
+{
+ f32 b[2];
+ f32 d[2];
+ f32 t;
+
+ /* Create trace vectors */
+ v2_sub( a1, a0, b );
+ v2_sub( b1, b0, d );
+
+ /* Find time */
+ if( !segment_segment_time_2d( a0, b, b0, d, &t, one_sided ) )
+ return 0;
+
+ /* Calculate position */
+ v2_muls( b, t, dest );
+ v2_add( a0, dest, dest );
+
+ return 1;
+}
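+
+/*
+	e.g. when tracing one segment against many edges, compute b = a1-a0 once and call
+	segment_segment_time_2d( a0, b, edge_start, edge_dir, &t, one_sided ) per edge,
+	keeping the smallest t (a sketch of the intended pattern; the helper above rebuilds
+	b/d on every call).
+*/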
+
+#pragma once
#include "vg/dep/glad.4.3/glad/glad.h"
#include "generated/shaders.h"
--- /dev/null
+typedef unsigned char u8;
+typedef char c8;
+typedef unsigned short int u16;
+typedef unsigned int u32;
+typedef unsigned long long int u64;
+typedef signed char i8;
+typedef signed short int i16;
+typedef signed int i32;
+typedef signed long long int i64;
+typedef float f32;
+typedef double f64;
+typedef unsigned char bool;
+#define NULL 0
+#include "common_api.h"
#include "console_core.h"
struct console_command
+#include "common_api.h"
#include "console_core.h"
#include "graphics_api.h"
#include "engine_interface.h"
+#include "common_api.h"
#include "glfw.h"
#include "engine_interface.h"
#include "input_api.h"
+#include "common_api.h"
+#include "common_thread_api.h"
#include <time.h>
#include <math.h>
+#include <threads.h>
#include "engine_interface.h"
#include "engine_backend.h"
#include "input_api.h"
// _steam_api.disabled = 1;
}
+
+i32 async_thread( void *_ )
+{
+ _set_thread_id( k_thread_async );
+ while( _task_queue_process( 1 ) ) {}
+ $log( $warning, {"Async thread exits"} );
+ return 0;
+}
+
i32 main( i32 argc, const c8 *argv[] )
{
VG_PRE_MAIN;
f64 next_frame_time = 0.0, fixed_time = 0.0, fixed_accumulator = 0.0;
/* ------------- */
+
EVENT_CALL( START );
+ thrd_t handle;
+ ASSERT_CRITICAL( thrd_create( &handle, async_thread, NULL ) == thrd_success );
+
L_new_frame:;
f64 now = glfwGetTime();
if( now < next_frame_time )
_engine.time = now;
next_frame_time = now + 1.0/_engine.framerate_limit;
+ while( _task_queue_process( 0 ) ) {}
+
/* normal update */
EVENT_CALL( ENGINE_NEW_FRAME );
+#include "common_api.h"
#include "opengl.h"
GLuint compile_opengl_subshader( GLint type, const c8 *sources[], u32 source_count, bool critical, const c8 *name )
+#include "common_api.h"
#include "opengl.h"
#include "graphics_api.h"
#include "engine_interface.h"
#include <stdlib.h>
+#include "common_api.h"
void *_heap_allocate( u64 size )
{
+#include "common_api.h"
+
void pool_init( struct pool_allocator *pool, struct pool_node *nodes, u16 node_count, struct pool_chain *full_chain )
{
pool->nodes = nodes;
+#include "common_api.h"
+
void queue_init( struct queue_allocator *queue, void *buffer, u32 buffer_size )
{
queue->buffer = buffer;
+#include "common_api.h"
+
void stack_init( struct stack_allocator *stack, void *buffer, u32 capacity, const c8 *debug_name )
{
zero_buffer( stack, sizeof(struct stack_allocator) );
+#include "common_api.h"
#define SMALL_SEGMENTS 4
void stretchy_init( struct stretchy_allocator *stretchy, u32 element_size )
{
+#include "common_api.h"
#include "common_thread_api.h"
#include "generated/threads.c"
#include <threads.h>
+#define ASYNC_DEBUG_GROUP_COUNTS
+
struct thread_context
{
u16 async_groups[ 8 ];
}
}
-static void _async_group_increment( u16 groups, i16 dir )
+static void _async_group_increment( u16 groups, i16 dir, const c8 *who )
{
if( !groups )
return;
_async.group_counts[i] += dir;
ASSERT_CRITICAL( _async.group_counts[i] >= 0 );
ASSERT_CRITICAL( _async.group_counts[i] <= 2048 );
+
+#if defined( ASYNC_DEBUG_GROUP_COUNTS )
$log( $warning, {"The task count for group "}, $unsigned(i), {" has "}, {dir>0? "increased": "decreased"},
- {" to "}, $unsigned(_async.group_counts[i]) );
+ {" to "}, $unsigned(_async.group_counts[i]), {" ("}, {who}, {")"} );
+#endif
}
}
ASSERT_CRITICAL( mtx_unlock( &_async.count_lock ) == thrd_success );
task->alloc_debug_info = debug_info;
task->buffer_size = buffer_size;
task->groups = _async_get_groups();
- _async_group_increment( task->groups, +1 );
+ _async_group_increment( task->groups, +1, task->alloc_debug_info );
ASSERT_CRITICAL( mtx_unlock( &queue->lock ) == thrd_success );
return task;
ASSERT_CRITICAL( queue->allocating_task == task );
if( fn ) task->fn = fn;
- else _async_group_increment( task->groups, -1 );
+ else _async_group_increment( task->groups, -1, task->alloc_debug_info );
queue->allocating_task = NULL;
ASSERT_CRITICAL( mtx_lock( &queue->lock ) == thrd_success );
struct task_queue *queue = _get_thread_task_queue( _get_thread_id() );
ASSERT_CRITICAL( mtx_lock( &queue->lock ) == thrd_success );
- while( blocking && (queue->count == 0) )
- ASSERT_CRITICAL( cnd_wait( &queue->work_signal, &queue->lock ) == thrd_success );
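+	/* blocking mode (worker threads, e.g. async_thread) sleeps on the work signal until a
+	   task arrives; non-blocking mode (the main loop polling between frames) returns 0
+	   immediately when the queue is empty */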
+ if( blocking )
+ {
+ while( queue->count == 0 )
+ ASSERT_CRITICAL( cnd_wait( &queue->work_signal, &queue->lock ) == thrd_success );
+ }
+ else
+ {
+		if( queue->count == 0 )
+ {
+ ASSERT_CRITICAL( mtx_unlock( &queue->lock ) == thrd_success );
+ return 0;
+ }
+ }
struct task *task = queue_tail_data( &queue->queue );
ASSERT_CRITICAL( mtx_unlock( &queue->lock ) == thrd_success );
_async_push_groups( task->groups, 0 );
task->fn( task );
_async_pop_groups();
- _async_group_increment( task->groups, -1 );
+ _async_group_increment( task->groups, -1, task->alloc_debug_info );
}
ASSERT_CRITICAL( mtx_lock( &queue->lock ) == thrd_success );
+#include "common_api.h"
+
void zero_buffer( void *buffer, u32 length )
{
for( u32 i=0; i<length; i ++ )
u32 buffer_djb2( const void *buffer, i32 max_length )
{
u32 hash = 5381;
+ if( buffer == NULL )
+ return hash;
for( i32 i=0; max_length? (i<max_length): 1; i ++ )
{
u32 c = ((u8 *)buffer)[i];
return index;
}
-void buffer_copy( const void *buffer_src, u32 src_length, void *buffer_dest, u32 dest_length )
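+/* Returns 1 on success, 0 if a null-terminated copy had to be truncated to fit the destination. */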
+bool buffer_copy( const void *buffer_src, u32 src_length, void *buffer_dest, u32 dest_length )
{
ASSERT_CRITICAL( dest_length );
((u8 *)buffer_dest)[i] = v;
if( (src_length == 0) && (v == 0) )
- return;
+ return 1;
}
if( src_length == 0 )
{
$log( $warning, {"Buffer copy (null terminated), was truncated"} );
((u8 *)buffer_dest)[ dest_length -1 ] = 0;
+ return 0;
}
+
+ return 1;
}
+#include "common_api.h"
#include <signal.h>
#include <execinfo.h>
#include <stdio.h>
+#include "common_api.h"
+
#include <dirent.h>
#include <stdio.h>
#include <sys/stat.h>
+#include "common_api.h"
#include <fileapi.h>
#include <shlobj.h>
#include <stdio.h>
struct directory *directory_open( const c8 *path, struct stack_allocator *stack )
{
+#error
}
enum directory_status directory_status( struct directory *directory )
{
+#error
return directory->status;
}
const c8 *directory_entry_name( struct directory *directory )
{
+#error
}
static bool directory_skip( struct directory *directory )
{
+#error
}
bool directory_next_entry( struct directory *directory )
{
+#error
}
enum directory_entry_type directory_entry_type( struct directory *directory )
{
+#error
}
void directory_close( struct directory *directory )
{
+#error
}
+#include "common_api.h"
#include <string.h>
#define KV_PAGE_COUNT 32
+#include "common_api.h"
#include <stdio.h>
#include <string.h>
#include <threads.h>
+#include "common_api.h"
#define MAX_OPTIONS 32
#define MAX_ARGUMENTS 32
+#include "common_api.h"
#include <stdio.h>
#include <errno.h>
#include <string.h>
}
void stream_open_buffer_read( struct stream *stream, const void *buffer, u32 buffer_length, u32 flags )
{
+ if( buffer_length )
+ ASSERT_CRITICAL( buffer );
stream->buffer_read = buffer;
stream_open_buffer_all( stream, buffer_length, flags | k_stream_read );
}
+#include "common_api.h"
#include <errno.h>
#include <string.h>
+#include "common_api.h"
#define TEMP_STACK_MAX 64
+
__thread struct stack_allocator _temp_stack;
__thread u32 _temp_offsets[ TEMP_STACK_MAX ];
__thread u32 _temp_stack_depth = 0;
+#include "common_api.h"
#include "graphics_api.h"
struct _font _font =
+#include "common_api.h"
#include "graphics_api.h"
struct graphics_target *_graphics_target;
+#include "common_api.h"
#include "graphics_api.h"
struct
+#include "common_api.h"
#include "graphics_api.h"
#include "input_api.h"
#include "maths/common_maths.h"
#include <stdlib.h>
+#include "common_api.h"
+
+struct stream folder_string;
+struct stream tripple_string;
+bool _setup_done = 0;
+
+const c8 *target_file_path = NULL;
+
+bool use_tsan = 0;
+bool use_asan = 0;
+bool shared = 0;
+bool no_pdb = 0;
+
+u32 optimise = 0;
+
+enum libc_version
+{
+ k_libc_version_native = 0,
+ k_libc_version_2_23,
+ k_libc_version_count,
+}
+libc = k_libc_version_native;
+const c8 *libc_names[] =
+{
+ [k_libc_version_native] = "",
+ [k_libc_version_2_23] = "2.23"
+};
+
+enum architecture
+{
+ k_architecture_native,
+ k_architecture_i386,
+ k_architecture_x86_64,
+ k_architecture_count,
+}
+arch = k_architecture_x86_64;
+const c8 *architecture_names[] =
+{
+ [k_architecture_native] = "native",
+ [k_architecture_i386] = "i386",
+ [k_architecture_x86_64] = "x86_64",
+};
+
+enum platform
+{
+ k_platform_native,
+ k_platform_windows,
+ k_platform_linux,
+ k_platform_count,
+}
+platform = k_platform_linux;
+const c8 *platform_names[] =
+{
+ [k_platform_native] = "native",
+ [k_platform_windows] = "windows",
+ [k_platform_linux] = "linux",
+};
+u64 processors = 4;
+
+void system_call( const c8 *call )
+{
+ $log( $shell, {call} );
+ ASSERT_CRITICAL( system(call) == 0 );
+}
+
struct
{
}
static _shaders;
+struct shell_command
+{
+ struct stream command;
+};
+struct
+{
+ struct stretchy_allocator shell;
+}
+static _commands;
+
struct block_context
{
enum target_type
if( keyvalues_read_u32s( kvs, block, "queue_size_m", NULL, &queue_size_m, 1 ) )
$v_string( &_threads.structures, {" .queue_size_m = "}, $unsigned(queue_size_m), {",\n"} );
+
+ u32 flag_kv = keyvalues_get( kvs, block, "flag", 0 );
+ if( flag_kv )
+ $v_string( &_threads.structures, {" .flags = "} );
+
+ u32 flag_count = 0;
+ while( flag_kv )
+ {
+ const c8 *flag_name = keyvalues_value( kvs, flag_kv, NULL );
+ $v_string( &_threads.structures, {flag_count? "|": ""}, {"THREAD_FLAG_"}, {flag_name} );
+
+ flag_kv = keyvalues_get( kvs, flag_kv, "flag", 1 );
+ flag_count ++;
+ }
+ if( flag_count )
+ $v_string( &_threads.structures, {",\n"} );
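+		/* e.g. a thread block containing "flag MAIN" and "flag OPENGL" emits:
+		 *      .flags = THREAD_FLAG_MAIN|THREAD_FLAG_OPENGL,                     */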
+
$v_string( &_threads.structures, {" },\n"} );
}
else if( compare_buffers( block_key, 0, "event", 0 ))
_fatal_exit();
}
}
+ else if( compare_buffers( block_key, 0, "symlink", 0 ) )
+ {
+ const c8 *source = keyvalues_read_string( kvs, block, "source", NULL );
+ const c8 *name = keyvalues_read_string( kvs, block, "name", NULL );
+ ASSERT_CRITICAL( source );
+
+ struct shell_command *command = stretchy_append( &_commands.shell );
+ stream_open_auto( &command->command, k_stream_null_terminate );
+ $v_string( &command->command, {"ln -snf "}, {context.folder}, {"/"}, {source}, {" "}, {string_get( &folder_string )}, {"/"}, { name? name: source } );
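+		/* e.g. a block "symlink { source assets }" (hypothetical name) queues the shell command:
+		 *      ln -snf <block folder>/assets <output bin folder>/assets
+		 * which runs in the shell-command loop after the build succeeds. */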
+ }
else
{
$log( $warning, {"We don't have a compiler definition for block '"}, {block_key}, {"'"} );
const c8 *key = keyvalues_key( kvs, kv, NULL );
const c8 *value = keyvalues_value( kvs, kv, NULL );
+ /* MUST COME FIRST */
+ if( _setup_done == 0 )
+ {
+ if( compare_buffers( key, 0, "name", 0 ) )
+ {
+ buffer_copy( value, 0, _metacompiler.project_name, sizeof(_metacompiler.project_name) );
+
+ c8 *output_folder = realpath( target_file_path, NULL );
+ if( !output_folder )
+ {
+ $log( $fatal, {"'"}, {target_file_path}, {"' "}, $errno() );
+ _fatal_exit();
+ }
+
+ i32 s = buffer_last_index( output_folder, '/', 0 );
+ if( s != -1 )
+ output_folder[s] = '\0';
+
+ stream_open_auto( &tripple_string, k_stream_null_terminate );
+ $v_string( &tripple_string, {architecture_names[arch]}, {"-"}, {platform_names[platform]} );
+ if( platform == k_platform_linux )
+ {
+ $v_string( &tripple_string, {"-gnu"} );
+ if( libc != k_libc_version_native )
+ $v_string( &tripple_string, {"."}, {libc_names[libc]} );
+ }
+
+ stream_open_auto( &folder_string, k_stream_null_terminate );
+ $v_string( &folder_string, {output_folder}, {"/bin/"}, {_metacompiler.project_name}, {"-"}, {string_get(&tripple_string)} );
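+			/* e.g. for project "vg.metacompiler" targeting x86_64 linux this resolves to
+			 *      <config file's directory>/bin/vg.metacompiler-x86_64-linux-gnu */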
+
+ free(output_folder);
+
+ u32 temp_frame = _start_temporary_frame();
+ {
+ struct stream command_string;
+ stream_open_stack( &command_string, _temporary_stack_allocator(), k_stream_null_terminate );
+ $v_string( &command_string, {"mkdir -p "}, {string_get( &folder_string )} );
+ system_call( string_get( &command_string ) );
+ }
+ _end_temporary_frame( temp_frame );
+
+ _setup_done = 1;
+ }
+ }
+
+ ASSERT_CRITICAL( _setup_done );
+
	/* available in all targets */
if( compare_buffers( key, 0, "enable", 0 ) )
_metacompiler.enabled_features[ context.feature_count ++ ] = keyvalues_value( kvs, kv, NULL );
if( context.target == k_target_main )
{
- if( compare_buffers( key, 0, "name", 0 ) )
- buffer_copy( value, 0, _metacompiler.project_name, sizeof(_metacompiler.project_name) );
+ if( compare_buffers( key, 0, "object_type", 0 ) )
+ {
+ if( compare_buffers( value, 0, "executable", 0 ) ) shared = 0;
+ else if( compare_buffers( value, 0, "shared", 0 ) ) shared = 1;
+ else
+ {
+ $log( $fatal, {"Unknown output type '"}, {value}, {"'"} );
+ _fatal_exit();
+ }
+ }
if( compare_buffers( key, 0, "add", 0 ) )
$v_string( &_metacompiler.source_list, {" "}, {context.folder}, {"/"}, {value}, {" \\\n"} );
_end_temporary_frame( temp_frame );
}
-void system_call( const c8 *call )
-{
- $log( $shell, {call} );
- ASSERT_CRITICAL( system(call) == 0 );
-}
-
static i32 enum_read( const c8 *options[], u32 option_count, const c8 *input )
{
for( i32 i=0; i<option_count; i ++ )
return -1;
}
-bool use_tsan = 0;
-bool use_asan = 0;
-bool shared = 0;
-bool no_pdb = 0;
-
-u32 optimise = 0;
-
-enum libc_version
-{
- k_libc_version_native = 0,
- k_libc_version_2_23,
- k_libc_version_count,
-}
-libc = k_libc_version_native;
-const c8 *libc_names[] =
-{
- [k_libc_version_native] = "",
- [k_libc_version_2_23] = "2.23"
-};
-
-enum architecture
-{
- k_architecture_native,
- k_architecture_i386,
- k_architecture_x86_64,
- k_architecture_count,
-}
-arch = k_architecture_x86_64;
-const c8 *architecture_names[] =
-{
- [k_architecture_native] = "native",
- [k_architecture_i386] = "i386",
- [k_architecture_x86_64] = "x86_64",
-};
-
-enum platform
-{
- k_platform_native,
- k_platform_windows,
- k_platform_linux,
- k_platform_count,
-}
-platform = k_platform_linux;
-const c8 *platform_names[] =
-{
- [k_platform_native] = "native",
- [k_platform_windows] = "windows",
- [k_platform_linux] = "linux",
-};
-
-u64 processors = 4;
-const c8 *target_file_path = NULL;
-
void _metacompiler_options(void)
{
const c8 *arg;
{
VG_PRE_MAIN;
- buffer_copy( "project", 0, _metacompiler.project_name, sizeof(_metacompiler.project_name) );
-
u32 options = k_stream_null_terminate;
stream_open_auto( &_metacompiler.source_list, options );
stream_open_auto( &_metacompiler.include_path_list, options );
stream_open_auto( &_shaders.uniform_func_protos, options );
stretchy_init( &_hooks.events, sizeof( struct hook_event ) );
stretchy_init( &_shaders.shaders, sizeof( struct shader ) );
+ stretchy_init( &_commands.shell, sizeof( struct shell_command ) );
if( platform == k_platform_linux ) _metacompiler.enabled_features[0] = "linux";
if( platform == k_platform_windows ) _metacompiler.enabled_features[0] = "windows";
context.feature_count = 1;
_append_kv_list( target_file_path, context );
+ ASSERT_CRITICAL( _setup_done );
+
system_call( "mkdir -p generated" );
$log( $info, {"Generating input header"} );
/* main */
u32 temp_frame = _start_temporary_frame();
{
- c8 *output_folder = realpath( target_file_path, NULL );
- if( !output_folder )
- {
- $log( $fatal, {"'"}, {target_file_path}, {"' "}, $errno() );
- _fatal_exit();
- }
-
- i32 s = buffer_last_index( output_folder, '/', 0 );
- if( s != -1 )
- output_folder[s] = '\0';
-
- struct stream tripple_string;
- stream_open_stack( &tripple_string, _temporary_stack_allocator(), k_stream_null_terminate );
- $v_string( &tripple_string, {architecture_names[arch]}, {"-"}, {platform_names[platform]} );
- if( platform == k_platform_linux )
- {
- $v_string( &tripple_string, {"-gnu"} );
- if( libc != k_libc_version_native )
- $v_string( &tripple_string, {"."}, {libc_names[libc]} );
- }
-
- struct stream folder_string;
- stream_open_stack( &folder_string, _temporary_stack_allocator(), k_stream_null_terminate );
- $v_string( &folder_string, {output_folder}, {"/bin/"}, {_metacompiler.project_name}, {"-"}, {string_get(&tripple_string)} );
-
- free(output_folder);
-
struct stream command_string;
- stream_open_stack( &command_string, _temporary_stack_allocator(), k_stream_null_terminate );
- $v_string( &command_string, {"mkdir -p "}, {string_get( &folder_string )} );
- system_call( string_get( &command_string ) );
-
-
stream_open_stack( &command_string, _temporary_stack_allocator(), k_stream_null_terminate );
$v_string( &command_string, {"taskset -c 0-"}, $unsigned(processors),
{" zig cc -Wall -Wno-unused-function -std=c11 -D_DEFAULT_SOURCE -lm \\\n"},
- {" -include \"common_api.h\" \\\n"} );
+ {" -include \"types.h\" \\\n"} );
+#if 0
if( use_tsan ) $v_string( &command_string, {" -fsanitize=thread -lasan -L/lib/x86_64-linux-gnu \\\n"} );
if( use_asan ) $v_string( &command_string, {" -fsanitize=address -lasan -L/lib/x86_64-linux-gnu \\\n"} );
+#else
+ if( use_tsan ) $v_string( &command_string, {" -fsanitize=thread -lasan -L/usr/lib \\\n"} );
+ if( use_asan ) $v_string( &command_string, {" -fsanitize=address -lasan -L/usr/lib \\\n"} );
+#endif
$v_string( &command_string, { " -O" }, $unsigned( optimise ) );
if( optimise == 0 ) $v_string( &command_string, { " -ggdb" } );
$v_string( &command_string, {" \\\n"} );
- $v_string( &command_string, {" -target "}, {string_get(&tripple_string)}, {" "} );
+ $v_string( &command_string, {" -target "}, {string_get(&tripple_string)}, {shared? " -shared -fPIC ": " "} );
if( platform == k_platform_windows )
{
if( !shared )
_end_temporary_frame( temp_frame2 );
}
+
+ for( u32 i=0; i< stretchy_count( &_commands.shell ); i ++ )
+ {
+ struct shell_command *command = stretchy_get( &_commands.shell, i );
+ system_call( string_get( &command->command ) );
+ }
}
_end_temporary_frame( temp_frame );
return 0;
--- /dev/null
+#include "common_api.h"
+#include "maths/common_maths.h"
+#include "vg_camera.h"
+
+void vg_camera_lerp_angles( f32 a[3], f32 b[3], f32 t, f32 d[3] )
+{
+ d[0] = f32_radian_lerp( a[0], b[0], t );
+ d[1] = f32_lerp( a[1], b[1], t );
+ d[2] = f32_lerp( a[2], b[2], t );
+}
+
+/* lerp position, fov, and angles */
+void vg_camera_lerp( struct vg_camera *a, struct vg_camera *b, f32 t, struct vg_camera *d )
+{
+ v3_lerp( a->pos, b->pos, t, d->pos );
+ vg_camera_lerp_angles( a->angles, b->angles, t, d->angles );
+ d->fov = f32_lerp( a->fov, b->fov, t );
+}
+
+void vg_camera_copy( struct vg_camera *a, struct vg_camera *d )
+{
+ v3_copy( a->pos, d->pos );
+ v3_copy( a->angles, d->angles );
+ d->fov = a->fov;
+}
+
+void vg_m4x3_transform_camera( f32 m[4][3], struct vg_camera *cam )
+{
+ m4x3_mulv( m, cam->pos, cam->pos );
+
+ f32 v0[3];
+ v3_angles_to_vector( cam->angles, v0 );
+ m3x3_mulv( m, v0, v0 );
+ v3_normalize( v0 );
+ v3_vector_to_angles( v0, cam->angles );
+}
+
+/*
+ * 1) [angles, pos] -> transform
+ */
+void vg_camera_update_transform( struct vg_camera *cam )
+{
+ f32 qyaw[4], qpitch[4], qroll[4], qcam[4];
+ q_axis_angle( qyaw, (f32[]){ 0.0f, 1.0f, 0.0f }, -cam->angles[0] );
+ q_axis_angle( qpitch, (f32[]){ 1.0f, 0.0f, 0.0f }, -cam->angles[1] );
+ q_axis_angle( qroll, (f32[]){ 0.0f, 0.0f, 1.0f }, -cam->angles[2] );
+
+ q_mul( qyaw, qpitch, qcam );
+ q_mul( qcam, qroll, qcam );
+ q_m3x3( qcam, cam->transform );
+ v3_copy( cam->pos, cam->transform[3] );
+}
+
+/*
+ * 2) [transform] -> transform_inverse, view matrix
+ */
+void vg_camera_update_view( struct vg_camera *cam )
+{
+ m4x4_copy( cam->mtx.v, cam->mtx_prev.v );
+ m4x3_invert_affine( cam->transform, cam->transform_inverse );
+ m4x3_expand( cam->transform_inverse, cam->mtx.v );
+}
+
+/*
+ * 3) [fov,nearz,farz] -> projection matrix
+ */
+void vg_camera_update_projection( struct vg_camera *cam, f32 vw, f32 vh )
+{
+ m4x4_copy( cam->mtx.p, cam->mtx_prev.p );
+ m4x4_projection( cam->mtx.p, cam->fov, (f32)vw / (f32)vh, cam->nearz, cam->farz );
+}
+
+/*
+ * 4) [projection matrix, view matrix] -> previous pv, new pv
+ */
+void vg_camera_finalize( struct vg_camera *cam )
+{
+ m4x4_copy( cam->mtx.pv, cam->mtx_prev.pv );
+ m4x4_mul( cam->mtx.p, cam->mtx.v, cam->mtx.pv );
+}
--- /dev/null
+#pragma once
+
+struct vg_camera
+{
+ /* Input */
+ f32 angles[3];
+ f32 pos[3];
+ f32 fov, nearz, farz;
+
+ /* Output */
+ f32 transform[4][3],
+ transform_inverse[4][3];
+
+ struct vg_camera_mtx
+ {
+ f32 p[4][4],
+ v[4][4],
+ pv[4][4];
+ }
+ mtx,
+ mtx_prev;
+};
+
+void vg_camera_lerp_angles( f32 a[3], f32 b[3], f32 t, f32 d[3] );
+
+/* lerp position, fov, and angles */
+void vg_camera_lerp( struct vg_camera *a, struct vg_camera *b, f32 t, struct vg_camera *d );
+void vg_camera_copy( struct vg_camera *a, struct vg_camera *d );
+void vg_m4x3_transform_camera( f32 m[4][3], struct vg_camera *cam );
+
+/*
+ * 1) [angles, pos] -> transform
+ */
+void vg_camera_update_transform( struct vg_camera *cam );
+
+/*
+ * 2) [transform] -> transform_inverse, view matrix
+ */
+void vg_camera_update_view( struct vg_camera *cam );
+
+/*
+ * 3) [fov,nearz,farz] -> projection matrix
+ */
+void vg_camera_update_projection( struct vg_camera *cam, f32 vw, f32 vh );
+
+/*
+ * 4) [projection matrix, view matrix] -> previous pv, new pv
+ */
+void vg_camera_finalize( struct vg_camera *cam );
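+
+/*
+	Per-frame usage sketch (cam / viewport names are illustrative):
+
+		vg_camera_update_transform( &cam );
+		vg_camera_update_view( &cam );
+		vg_camera_update_projection( &cam, viewport_w, viewport_h );
+		vg_camera_finalize( &cam );
+
+	after which cam.mtx.pv holds the projection*view matrix for this frame.
+*/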
--- /dev/null
+add vg_camera.c
+include ""
--- /dev/null
+out vec4 FragColor;
+
+in vec4 s_colour;
+
+void main()
+{
+ FragColor = s_colour;
+}
--- /dev/null
+layout (location=0) in vec3 a_co;
+layout (location=1) in vec4 a_colour;
+
+out vec4 s_colour;
+
+void main()
+{
+ vec4 vert_pos = uPv * vec4( a_co, 1.0 );
+ s_colour = a_colour;
+ gl_Position = vert_pos;
+}
--- /dev/null
+#include "opengl.h"
+#include "common_api.h"
+#include "common_thread_api.h"
+#include "maths/common_maths.h"
+#include <stddef.h>
+
+struct vg_lines
+{
+ struct stack_allocator vertex_stack;
+ struct vg_lines_vert
+ {
+ f32 co[3];
+ u32 colour;
+ }
+ *vertex_buffer;
+ u32 vertex_count;
+
+ GLuint vao, vbo;
+}
+static _vg_lines;
+
+#define VG_LINES_MAX_VERTS 50000
+
+void _vg_lines_init(void)
+{
+ ASSERT_CRITICAL( _thread_has_flags( _get_thread_id(), THREAD_FLAG_OPENGL ) );
+
+ _vg_lines.vertex_buffer = _heap_allocate( VG_LINES_MAX_VERTS*sizeof(struct vg_lines_vert) );
+ glGenVertexArrays( 1, &_vg_lines.vao );
+ glGenBuffers( 1, &_vg_lines.vbo );
+ glBindVertexArray( _vg_lines.vao );
+ glBindBuffer( GL_ARRAY_BUFFER, _vg_lines.vbo );
+ glBufferData( GL_ARRAY_BUFFER, VG_LINES_MAX_VERTS*sizeof(struct vg_lines_vert), NULL, GL_DYNAMIC_DRAW );
+ glBindVertexArray( _vg_lines.vao );
+
+ /* Pointers */
+ glVertexAttribPointer(
+ 0,
+ 3,
+ GL_FLOAT,
+ GL_FALSE,
+ sizeof( struct vg_lines_vert ),
+ (void *)0
+ );
+ glEnableVertexAttribArray( 0 );
+
+ glVertexAttribPointer(
+ 1,
+ 4,
+ GL_UNSIGNED_BYTE,
+ GL_TRUE,
+ sizeof( struct vg_lines_vert ),
+ (void*)(offsetof( struct vg_lines_vert, colour ))
+ );
+ glEnableVertexAttribArray( 1 );
+}
+
+void vg_lines_draw( f32 pv[4][4] )
+{
+ _shader_bind( k_shader_debug_lines );
+ _shader_debug_lines_uPv( pv );
+
+ glBindVertexArray( _vg_lines.vao );
+ glBindBuffer( GL_ARRAY_BUFFER, _vg_lines.vbo );
+ glBufferSubData( GL_ARRAY_BUFFER, 0, _vg_lines.vertex_count*sizeof(struct vg_lines_vert), _vg_lines.vertex_buffer );
+
+ glEnable( GL_BLEND );
+ glBlendFunc( GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA );
+ glBlendEquation( GL_FUNC_ADD );
+ glDrawArrays( GL_LINES, 0, _vg_lines.vertex_count );
+ glDisable( GL_BLEND );
+}
+
+void vg_lines_clear(void)
+{
+ _vg_lines.vertex_count = 0;
+}
+
+void vg_line2( f32 from[3], f32 to[3], u32 fc, u32 tc )
+{
+ if( _vg_lines.vertex_count < VG_LINES_MAX_VERTS )
+ {
+ struct vg_lines_vert *v = &_vg_lines.vertex_buffer[ _vg_lines.vertex_count ];
+
+ v3_copy( from, v[0].co );
+ v3_copy( to, v[1].co );
+
+ v[0].colour = fc;
+ v[1].colour = tc;
+
+ _vg_lines.vertex_count += 2;
+ }
+}
+
+void vg_line( f32 from[3], f32 to[3], u32 colour )
+{
+ vg_line2( from, to, colour, colour );
+}
+
+void vg_line_arrow( f32 co[3], f32 dir[3], f32 size, u32 colour )
+{
+ f32 p1[3], tx[3], ty[3], p2[3], p3[3];
+ v3_muladds( co, dir, size, p1 );
+ v3_tangent_basis( dir, tx, ty );
+
+ v3_muladds( p1, dir, -size * 0.125f, p2 );
+ v3_muladds( p2, ty, size * 0.125f, p3 );
+ v3_muladds( p2, ty, -size * 0.125f, p2 );
+
+ vg_line( co, p1, colour );
+ vg_line( p1, p2, colour );
+ vg_line( p1, p3, colour );
+}
+
+void vg_line_box_verts( f32 box[2][3], f32 verts[8][3] )
+{
+ for( u32 i=0; i<8; i++ )
+ for( u32 j=0; j<3; j++ )
+ verts[i][j] = i&(0x1<<j)? box[1][j]: box[0][j];
+}
+
+void vg_line_mesh( f32 verts[][3], u32 indices[][2], u32 indice_count, u32 colour )
+{
+ for( u32 i=0; i<indice_count; i++ )
+ vg_line( verts[indices[i][0]], verts[indices[i][1]], colour );
+}
+
+void vg_line_boxf( f32 box[2][3], u32 colour )
+{
+ f32 verts[8][3];
+ vg_line_box_verts( box, verts );
+ u32 indices[][2] = {{0,1},{1,3},{3,2},{2,0},
+ {4,5},{5,7},{7,6},{6,4},
+ {4,0},{5,1},{6,2},{7,3}};
+
+ vg_line_mesh( verts, indices, ARRAY_COUNT(indices), colour );
+}
+
+void vg_line_boxf_transformed( f32 m[4][3], f32 box[2][3], u32 colour )
+{
+ f32 verts[8][3];
+ vg_line_box_verts( box, verts );
+
+ for( u32 i=0; i<8; i++ )
+ m4x3_mulv( m, verts[i], verts[i] );
+
+ u32 indices[][2] = {{0,1},{1,3},{3,2},{2,0},
+ {4,5},{5,7},{7,6},{6,4},
+ {4,0},{5,1},{6,2},{7,3}};
+ vg_line_mesh( verts, indices, ARRAY_COUNT(indices), colour );
+}
+
+void vg_line_cross( f32 pos[3], u32 colour, f32 scale )
+{
+ f32 p0[3], p1[3];
+ v3_add( (f32[]){ scale,0.0f,0.0f}, pos, p0 );
+ v3_add( (f32[]){-scale,0.0f,0.0f}, pos, p1 );
+ vg_line( p0, p1, colour );
+ v3_add( (f32[]){0.0f, scale,0.0f}, pos, p0 );
+ v3_add( (f32[]){0.0f,-scale,0.0f}, pos, p1 );
+ vg_line( p0, p1, colour );
+ v3_add( (f32[]){0.0f,0.0f, scale}, pos, p0 );
+ v3_add( (f32[]){0.0f,0.0f,-scale}, pos, p1 );
+ vg_line( p0, p1, colour );
+}
+
+void vg_line_point( f32 pt[3], f32 size, u32 colour )
+{
+ f32 box[][3] =
+ {
+ { pt[0]-size, pt[1]-size, pt[2]-size },
+ { pt[0]+size, pt[1]+size, pt[2]+size }
+ };
+
+ vg_line_boxf( box, colour );
+}
+
+
+void vg_line_sphere( f32 m[4][3], f32 radius, u32 colour )
+{
+ f32 ly[3] = { 0.0f, 0.0f, radius },
+ lx[3] = { 0.0f, radius, 0.0f },
+ lz[3] = { 0.0f, 0.0f, radius };
+
+ for( int i=0; i<16; i++ )
+ {
+ f32 t = ((f32)(i+1) * (1.0f/16.0f)) * VG_PIf * 2.0f,
+ s = sinf(t),
+ c = cosf(t);
+
+ f32 py[3] = { s*radius, 0.0f, c*radius },
+ px[3] = { s*radius, c*radius, 0.0f },
+ pz[3] = { 0.0f, s*radius, c*radius };
+
+ f32 p0[3], p1[3], p2[3], p3[3], p4[3], p5[3];
+ m4x3_mulv( m, py, p0 );
+ m4x3_mulv( m, ly, p1 );
+ m4x3_mulv( m, px, p2 );
+ m4x3_mulv( m, lx, p3 );
+ m4x3_mulv( m, pz, p4 );
+ m4x3_mulv( m, lz, p5 );
+
+ vg_line( p0, p1, colour == 0x00? 0xff00ff00: colour );
+ vg_line( p2, p3, colour == 0x00? 0xff0000ff: colour );
+ vg_line( p4, p5, colour == 0x00? 0xffff0000: colour );
+
+ v3_copy( py, ly );
+ v3_copy( px, lx );
+ v3_copy( pz, lz );
+ }
+}
+
+void vg_line_capsule( f32 m[4][3], f32 radius, f32 h, u32 colour )
+{
+ f32 s0 = sinf(0.0f)*radius,
+ c0 = cosf(0.0f)*radius;
+
+ f32 p0[3], p1[3], up[3], right[3], forward[3];
+ m3x3_mulv( m, (f32[]){0.0f,1.0f,0.0f}, up );
+ m3x3_mulv( m, (f32[]){1.0f,0.0f,0.0f}, right );
+ m3x3_mulv( m, (f32[]){0.0f,0.0f,-1.0f}, forward );
+ v3_muladds( m[3], up, -h*0.5f+radius, p0 );
+ v3_muladds( m[3], up, h*0.5f-radius, p1 );
+
+ f32 a0[3], a1[3], b0[3], b1[3];
+ v3_muladds( p0, right, radius, a0 );
+ v3_muladds( p1, right, radius, a1 );
+ v3_muladds( p0, forward, radius, b0 );
+ v3_muladds( p1, forward, radius, b1 );
+ vg_line( a0, a1, colour );
+ vg_line( b0, b1, colour );
+
+ v3_muladds( p0, right, -radius, a0 );
+ v3_muladds( p1, right, -radius, a1 );
+ v3_muladds( p0, forward, -radius, b0 );
+ v3_muladds( p1, forward, -radius, b1 );
+ vg_line( a0, a1, colour );
+ vg_line( b0, b1, colour );
+
+ for( i32 i=0; i<16; i++ )
+ {
+ f32 t = ((f32)(i+1) * (1.0f/16.0f)) * VG_PIf * 2.0f,
+ s1 = sinf(t)*radius,
+ c1 = cosf(t)*radius;
+
+ f32 e0[3] = { s0, 0.0f, c0 },
+ e1[3] = { s1, 0.0f, c1 },
+ e2[3] = { s0, c0, 0.0f },
+ e3[3] = { s1, c1, 0.0f },
+ e4[3] = { 0.0f, c0, s0 },
+ e5[3] = { 0.0f, c1, s1 };
+
+ m3x3_mulv( m, e0, e0 );
+ m3x3_mulv( m, e1, e1 );
+ m3x3_mulv( m, e2, e2 );
+ m3x3_mulv( m, e3, e3 );
+ m3x3_mulv( m, e4, e4 );
+ m3x3_mulv( m, e5, e5 );
+
+ v3_add( p0, e0, a0 );
+ v3_add( p0, e1, a1 );
+ v3_add( p1, e0, b0 );
+ v3_add( p1, e1, b1 );
+
+ vg_line( a0, a1, colour );
+ vg_line( b0, b1, colour );
+
+ if( c0 < 0.0f )
+ {
+ v3_add( p0, e2, a0 );
+ v3_add( p0, e3, a1 );
+ v3_add( p0, e4, b0 );
+ v3_add( p0, e5, b1 );
+ }
+ else
+ {
+ v3_add( p1, e2, a0 );
+ v3_add( p1, e3, a1 );
+ v3_add( p1, e4, b0 );
+ v3_add( p1, e5, b1 );
+ }
+
+ vg_line( a0, a1, colour );
+ vg_line( b0, b1, colour );
+
+ s0 = s1;
+ c0 = c1;
+ }
+}
--- /dev/null
+#define LINE_RED 0xff0000ff
+#define LINE_GREEN 0xff00ff00
+#define LINE_BLUE 0xffff0000
+#define LINE_WHITE 0xffffffff
+#define LINE_BLACK 0xff000000
+#define LINE_CLEAR 0x00ffffff
+#define LINE_PINK 0xffff00ff
+#define LINE_YELOW 0xff00ffff
+#define LINE_CYAN 0xffffff00
+#define LINE_NONE 0x00000000
+
+void vg_lines_clear(void);
+void vg_lines_draw( f32 pv[4][4] );
+
+void vg_line_capsule( f32 m[4][3], f32 radius, f32 h, u32 colour );
+void vg_line_sphere( f32 m[4][3], f32 radius, u32 colour );
+void vg_line_point( f32 pt[3], f32 size, u32 colour );
+void vg_line_boxf_transformed( f32 m[4][3], f32 box[2][3], u32 colour );
+void vg_line_boxf( f32 box[2][3], u32 colour );
+void vg_line_mesh( f32 verts[][3], u32 indices[][2], u32 indice_count, u32 colour );
+void vg_line_box_verts( f32 box[2][3], f32 verts[8][3] );
+void vg_line_cross( f32 pos[3], u32 colour, f32 scale );
+void vg_line_arrow( f32 co[3], f32 dir[3], f32 size, u32 colour );
+void vg_line( f32 from[3], f32 to[3], u32 colour );
+void vg_line2( f32 from[3], f32 to[3], u32 fc, u32 tc );
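+
+/*
+	Usage sketch (the camera variable is illustrative): queue any number of lines during
+	the frame, submit them once from the OpenGL thread, then reset the buffer:
+
+		vg_line( (f32[]){0,0,0}, (f32[]){0,1,0}, LINE_GREEN );
+		vg_lines_draw( cam.mtx.pv );
+		vg_lines_clear();
+*/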
--- /dev/null
+add vg_lines.c
+include ""
+
+hook
+{
+ event START
+ function _vg_lines_init
+}
+
+cvar
+{
+ name debug_lines
+ type u32
+ default 0
+ cheat 1
+ description "Show line debuggers"
+}
+
+shader
+{
+ name debug_lines
+
+ subshader
+ {
+ type vertex
+ add debug_lines.vs
+
+ uniform
+ {
+ type mat4
+ alias uPv
+ }
+ }
+
+ subshader
+ {
+ type fragment
+ add debug_lines.fs
+ }
+}
--- /dev/null
+#include "common_api.h"
+#include "array_file.h"
+
+u32 af_str_hash( const void *packed_strings, u32 pstr )
+{
+ if( pstr & 0x3 )
+ {
+		$log( $fatal, {"ALIGNMENT ERROR, PSTR INDEX: "}, $unsigned(pstr) );
+		_fatal_exit();
+	}
+ return *((u32 *)(packed_strings + pstr));
+}
+
+const c8 *af_str( const void *packed_strings, u32 pstr )
+{
+ return packed_strings + pstr + 4;
+}
+
+bool af_str_eq( const void *packed_strings, u32 pstr, const c8 *str, u32 str_hash )
+{
+ if( af_str_hash( packed_strings, pstr ) == str_hash )
+ if( compare_buffers( str, 0, af_str( packed_strings, pstr ), 0 ))
+ return 1;
+ return 0;
+}
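+
+/*
+	Packed string layout, as produced by af_compile_string:
+	  [u32 djb2 hash][null-terminated characters][pad to 4 byte boundary]
+	so every pstr offset is 4-aligned, which af_str_hash checks above.
+*/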
+
+void af_load_array_file_buffer( struct array_file_context *ctx, struct array_file_meta *arr, void *buffer, u32 stride )
+{
+ if( arr->item_count )
+ {
+ zero_buffer( buffer, stride*arr->item_count );
+ u32 read_size = u32_min( stride, arr->item_size );
+ for( u32 i=0; i<arr->item_count; i++ )
+ {
+ stream_seek( ctx->stream, arr->file_offset + i*arr->item_size );
+ stream_read( ctx->stream, buffer+i*stride, read_size );
+ }
+ }
+}
+
+void af_load_array_file( struct array_file_context *ctx, struct array_file_ptr *out_ptr,
+ struct array_file_meta *arr, struct stack_allocator *stack, u32 stride )
+{
+ if( arr->item_count )
+ {
+ u32 size = stride*arr->item_count;
+ out_ptr->data = stack_allocate( stack, size, 8, NULL );
+ af_load_array_file_buffer( ctx, arr, out_ptr->data, stride );
+ }
+ else
+ out_ptr->data = NULL;
+
+ out_ptr->stride = stride;
+ out_ptr->count = arr->item_count;
+}
+
+void *af_arritm( struct array_file_ptr *arr, u32 index )
+{
+ if( index >= arr->count )
+ {
+ $log( $fatal, {"Index out of range"}, $unsigned(index), {" >= "}, $unsigned( arr->count ) );
+ _fatal_exit();
+ }
+
+ return ((u8 *)arr->data) + index*arr->stride;
+}
+
+u32 af_arrcount( struct array_file_ptr *arr )
+{
+ return arr->count;
+}
+
+struct array_file_meta *af_find_array( struct array_file_context *ctx, const c8 *name )
+{
+ for( u32 i=0; i<af_arrcount(&ctx->index); i++ )
+ {
+ struct array_file_meta *arr = af_arritm( &ctx->index, i );
+ if( compare_buffers( arr->name, 0, name, 0 ) )
+ return arr;
+ }
+
+ return NULL;
+}
+
+bool af_load_array( struct array_file_context *ctx, struct array_file_ptr *ptr, const c8 *name,
+ struct stack_allocator *stack, u32 stride )
+{
+ struct array_file_meta *arr = af_find_array( ctx, name );
+
+ if( arr )
+ {
+ af_load_array_file( ctx, ptr, arr, stack, stride );
+ return 1;
+ }
+ else
+ {
+ ptr->data = NULL;
+ ptr->count = 0;
+ ptr->stride = 0;
+ return 0;
+ }
+}
+
+bool af_open_stream( struct array_file_context *afc, struct stream *stream, u32 min_version, u32 max_version,
+ struct stack_allocator *stack )
+{
+ afc->stream = stream;
+ if( stream_read( stream, &afc->header, sizeof(struct array_file_header)) != sizeof(struct array_file_header) )
+ {
+ $log( $error, {"Array file not large enough to contain header."} );
+ return 0;
+ }
+
+ if( (afc->header.version < min_version) || (afc->header.version > max_version) )
+ {
+		$log( $error, {"Array file version is out of range ("}, $unsigned( afc->header.version ), {")"},
+		      {"\nAccepted: "}, $unsigned( min_version ), {" -> "}, $unsigned( max_version ) );
+ return 0;
+ }
+
+ af_load_array_file( afc, &afc->index, &afc->header.index, stack, sizeof(struct array_file_meta) );
+ return 1;
+}
+
+/* compiler
+ * ---------------------------------------------------------------------- */
+struct af_compiler_iter
+{
+ u32 i, j;
+ struct af_compiler_index *index;
+ struct af_compiler_item *current_item;
+ void *data;
+};
+
+static void af_init_iterator( struct af_compiler_iter *iter, struct af_compiler_index *index )
+{
+ iter->i = 0;
+ iter->j = 0;
+ iter->index = index;
+ iter->current_item = NULL;
+ iter->data = NULL;
+}
+
+static bool af_next( struct af_compiler_iter *iter )
+{
+ if( iter->i == 0 )
+ {
+ if( iter->index->first == NULL )
+ return 0;
+ iter->current_item = iter->index->first;
+ }
+
+ if( iter->j >= iter->current_item->count )
+ {
+ if( iter->current_item->next == NULL )
+ return 0;
+
+ iter->current_item = iter->current_item->next;
+ iter->j = 0;
+ }
+
+ iter->data = iter->current_item->data + (iter->j * iter->index->element_size);
+ iter->j ++;
+ iter->i ++;
+ return 1;
+}
+
+struct af_compiler_item *af_compiler_allocate_items( struct af_compiler *compiler, struct af_compiler_index *index, u32 count )
+{
+ struct af_compiler_item *entry = stack_allocate( compiler->stack, sizeof(struct af_compiler_item), 1, "Compiler item" );
+ entry->next = NULL;
+
+ u32 data_size = count * index->element_size;
+ index->element_count += count;
+
+ entry->data = stack_allocate( compiler->stack, data_size, 8, NULL );
+ entry->count = count;
+
+ for( u32 i=0; i<data_size; i ++ )
+ ((u8 *)entry->data)[i] = 0xab;
+
+ if( index->last )
+ index->last->next = entry;
+ index->last = entry;
+
+ if( !index->first )
+ index->first = entry;
+
+ return entry;
+}
+
+static void af_compiler_init_index( struct af_compiler_index *index, const c8 *alias, u32 element_size )
+{
+ ASSERT_CRITICAL( element_size );
+ if( !buffer_copy( alias, 0, index->name, sizeof(index->name) ) )
+ {
+ $log( $fatal, {"Index name overflowed: "}, {alias} );
+ _fatal_exit();
+ }
+ index->element_size = element_size;
+ index->element_count = 0;
+ index->first = NULL;
+ index->last = NULL;
+}
+
+struct af_compiler_index *af_compiler_create_index( struct af_compiler *compiler, const c8 *alias, u32 element_size )
+{
+ struct af_compiler_item *item = af_compiler_allocate_items( compiler, &compiler->index, 1 );
+ struct af_compiler_index *index = item->data;
+ af_compiler_init_index( index, alias, element_size );
+ return index;
+}
+
+u32 af_compile_string( struct af_compiler *compiler, const c8 *string )
+{
+ u32 string_hash = buffer_djb2( string, 0 );
+
+ // TODO: Hash table against existing strings (low priority)
+	u32 offset = compiler->strings_index->element_count;
+	u32 bytes = PAD_TO_4( buffer_first_index( string, 0, 0 ) + 1 + 4 );
+ struct af_compiler_item *item = af_compiler_allocate_items( compiler, compiler->strings_index, bytes );
+ *((u32 *)item->data) = string_hash;
+ buffer_copy( string, 0, item->data+4, 0 );
+ return offset;
+}
+
+void af_compiler_init( struct af_compiler *compiler, struct stack_allocator *stack )
+{
+ compiler->stack = stack;
+ af_compiler_init_index( &compiler->index, "index", sizeof(struct af_compiler_index) );
+ compiler->strings_index = af_compiler_create_index( compiler, "strings", 1 );
+ af_compile_string( compiler, "nul" );
+}
+
+static void af_write_bin( struct af_compiler *compiler, void *data, u32 data_len, u32 padding )
+{
+ if( data )
+ {
+ stream_write( &compiler->stream, data, data_len );
+ compiler->file_offset += data_len;
+ }
+
+ if( padding )
+ {
+ while( compiler->file_offset % padding )
+ {
+ const u8 pad_byte = 0xac;
+ stream_write( &compiler->stream, &pad_byte, 1 );
+ compiler->file_offset ++;
+ }
+ }
+}
+
+struct af_compiler_index *af_get_or_make_index( struct af_compiler *compiler, const c8 *alias, u32 element_size )
+{
+ struct af_compiler_iter iter;
+ af_init_iterator( &iter, &compiler->index );
+ while( af_next( &iter ) )
+ {
+ struct af_compiler_index *index = iter.data;
+ if( compare_buffers( index->name, 0, alias, 0 ) )
+ return index;
+ }
+
+ return af_compiler_create_index( compiler, alias, element_size );
+}
+
+bool af_write( struct af_compiler *compiler, const c8 *path, u32 version )
+{
+ u32 indices_to_write = 0;
+
+ struct af_compiler_iter iter;
+ af_init_iterator( &iter, &compiler->index );
+ while( af_next( &iter ) )
+ {
+ struct af_compiler_index *index = iter.data;
+ if( index->element_count )
+ indices_to_write ++;
+ }
+
+ u32 header_size = PAD_TO_8( sizeof( struct array_file_header ) );
+ u32 index_size = PAD_TO_8( sizeof( struct array_file_meta ) * indices_to_write );
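+	/* resulting file layout ( each section padded to 8 bytes ):
+	 *   [array_file_header][array_file_meta x indices_to_write][array data blocks ...] */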
+
+ if( !stream_open_file( &compiler->stream, path, k_stream_write ) )
+ return 0;
+
+ compiler->file_offset = 0;
+
+ struct array_file_header header;
+ header.version = version;
+ header.index.file_offset = header_size;
+ header.index.item_count = indices_to_write;
+ header.index.item_size = sizeof(struct array_file_meta);
+ buffer_copy( "index", 0, header.index.name, sizeof(header.index.name) );
+ af_write_bin( compiler, &header, sizeof(struct array_file_header), 8 );
+
+ /* write index */
+ u32 file_offset = header_size + index_size;
+ af_init_iterator( &iter, &compiler->index );
+ while( af_next( &iter ) )
+ {
+ struct af_compiler_index *index = iter.data;
+ if( index->element_count )
+ {
+ struct array_file_meta meta;
+ buffer_copy( index->name, sizeof(index->name), meta.name, sizeof(meta.name) );
+ meta.item_count = index->element_count;
+ meta.item_size = index->element_size;
+ meta.file_offset = file_offset;
+ file_offset += PAD_TO_8( meta.item_size*meta.item_count );
+ af_write_bin( compiler, &meta, sizeof(struct array_file_meta), 0 );
+ }
+ }
+ af_write_bin( compiler, NULL, 0, 8 );
+
+ af_init_iterator( &iter, &compiler->index );
+ while( af_next( &iter ) )
+ {
+ struct af_compiler_index *index = iter.data;
+
+ if( index->element_count )
+ {
+ struct af_compiler_iter item_iter;
+ af_init_iterator( &item_iter, index );
+
+ while( af_next( &item_iter ) )
+ af_write_bin( compiler, item_iter.data, index->element_size, 0 );
+ af_write_bin( compiler, NULL, 0, 8 );
+ }
+ }
+
+ stream_close( &compiler->stream );
+ return 1;
+}
--- /dev/null
+#pragma once
+
+struct array_file_ptr
+{
+ void *data;
+ u32 count, stride;
+};
+
+struct array_file_meta
+{
+ u32 file_offset,
+ item_count,
+ item_size;
+
+ c8 name[16];
+};
+
+struct array_file_header
+{
+ u32 version;
+ struct array_file_meta index;
+};
+
+struct array_file_context
+{
+ struct stream *stream;
+ struct array_file_header header;
+ struct array_file_ptr index;
+};
+
+/* array loading */
+struct array_file_meta *af_find_array( struct array_file_context *ctx, const c8 *name );
+
+void af_load_array_file( struct array_file_context *ctx, struct array_file_ptr *out_ptr,
+ struct array_file_meta *arr, struct stack_allocator *stack, u32 stride );
+
+bool af_load_array( struct array_file_context *ctx, struct array_file_ptr *ptr, const c8 *name,
+ struct stack_allocator *stack, u32 stride );
+void af_load_array_file_buffer( struct array_file_context *ctx, struct array_file_meta *arr, void *buffer, u32 stride );
+
+/* array access */
+void *af_arritm( struct array_file_ptr *arr, u32 index );
+u32 af_arrcount( struct array_file_ptr *arr );
+
+/* packed string buffer access (with djb2 hash prefix) */
+const c8 *af_str( const void *packed_strings, u32 pstr );
+u32 af_str_hash( const void *packed_strings, u32 pstr );
+bool af_str_eq( const void *packed_strings, u32 pstr, const char *str, u32 str_hash );
+
+#define AF_STR_EQ( CTX, PSTR, CONSTR ) \
+ af_str_eq( CTX, PSTR, CONSTR, vg_strdjb2( CONSTR ) )
+
+/* Compiler
+ * ------------------------------------ */
+
+struct af_compiler_item
+{
+ void *data;
+ u32 count;
+ struct af_compiler_item *next;
+};
+
+struct af_compiler_index
+{
+ c8 name[16];
+ u32 element_size, element_count;
+ struct af_compiler_item *first, *last;
+};
+
+struct af_compiler
+{
+ struct stack_allocator *stack;
+ struct af_compiler_index index,
+ *strings_index;
+ struct af_compiler_item *most_recent_item;
+
+ struct stream stream;
+ u32 file_offset;
+};
+
+void af_compiler_init( struct af_compiler *compiler, struct stack_allocator *stack );
+struct af_compiler_item *af_compiler_allocate_items( struct af_compiler *compiler, struct af_compiler_index *index, u32 count );
+struct af_compiler_index *af_compiler_create_index( struct af_compiler *compiler, const c8 *alias, u32 element_size );
+bool af_write( struct af_compiler *compiler, const c8 *path, u32 version );
+struct af_compiler_index *af_get_or_make_index( struct af_compiler *compiler, const c8 *alias, u32 element_size );
+u32 af_compile_string( struct af_compiler *compiler, const c8 *string );
+
+bool af_open_stream( struct array_file_context *afc, struct stream *stream, u32 min_version, u32 max_version,
+ struct stack_allocator *stack );
--- /dev/null
+#include "compiler.h"
+
+void mdl_compiler_init( struct mdl_compiler *compiler )
+{
+ stack_init( &compiler->stack, NULL, VG_MB(10), "MDL Compiler" );
+ af_compiler_init( &compiler->af, &compiler->stack );
+ compiler->meshes = af_compiler_create_index( &compiler->af, "mdl_mesh", sizeof(struct mdl_mesh) );
+ compiler->submeshes = af_compiler_create_index( &compiler->af, "mdl_submesh", sizeof(struct mdl_submesh) );
+ compiler->vertices = af_compiler_create_index( &compiler->af, "mdl_vert", sizeof(struct mdl_vert) );
+ compiler->indices = af_compiler_create_index( &compiler->af, "mdl_indice", sizeof(u32) );
+ compiler->bones = af_compiler_create_index( &compiler->af, "mdl_bone", sizeof(struct mdl_bone) );
+ compiler->materials = af_compiler_create_index( &compiler->af, "mdl_material", sizeof(struct mdl_material) );
+ compiler->shader_data = af_compiler_create_index( &compiler->af, "shader_data", 1 );
+ compiler->armatures = af_compiler_create_index( &compiler->af, "mdl_armature", sizeof(struct mdl_armature) );
+ compiler->textures = af_compiler_create_index( &compiler->af, "mdl_texture", sizeof(struct mdl_texture) );
+ compiler->pack_data = af_compiler_create_index( &compiler->af, "pack", 1 );
+}
+
+u32 mdl_compiler_push_entity( struct mdl_compiler *compiler, u32 entity_type, const c8 *entity_type_string,
+ void *data, u32 data_size )
+{
+ struct af_compiler_index *index = af_get_or_make_index( &compiler->af, entity_type_string, data_size );
+
+ u32 index_part = index->element_count,
+ id = (entity_type & 0xfffff)<<16 | (index_part & 0xfffff); //TODO
+
+ struct af_compiler_item *item = af_compiler_allocate_items( &compiler->af, index, 1 );
+	buffer_copy( data, data_size, item->data, data_size );
+	return id;
+}
+
+void mdl_compiler_start_mesh( struct mdl_compiler *compiler, const c8 *name, u32 associated_entity, u32 associated_armature )
+{
+ struct mdl_mesh *mesh = af_compiler_allocate_items( &compiler->af, compiler->meshes, 1 )->data;
+ mdl_transform_identity( &mesh->transform );
+ mesh->submesh_start = compiler->submeshes->element_count;
+ mesh->submesh_count = 0;
+ mesh->pstr_name = af_compile_string( &compiler->af, name );
+ mesh->entity_id = associated_entity;
+ mesh->armature_id = associated_armature;
+}
+
+void mdl_compiler_start_submesh( struct mdl_compiler *compiler, u32 material_id, u32 flags )
+{
+ struct mdl_mesh *current_mesh = compiler->meshes->last->data;
+ current_mesh->submesh_count ++;
+
+ struct mdl_submesh *sm = af_compiler_allocate_items( &compiler->af, compiler->submeshes, 1 )->data;
+ sm->indice_start = compiler->indices->element_count;
+ sm->vertex_start = compiler->vertices->element_count;
+ sm->indice_count = 0;
+ sm->vertex_count = 0;
+ sm->material_id = material_id;
+ sm->flags = flags;
+ box_init_inf( sm->bbx );
+}
+
+void mdl_compiler_push_meshdata( struct mdl_compiler *compiler, struct mdl_vert *vertex_buffer, u32 vertex_count,
+ u32 *indice_buffer, u32 indice_count )
+{
+ struct mdl_submesh *current_submesh = compiler->submeshes->last->data;
+ current_submesh->vertex_count += vertex_count;
+ current_submesh->indice_count += indice_count;
+
+ struct mdl_vert *dest_verts = af_compiler_allocate_items( &compiler->af, compiler->vertices, vertex_count )->data;
+ u32 *dest_indices = af_compiler_allocate_items( &compiler->af, compiler->indices, indice_count )->data;
+
+ for( u32 i=0; i<vertex_count; i ++ )
+ box_addpt( current_submesh->bbx, vertex_buffer[i].co );
+
+	u32 vert_size = vertex_count * sizeof(struct mdl_vert);
+ buffer_copy( vertex_buffer, vert_size, dest_verts, vert_size );
+
+ u32 indice_size = indice_count * sizeof(u32);
+ buffer_copy( indice_buffer, indice_size, dest_indices, indice_size );
+}
+
+static void mdl_compiler_pack_data( struct mdl_compiler *compiler, const c8 *path, void *data,
+ u32 data_length, struct mdl_file *out_file )
+{
+ out_file->pstr_path = af_compile_string( &compiler->af, path );
+ out_file->pack_offset = compiler->pack_data->element_count;
+ out_file->pack_size = data_length;
+ void *dest = af_compiler_allocate_items( &compiler->af, compiler->pack_data, vg_align16(data_length) )->data;
+ buffer_copy( data, data_length, dest, data_length );
+}
+
+u32 mdl_compiler_start_material( struct mdl_compiler *compiler, const c8 *name )
+{
+ u32 material_id = compiler->materials->element_count + 1;
+
+ struct mdl_material *material = af_compiler_allocate_items( &compiler->af, compiler->materials, 1 )->data;
+ material->pstr_name = af_compile_string( &compiler->af, name );
+ material->shader = 0;
+ material->flags = 0;
+ material->surface_prop = 0;
+ material->props.kvs.offset = 0;
+ material->props.kvs.size = 0;
+ return material_id;
+}
+
+void mdl_compiler_set_surface_info( struct mdl_compiler *compiler, u32 flags, u32 surface_prop )
+{
+ struct mdl_material *current_material = compiler->materials->last->data;
+ current_material->flags = flags;
+ current_material->surface_prop = surface_prop;
+}
+
+u32 mdl_compiler_compile_texture_qoi( struct mdl_compiler *compiler, const c8 *name, void *data, u32 data_len )
+{
+ u32 texture_id = compiler->textures->element_count + 1;
+ struct mdl_texture *texture = af_compiler_allocate_items( &compiler->af, compiler->textures, 1 )->data;
+ mdl_compiler_pack_data( compiler, name, data, data_len, &texture->file );
+ return texture_id;
+}
+
+void mdl_compiler_push_shaderdata( struct mdl_compiler *compiler, u32 shader_id, struct keyvalues *shader_kvs )
+{
+ struct mdl_material *current_material = compiler->materials->last->data;
+ current_material->shader = shader_id;
+ current_material->props.kvs.offset = compiler->shader_data->element_count;
+
+ // FIXME FIXME FIXME FIXME FIXME NEED TO FLATTEN OUT THE KVS INTO STRINGYMABOB!!!!!!!!!!!!!!!!
+#if 0
+ current_material->props.kvs.size = shader_kvs->cur.co;
+
+ void *dest = af_compiler_allocate_items( &compiler->af, compiler->shader_data, vg_align8(shader_kvs->cur.co) )->data;
+ memcpy( dest, shader_kvs->buf, shader_kvs->cur.co );
+#endif
+}
+
+void mdl_compiler_free( struct mdl_compiler *compiler )
+{
+ stack_free( &compiler->stack );
+}
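+
+/*
+	Usage sketch (names, data and ordering are illustrative, not a fixed API contract):
+
+		struct mdl_compiler c;
+		mdl_compiler_init( &c );
+		u32 mat = mdl_compiler_start_material( &c, "concrete" );
+		mdl_compiler_start_mesh( &c, "floor", 0, 0 );
+		mdl_compiler_start_submesh( &c, mat, 0 );
+		mdl_compiler_push_meshdata( &c, verts, vert_count, indices, indice_count );
+		af_write( &c.af, "out.mdl", version );
+		mdl_compiler_free( &c );
+*/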
--- /dev/null
+struct mdl_compiler
+{
+ struct af_compiler af;
+ struct af_compiler_index *meshes,
+ *submeshes,
+ *vertices,
+ *indices,
+ *bones,
+ *materials,
+ *shader_data,
+ *armatures,
+ *textures,
+ *pack_data;
+ struct stack_allocator stack;
+};
--- /dev/null
+#pragma once
+#include "model.h"
+
+enum entity_alias
+{
+ k_ent_none = 0,
+ k_ent_gate = 1,
+ k_ent_spawn = 2,
+ k_ent_route_node = 3,
+ k_ent_route = 4,
+ k_ent_water = 5,
+ k_ent_volume = 6,
+ k_ent_audio = 7,
+ k_ent_marker = 8,
+ k_ent_font = 9,
+ k_ent_font_variant= 10,
+ k_ent_traffic = 11,
+ k_ent_skateshop = 12,
+ k_ent_camera = 13,
+ k_ent_swspreview = 14,
+ k_ent_deleted1 = 15, //k_ent_menuitem = 15,
+ k_ent_worldinfo = 16,
+ k_ent_ccmd = 17,
+ k_ent_objective = 18,
+ k_ent_challenge = 19,
+ k_ent_deleted0 = 20, //k_ent_relay = 20,
+ k_ent_cubemap = 21,
+ k_ent_miniworld = 22,
+ k_ent_prop = 23,
+ k_ent_list = 24,
+ k_ent_region = 25,
+ k_ent_glider = 26,
+ k_ent_npc = 27,
+ k_ent_armature = 28,
+ k_ent_script = 29,
+ k_ent_atom = 30,
+ k_ent_cutscene = 31,
+ k_ent_light = 32,
+ k_mdl_mesh = 33,
+ k_editer_property = 34,
+ k_editer_item = 35,
+ k_ent_max
+};
+
+const char *_entity_alias_str[] =
+{
+ [k_ent_none] = "none/null",
+ [k_ent_gate] = "ent_gate",
+ [k_ent_spawn] = "ent_spawn",
+ [k_ent_route_node] = "ent_route_node",
+ [k_ent_route] = "ent_route",
+ [k_ent_water] = "ent_water",
+ [k_ent_volume] = "ent_volume",
+ [k_ent_audio] = "ent_audio",
+ [k_ent_marker] = "ent_marker",
+ [k_ent_font] = "ent_font",
+ [k_ent_font_variant] = "ent_font_variant",
+ [k_ent_traffic] = "ent_traffic",
+ [k_ent_skateshop] = "ent_skateshop",
+ [k_ent_camera] = "ent_camera",
+ [k_ent_swspreview] = "ent_swspreview",
+ //[k_ent_menuitem] = "ent_menuitem",
+ [k_ent_worldinfo] = "ent_worldinfo",
+ [k_ent_ccmd] = "ent_ccmd",
+ [k_ent_objective] = "ent_objective",
+ [k_ent_challenge] = "ent_challenge",
+ //[k_ent_relay] = "ent_relay",
+ [k_ent_cubemap] = "ent_cubemap",
+ [k_ent_miniworld] = "ent_miniworld",
+ [k_ent_prop] = "ent_prop",
+ [k_ent_region] = "ent_region",
+ [k_ent_glider] = "ent_glider",
+ [k_ent_npc] = "ent_npc",
+ [k_ent_armature] = "mdl_armature",
+ [k_ent_script] = "ent_script",
+ [k_ent_atom] = "ent_atom",
+ [k_ent_cutscene] = "ent_cutscene",
+ [k_ent_light] = "ent_light"
+};
+
+static inline u32 mdl_entity_id_type( u32 entity_id )
+{
+ return (entity_id & 0x0fff0000) >> 16;
+}
+
+static inline u32 mdl_entity_id_id( u32 entity_id )
+{
+ return entity_id & 0x0000ffff;
+}
+
+static inline u32 mdl_entity_id( u32 type, u32 index )
+{
+ return (type & 0xfffff)<<16 | (index & 0xfffff);
+}
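+
+/* e.g. mdl_entity_id( k_ent_gate, 5 ) == 0x00010005, from which mdl_entity_id_type()
+ * recovers 1 (k_ent_gate) and mdl_entity_id_id() recovers 5. */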
+
+enum ent_spawn_flag
+{
+ k_ent_spawn_flag_locked = 0x1,
+ k_ent_spawn_flag_group_1 = 0x10,
+ k_ent_spawn_flag_group_2 = 0x20,
+ k_ent_spawn_flag_group_3 = 0x40,
+ k_ent_spawn_flag_group_4 = 0x80
+};
+
+struct ent_spawn
+{
+ struct mdl_transform transform;
+ u32 pstr_name;
+ u32 flags;
+};
+
+enum light_type
+{
+ k_light_type_point = 0,
+ k_light_type_spot = 1
+};
+
+struct ent_light
+{
+ struct mdl_transform transform;
+ u32 daytime,
+ type;
+ f32 colour[4];
+ f32 angle,
+ range;
+ f32 inverse_world[4][3];
+ f32 angle_sin_cos[2];
+};
+
+/* v101 */
+#if 0
+enum gate_type{
+ k_gate_type_unlinked = 0,
+ k_gate_type_teleport = 1,
+ k_gate_type_nonlocal_unlinked = 2,
+ k_gate_type_nonlocel = 3
+};
+#endif
+
+enum list_alias_type
+{
+ k_list_alias_none = 0,
+ k_list_alias_string = 1
+};
+
+struct ent_list
+{
+ u16 entity_ref_start, entity_ref_count;
+ u8 alias_type, none0, none1, none2;
+ u32 pstr_alias;
+};
+
+struct file_entity_ref
+{
+ u32 entity_id, pstr_alias;
+};
+
+/* v102+ */
+enum ent_gate_flag
+{
+ k_ent_gate_linked = 0x1, /* this is a working portal */
+ k_ent_gate_nonlocal = 0x2, /* use the key string to link this portal.
+ NOTE: if set, it adds the flip flag. */
+ k_ent_gate_flip = 0x4, /* flip direction 180* for exiting portal */
+ k_ent_gate_custom_mesh = 0x8, /* use a custom submesh instead of default */
+ k_ent_gate_locked = 0x10,/* has to be unlocked to be useful */
+
+ k_ent_gate_clean_pass = 0x20,/* player didn't rewind while getting here */
+	k_ent_gate_no_linkback = 0x40,/* NONLOCAL receivers are not allowed to link back through this gate */
+ k_ent_gate_passive = 0x80
+};
+
+struct ent_gate
+{
+ u32 flags,
+ target,
+ key;
+
+ f32 dimensions[3],
+ co[2][3];
+
+ f32 q[2][4];
+
+ /* runtime */
+ f32 to_world[4][3], transport[4][3];
+ union
+ {
+ u32 timing_version;
+ u16 remote_addon_id;
+
+ struct
+ {
+ u8 ref_count;
+ };
+ };
+
+ union
+ {
+ f64 timing_time;
+ u32 cubemap_id;
+ };
+
+ u16 routes[4]; /* routes that pass through this gate */
+ u8 route_count;
+
+ /* v102+ */
+ u32 submesh_start, submesh_count;
+};
+
+struct ent_route_node
+{
+ f32 co[3];
+ u8 ref_count, ref_total;
+};
+
+struct ent_path_index
+{
+ u16 index;
+};
+
+struct ent_checkpoint
+{
+ u16 gate_index,
+ path_start,
+ path_count;
+
+ /* EXTENSION */
+ f32 best_time;
+};
+
+enum ent_route_flag
+{
+ k_ent_route_flag_achieve_silver = 0x1,
+ k_ent_route_flag_achieve_gold = 0x2,
+
+ k_ent_route_flag_out_of_zone = 0x10,
+ k_ent_region_flag_hasname = 0x20,
+ k_ent_route_flag_target = 0x40
+};
+
+struct ent_route
+{
+ union
+ {
+ struct mdl_transform transform;
+ u32 official_track_id; /* TODO: remove this */
+ }
+ anon;
+
+ u32 pstr_name;
+ u16 checkpoints_start,
+ checkpoints_count;
+
+ f32 colour[4];
+
+ /* runtime */
+ u16 active_checkpoint,
+ valid_checkpoints;
+
+ f32 factive;
+ f32 board_transform[4][3];
+ struct mdl_submesh sm;
+ f64 timing_base;
+
+ u32 id_camera; /* v103+ */
+
+ /* v104+, but always accessible */
+ u32 flags;
+ f64 best_laptime;
+ f32 ui_stopper, ui_residual;
+ i16 ui_first_block_width, ui_residual_block_w;
+};
+
+struct ent_water
+{
+ struct mdl_transform transform;
+ f32 max_dist;
+ u32 reserved0, reserved1;
+};
+
+struct ent_audio_clip
+{
+ union
+ {
+ struct mdl_file file;
+		// struct audio_clip clip; FIXME: allocate this at runtime instead of embedding it in the file struct; the wasted space is only about half a KB.
+ }
+ _;
+
+ f32 probability;
+};
+
+struct volume_particles
+{
+ u32 blank, blank2;
+};
+
+struct volume_trigger
+{
+ i32 blank, blank2;
+};
+
+struct volume_interact
+{
+ i32 blank;
+ u32 pstr_text;
+};
+
+enum ent_volume_flag
+{
+ k_ent_volume_flag_particles = 0x1,
+ k_ent_volume_flag_disabled = 0x2,
+ k_ent_volume_flag_removed0 = 0x4,
+ k_ent_volume_flag_interact = 0x8,
+ k_ent_volume_flag_water = 0x10,
+ k_ent_volume_flag_repeatable= 0x20
+};
+
+struct ent_volume
+{
+ struct mdl_transform transform;
+ f32 to_world[4][3];
+
+ union
+ {
+ f32 to_local[4][3];
+ f32 particle_co[3];
+ };
+
+ u32 flags;
+ u32 deleted0;
+ union
+ {
+ struct volume_trigger trigger;
+ struct volume_interact interact;
+ struct volume_particles particles;
+ };
+};
+
+struct ent_audio
+{
+ struct mdl_transform transform;
+ u32 flags,
+ clip_start,
+ clip_count;
+ f32 volume, crossfade;
+ u32 behaviour,
+ group,
+ probability_curve,
+ max_channels;
+};
+
+enum
+{
+ k_ent_marker_flag_hidden = 0x1,
+ k_ent_marker_flag_gui_icon = 0x8
+};
+
+struct ent_marker
+{
+ struct mdl_transform transform;
+ u32 pstr_alias;
+ u32 flags;
+};
+
+enum skateshop_type
+{
+ k_skateshop_type_boardshop = 0,
+ k_skateshop_type_charshop = 1,
+ k_skateshop_type_worldshop = 2,
+ k_skateshop_type_DELETED = 3,
+ k_skateshop_type_server = 4
+};
+
+struct ent_skateshop
+{
+ struct mdl_transform transform;
+ u32 type, id_camera;
+
+ union
+ {
+ struct
+ {
+ u32 id_display,
+ id_info,
+ id_rack;
+ }
+ boards;
+
+ struct
+ {
+ u32 id_display,
+ id_info,
+ id_rack;
+ }
+ character;
+
+ struct
+ {
+ u32 id_display,
+ id_info;
+ }
+ worlds;
+
+ struct
+ {
+ u32 id_lever;
+ }
+ server;
+ };
+};
+
+struct ent_swspreview
+{
+ u32 id_camera, id_display, id_display1;
+};
+
+struct ent_traffic
+{
+ struct mdl_transform transform;
+ u32 submesh_start,
+ submesh_count,
+ start_node,
+ node_count;
+ f32 speed,
+ t;
+ u32 index; /* into the path */
+};
+
+struct ent_camera
+{
+ f32 co[3], r[3];
+ f32 fov;
+};
+
+#if (VG_MODEL_VERSION_MIN <= 107)
+struct ent_camera_v107
+{
+ struct mdl_transform transform;
+ f32 fov;
+};
+
+static inline void fix_ent_camera_v107( struct ent_camera_v107 *old, struct ent_camera *new )
+{
+ f32 dir[3] = {0.0f,-1.0f,0.0f};
+ mdl_transform_vector( &old->transform, dir, dir );
+ v3_angles( dir, new->r );
+ v3_copy( old->transform.co, new->co );
+ new->fov = old->fov;
+}
+#endif
+
+enum world_flag
+{
+ k_world_flag_fixed_time = 0x1,
+ k_world_flag_water_is_safe = 0x2,
+ k_world_flag_no_skating = 0x4,
+ k_world_flag_no_rewind = 0x8
+};
+
+struct ent_worldinfo
+{
+ u32 pstr_name, pstr_author, pstr_desc;
+ f32 timezone;
+ u32 pstr_skybox;
+ u32 flags;
+ f32 wind_scale;
+};
+
+enum channel_behaviour
+{
+ k_channel_behaviour_unlimited = 0,
+ k_channel_behaviour_discard_if_full = 1,
+ k_channel_behaviour_crossfade_if_full = 2
+};
+
+enum probability_curve{
+ k_probability_curve_constant = 0,
+ k_probability_curve_wildlife_day = 1,
+ k_probability_curve_wildlife_night = 2
+};
+
+struct ent_font
+{
+ u32 alias,
+ variant_start,
+ variant_count,
+ glyph_start,
+ glyph_count,
+ glyph_utf32_base;
+};
+
+struct ent_font_variant
+{
+ u32 name,
+ material_id;
+};
+
+struct ent_glyph
+{
+ f32 size[2];
+ u32 indice_start,
+ indice_count;
+};
+
+struct ent_ccmd
+{
+ u32 pstr_command;
+};
+
+enum ent_script_flag
+{
+ k_ent_script_flag_linked = 0x1
+};
+
+struct ent_script
+{
+ union
+ {
+		u32 pstr_script_name, /* when it's in the file */
+		    script_id;        /* at runtime */
+ };
+
+ u32 entity_list_id;
+ u32 flags;
+};
+
+enum ent_atom_flag
+{
+ k_ent_atom_global = 0x1,
+ k_ent_atom_scrap = 0x2
+};
+
+struct ent_atom
+{
+ union
+ {
+ u32 pstr_alias;
+ i32 scrap_value;
+ };
+
+ u32 flags;
+};
+
+enum ent_cutscene_flag
+{
+ k_ent_cutscene_freeze_player = 0x1,
+};
+
+struct ent_cutscene
+{
+ u32 pstr_path,
+ flags;
+};
+
+enum ent_objective_filter{
+ k_ent_objective_filter_none = 0x00000000,
+ k_ent_objective_filter_trick_shuvit = 0x00000001,
+ k_ent_objective_filter_trick_kickflip = 0x00000002,
+ k_ent_objective_filter_trick_treflip = 0x00000004,
+ k_ent_objective_filter_trick_any =
+ k_ent_objective_filter_trick_shuvit|
+ k_ent_objective_filter_trick_treflip|
+ k_ent_objective_filter_trick_kickflip,
+ k_ent_objective_filter_flip_back = 0x00000008,
+ k_ent_objective_filter_flip_front = 0x00000010,
+ k_ent_objective_filter_flip_any =
+ k_ent_objective_filter_flip_back|
+ k_ent_objective_filter_flip_front,
+ k_ent_objective_filter_grind_truck_any = 0x00000020,
+ k_ent_objective_filter_grind_board_any = 0x00000040,
+ k_ent_objective_filter_grind_any =
+ k_ent_objective_filter_grind_truck_any|
+ k_ent_objective_filter_grind_board_any,
+ k_ent_objective_filter_footplant = 0x00000080,
+ k_ent_objective_filter_passthrough = 0x00000100,
+ k_ent_objective_filter_glider = 0x00000200
+};
+
+enum ent_objective_flag
+{
+ k_ent_objective_hidden = 0x1,
+ k_ent_objective_passed = 0x2,
+ k_ent_objective_failed = 0x4
+};
+
+struct ent_objective
+{
+ struct mdl_transform transform;
+ u32 submesh_start,
+ submesh_count,
+ flags,
+ id_next,
+ filter,filter2,
+ deleted0;
+ i32 deleted1;
+ f32 time_limit;
+ u32 pstr_description_ui;
+};
+
+enum ent_challenge_flag
+{
+ k_ent_challenge_timelimit = 0x1,
+ //k_ent_challenge_is_story = 0x2,
+ k_ent_challenge_locked = 0x4,
+ k_ent_challenge_any_order = 0x8,
+ k_ent_challenge_target = 0x10
+};
+
+struct ent_challenge
+{
+ struct mdl_transform transform;
+ u32 pstr_alias,
+ flags;
+
+ u32 deleted0;//on_activate_id;
+ u32 deleted1;//on_activate_event;
+ u32 deleted2;//on_complete_id;
+ i32 deleted3;//on_complete_event;
+
+ u32 first_objective_id;
+ u32 camera_id;
+
+ u32 status;
+ u32 reset_spawn_id;
+};
+
+struct ent_cubemap
+{
+ f32 co[3];
+ u32 resolution, live, texture_id,
+ framebuffer_id, renderbuffer_id, placeholder[2];
+};
+
+enum prop_flag
+{
+ k_prop_flag_hidden = 0x1,
+ k_prop_flag_spinning = 0x2,
+ k_prop_flag_collider = 0x4,
+ k_prop_flag_spinning_fast = 0x8,
+};
+
+struct ent_prop
+{
+ struct mdl_transform transform;
+ u32 submesh_start, submesh_count, flags, pstr_alias;
+};
+
+enum editer_type
+{
+ k_editer_type_toggle = 0,
+ k_editer_type_slider = 1,
+ k_editer_type_selecter = 2
+};
+
+struct editer_property
+{
+ u32 pstr_alias;
+ u8 ui_type;
+
+ union
+ {
+ u32 _u32;
+ f32 _f32;
+ u32 pstr_options;
+ }
+ max;
+};
+
+struct editer_item
+{
+ struct mdl_transform transform;
+ u32 submesh_start, submesh_count;
+
+ u32 pstr_visibility,
+ pstr_uv_x,
+ pstr_uv_y;
+ u16 discard_send,
+ discard_mask;
+};
+
+struct ent_region
+{
+ struct mdl_transform transform;
+ u32 submesh_start, submesh_count, pstr_title, flags;
+
+ union
+ {
+ struct{ u32 zone_volume; } v105;
+ struct{ u32 id_list; } v109;
+ };
+
+ /* 105+ */
+ u32 deleted01[2];
+};
+
+enum ent_glider_flag
+{
+ k_ent_glider_flag_locked = 0x1
+};
+struct ent_glider
+{
+ struct mdl_transform transform;
+ u32 flags;
+ f32 cooldown;
+};
+
+struct ent_npc
+{
+ struct mdl_transform transform;
+ u32 pstr_id, pstr_context_id, pstr_anim, none1;
+};
+
+enum entity_event_result
+{
+ k_entity_event_result_OK,
+ k_entity_event_result_unhandled,
+ k_entity_event_result_invalid
+};
+
+enum ent_event_flags
+{
+ k_ent_event_data_const_i32 = 0x1,
+ k_ent_event_data_const_f32 = 0x2,
+ k_ent_event_data_const_entity_id = 0x4,
+ k_ent_event_data_const_string = 0x8,
+ k_ent_event_data_data_alias = 0x10,
+ k_ent_event_data_v3f = 0x20
+};
+
+struct ent_event
+{
+ u32 pstr_source_event,
+ pstr_recieve_event,
+ source_entity_id,
+ recieve_entity_id,
+ flags;
+
+ f32 delay;
+ u32 unused0;
+
+ union
+ {
+ i32 const_i32;
+ f32 const_f32;
+ u32 const_entity_id;
+ u32 const_pstr;
+ u32 pstr_data_alias;
+ }
+ data;
+};
--- /dev/null
+#include "common_api.h"
+#include "common_thread_api.h"
+#include "metascene.h"
+#include "maths/common_maths.h"
+#include "model.h"
+#include "entity.h"
+#include "shader_props.h"
+#include "array_file.h"
+
+void metascene_load( struct metascene *ms, const c8 *path, struct stack_allocator *stack )
+{
+ ASSERT_CRITICAL( _thread_has_flags( _get_thread_id(), THREAD_FLAG_ASYNC ) );
+ zero_buffer( ms, sizeof(struct metascene) );
+
+ struct stream stream;
+ ASSERT_CRITICAL( stream_open_file( &stream, path, k_stream_read ) );
+
+ u32 temp_frame = _start_temporary_frame();
+ {
+ struct array_file_context af;
+ ASSERT_CRITICAL( af_open_stream( &af, &stream, MS_VERSION_MIN, MS_VERSION_NR, _temporary_stack_allocator() ) );
+
+ struct array_file_ptr strings;
+ af_load_array( &af, &strings, "strings", stack, 1 );
+ ms->packed_strings = strings.data;
+ af_load_array( &af, &ms->infos, "ms_scene_info", stack, sizeof(struct ms_scene_info) );
+ af_load_array( &af, &ms->instances, "ms_instance", stack, sizeof(struct ms_instance) );
+ af_load_array( &af, &ms->overrides, "ms_override", stack, sizeof(struct ms_override) );
+ af_load_array( &af, &ms->strips, "ms_strip", stack, sizeof(struct ms_strip) );
+ af_load_array( &af, &ms->tracks, "ms_track", stack, sizeof(struct ms_track) );
+ af_load_array( &af, &ms->keyframes, "ms_keyframe", stack, sizeof(struct ms_keyframe) );
+ af_load_array( &af, &ms->cameras, "ent_camera", stack, sizeof(struct ent_camera) );
+ af_load_array( &af, &ms->audios, "ent_audio", stack, sizeof(struct ent_audio) );
+ af_load_array( &af, &ms->audio_clips, "ent_audio_clip", stack, sizeof(struct ent_audio_clip) );
+ af_load_array( &af, &ms->curves, "ms_curves", stack, sizeof(struct ms_curve_keyframe) );
+
+ ASSERT_CRITICAL( af_arrcount( &ms->infos ) );
+ struct ms_scene_info *src_inf = af_arritm( &ms->infos, 0 );
+ ms->info = *src_inf;
+
+ stream_close( &stream );
+ }
+ _end_temporary_frame( temp_frame );
+}
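+
+/*
+ * Usage sketch (illustrative only): load a metascene onto a stack allocator.
+ * The path and `world_stack` are hypothetical, and the call must come from a
+ * thread flagged THREAD_FLAG_ASYNC (asserted above).
+ *
+ *   struct metascene scene;
+ *   metascene_load( &scene, "metascenes/intro.ms", &world_stack );
+ *   f32 length_s = (f32)scene.info.end_frame / scene.info.framerate;
+ */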
+
+u32 skeleton_bone_id( struct ms_skeleton *skele, const c8 *name )
+{
+ for( u32 i=1; i<skele->bone_count; i++ )
+ if( compare_buffers( skele->bones[i].name, 0, name, 0 ) )
+ return i;
+
+ $log( $fatal, {"skeleton_bone_id( *, "}, {name}, {" ); -> Bone does not exist"} );
+ _fatal_exit();
+ return 0;
+}
+
+void keyframe_copy_pose( struct ms_keyframe *kfa, struct ms_keyframe *kfb, i32 num )
+{
+ for( i32 i=0; i<num; i++ )
+ kfb[i] = kfa[i];
+}
+
+
+/* apply a rotation from the perspective of root */
+void keyframe_rotate_around( struct ms_keyframe *kf, f32 origin[3], f32 offset[3], f32 q[4] )
+{
+ f32 v0[3], co[3];
+ v3_add( kf->co, offset, co );
+ v3_sub( co, origin, v0 );
+ q_mulv( q, v0, v0 );
+ v3_add( v0, origin, co );
+ v3_sub( co, offset, kf->co );
+ q_mul( q, kf->q, kf->q );
+ q_normalize( kf->q );
+}
+
+void keyframe_lerp( struct ms_keyframe *kfa, struct ms_keyframe *kfb, f32 t, struct ms_keyframe *kfd )
+{
+ v3_lerp( kfa->co, kfb->co, t, kfd->co );
+ q_nlerp( kfa->q, kfb->q, t, kfd->q );
+ v3_lerp( kfa->s, kfb->s, t, kfd->s );
+}
+
+/*
+ * Lerp between two sets of keyframes and store in dest. Rotations use Nlerp.
+ */
+void keyframe_lerp_pose( struct ms_keyframe *kfa, struct ms_keyframe *kfb, f32 t, struct ms_keyframe *kfd, i32 count )
+{
+ if( t <= 0.0001f )
+ {
+ keyframe_copy_pose( kfa, kfd, count );
+ return;
+ }
+ else if( t >= 0.9999f )
+ {
+ keyframe_copy_pose( kfb, kfd, count );
+ return;
+ }
+
+ for( i32 i=0; i<count; i++ )
+ keyframe_lerp( kfa+i, kfb+i, t, kfd+i );
+}
+
+void skeleton_lerp_pose( struct ms_skeleton *skele, struct ms_keyframe *kfa, struct ms_keyframe *kfb, f32 t, struct ms_keyframe *kfd )
+{
+ keyframe_lerp_pose( kfa, kfb, t, kfd, skele->bone_count-1 );
+}
+
+void skeleton_copy_pose( struct ms_skeleton *skele, struct ms_keyframe *kfa, struct ms_keyframe *kfd )
+{
+ keyframe_copy_pose( kfa, kfd, skele->bone_count-1 );
+}
+
+/*
+ * Sample the animation between the 2 closest frames at the given time. Output is a
+ * keyframe buffer that the caller allocates with an appropriate size.
+ *
+ * Time is in SECONDS
+ */
+void skeleton_sample_anim( struct ms_skeleton *skele, struct ms_skeletal_animation *anim, f32 time, struct ms_keyframe *output )
+{
+ struct ms_strip *strip = anim->strip;
+ f32 animtime = fmodf( time*anim->framerate, (f32)strip->strip.length ),
+ animframe = floorf( animtime ),
+ t = animtime - animframe;
+
+ u32 frame = (u32)animframe % strip->strip.length,
+ next = (frame+1) % strip->strip.length;
+
+ struct ms_keyframe *base = anim->keyframes_base + strip->strip.count*frame,
+ *nbase = anim->keyframes_base + strip->strip.count*next;
+ skeleton_lerp_pose( skele, base, nbase, t, output );
+}
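+
+/*
+ * Usage sketch (illustrative only): sample a strip 1.25 seconds in. The strip,
+ * keyframe pointer and frame rate are hypothetical, and the pose buffer must
+ * hold bone_count-1 keyframes.
+ *
+ *   struct ms_skeletal_animation walk = { .strip = walk_strip,
+ *                                         .keyframes_base = walk_keyframes,
+ *                                         .framerate = 30.0f };
+ *   struct ms_keyframe pose[ 64 ];
+ *   skeleton_sample_anim( &skele, &walk, 1.25f, pose );
+ */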
+
+/* time is in SECONDS */
+i32 skeleton_sample_anim_clamped( struct ms_skeleton *skele, struct ms_skeletal_animation *anim, f32 time, struct ms_keyframe *output )
+{
+ struct ms_strip *strip = anim->strip;
+ f32 end = (strip->strip.length-1)/anim->framerate;
+ skeleton_sample_anim( skele, anim, f32_min( end, time ), output );
+ if( time > end ) return 0;
+ else return 1;
+}
+
+static i32 should_apply_bone( struct ms_skeleton *skele, u32 id, enum anim_apply type )
+{
+ struct ms_skeleton_bone *sb = &skele->bones[ id ],
+ *sp = &skele->bones[ sb->parent ];
+ if( type == k_anim_apply_defer_ik )
+ {
+ if( ((sp->flags & k_bone_flag_ik) && !(sb->flags & k_bone_flag_ik)) || sp->defer )
+ {
+ sb->defer = 1;
+ return 0;
+ }
+ else
+ {
+ sb->defer = 0;
+ return 1;
+ }
+ }
+ else if( type == k_anim_apply_deffered_only )
+ {
+ if( sb->defer )
+ return 1;
+ else
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * Apply block of keyframes to skeletons final pose
+ */
+void skeleton_apply_pose( struct ms_skeleton *skele, struct ms_keyframe *pose, enum anim_apply passtype, f32 final_mtx[][4][3] )
+{
+ if( passtype == k_anim_apply_absolute )
+ {
+ for( u32 i=1; i<skele->bone_count; i++ )
+ {
+ struct ms_keyframe *kf = &pose[i-1];
+ q_m3x3( kf->q, final_mtx[i] );
+ m3x3_scale( final_mtx[i], kf->s );
+ v3_copy( kf->co, final_mtx[i][3] );
+ }
+ return;
+ }
+
+ m4x3_identity( final_mtx[0] );
+ skele->bones[0].defer = 0;
+ skele->bones[0].flags &= ~k_bone_flag_ik;
+
+ for( u32 i=1; i<skele->bone_count; i++ )
+ {
+ struct ms_skeleton_bone *sb = &skele->bones[i];
+ if( !should_apply_bone( skele, i, passtype ) )
+ continue;
+
+ sb->defer = 0;
+
+ /* process pose */
+ f32 posemtx[4][3];
+ f32 temp_delta[3];
+ v3_sub( skele->bones[i].co, skele->bones[sb->parent].co, temp_delta );
+
+ /* pose matrix */
+ struct ms_keyframe *kf = &pose[i-1];
+ q_m3x3( kf->q, posemtx );
+ m3x3_scale( posemtx, kf->s );
+ v3_copy( kf->co, posemtx[3] );
+ v3_add( temp_delta, posemtx[3], posemtx[3] );
+
+ /* final matrix */
+ m4x3_mul( final_mtx[ sb->parent ], posemtx, final_mtx[i] );
+ }
+}
+
+/*
+ * Take the final matrices and decompose it into an absolute positioned anim
+ */
+void skeleton_decompose_mtx_absolute( struct ms_skeleton *skele, struct ms_keyframe *anim, f32 final_mtx[][4][3] )
+{
+ for( u32 i=1; i<skele->bone_count; i++ )
+ {
+ struct ms_keyframe *kf = &anim[i-1];
+ m4x3_decompose( final_mtx[i], kf->co, kf->q, kf->s );
+ }
+}
+
+/*
+ * Creates the reference inverse matrix for an IK bone, as it has an initial
+ * intrinsic rotation based on the direction the IK is set up in.
+ */
+void skeleton_inverse_for_ik( struct ms_skeleton *skele, f32 ivaxis[3], u32 id, f32 inverse[3][3] )
+{
+ v3_copy( ivaxis, inverse[0] );
+ v3_copy( skele->bones[id].end, inverse[1] );
+ v3_normalize( inverse[1] );
+ v3_cross( inverse[0], inverse[1], inverse[2] );
+ m3x3_transpose( inverse, inverse );
+}
+
+/*
+ * Creates inverse rotation matrices which the IK system uses.
+ */
+void skeleton_create_inverses( struct ms_skeleton *skele )
+{
+ /* IK: inverse 'plane-bone space' axis '(^axis,^bone,...)[base] */
+ for( u32 i=0; i<skele->ik_count; i++ )
+ {
+ struct ms_skeleton_ik *ik = &skele->ik[i];
+ f32 iv0[3], iv1[3], ivaxis[3];
+ v3_sub( skele->bones[ik->target].co, skele->bones[ik->lower].co, iv0 );
+ v3_sub( skele->bones[ik->pole].co, skele->bones[ik->lower].co, iv1 );
+ v3_cross( iv0, iv1, ivaxis );
+ v3_normalize( ivaxis );
+
+ skeleton_inverse_for_ik( skele, ivaxis, ik->lower, ik->ia );
+ skeleton_inverse_for_ik( skele, ivaxis, ik->upper, ik->ib );
+ }
+}
+
+/*
+ * Apply a model matrix to all bones, should be done last
+ */
+void skeleton_apply_transform( struct ms_skeleton *skele, f32 transform[4][3], f32 final_mtx[][4][3] )
+{
+ for( u32 i=0; i<skele->bone_count; i++ )
+ m4x3_mul( transform, final_mtx[i], final_mtx[i] );
+}
+
+/*
+ * Apply an inverse matrix to all bones which maps vertices from bind space into
+ * bone relative positions
+ */
+void skeleton_apply_inverses( struct ms_skeleton *skele, f32 final_mtx[][4][3] )
+{
+ for( u32 i=0; i<skele->bone_count; i++ )
+ {
+ struct ms_skeleton_bone *sb = &skele->bones[i];
+ f32 inverse[4][3];
+ m3x3_identity( inverse );
+ v3_negate( sb->co, inverse[3] );
+ m4x3_mul( final_mtx[i], inverse, final_mtx[i] );
+ }
+}
+
+/*
+ * Apply all IK modifiers (2 bone ik reference from blender is supported)
+ */
+void skeleton_apply_ik_pass( struct ms_skeleton *skele, f32 final_mtx[][4][3] )
+{
+ for( u32 i=0; i<skele->ik_count; i++ )
+ {
+ struct ms_skeleton_ik *ik = &skele->ik[i];
+ f32 v0[3], /* base -> target */
+ v1[3], /* base -> pole */
+ vaxis[3];
+ f32 co_base[3],
+ co_target[3],
+ co_pole[3];
+
+ v3_copy( final_mtx[ik->lower][3], co_base );
+ v3_copy( final_mtx[ik->target][3], co_target );
+ v3_copy( final_mtx[ik->pole][3], co_pole );
+
+ v3_sub( co_target, co_base, v0 );
+ v3_sub( co_pole, co_base, v1 );
+ v3_cross( v0, v1, vaxis );
+ v3_normalize( vaxis );
+ v3_normalize( v0 );
+ v3_cross( vaxis, v0, v1 );
+
+ /* localize problem into [x:v0,y:v1] 2d plane */
+ f32 base[2] = { v3_dot( v0, co_base ), v3_dot( v1, co_base ) },
+ end[2] = { v3_dot( v0, co_target ), v3_dot( v1, co_target ) },
+ knee[2];
+
+ /* Compute angles (basic trig)*/
+ f32 delta[2];
+ v2_sub( end, base, delta );
+
+ f32 l1 = v3_length( skele->bones[ik->lower].end ),
+ l2 = v3_length( skele->bones[ik->upper].end ),
+ d = f32_clamp( v2_length(delta), fabsf(l1 - l2), l1+l2-0.00001f ),
+ c = acosf( (l1*l1 + d*d - l2*l2) / (2.0f*l1*d) ),
+ rot = atan2f( delta[1], delta[0] ) + c - VG_PIf/2.0f;
+
+ knee[0] = sinf(-rot) * l1;
+ knee[1] = cosf(-rot) * l1;
+
+ m4x3_identity( final_mtx[ik->lower] );
+ m4x3_identity( final_mtx[ik->upper] );
+
+ /* create rotation matrix */
+ f32 co_knee[3];
+ v3_muladds( co_base, v0, knee[0], co_knee );
+ v3_muladds( co_knee, v1, knee[1], co_knee );
+ // vg_line( co_base, co_knee, 0xff00ff00 );
+
+ f32 transform[4][3];
+ v3_copy( vaxis, transform[0] );
+ v3_muls( v0, knee[0], transform[1] );
+ v3_muladds( transform[1], v1, knee[1], transform[1] );
+ v3_normalize( transform[1] );
+ v3_cross( transform[0], transform[1], transform[2] );
+ v3_copy( co_base, transform[3] );
+
+ m3x3_mul( transform, ik->ia, transform );
+ m4x3_copy( transform, final_mtx[ik->lower] );
+
+ /* upper/knee bone */
+ v3_copy( vaxis, transform[0] );
+ v3_sub( co_target, co_knee, transform[1] );
+ v3_normalize( transform[1] );
+ v3_cross( transform[0], transform[1], transform[2] );
+ v3_copy( co_knee, transform[3] );
+
+ m3x3_mul( transform, ik->ib, transform );
+ m4x3_copy( transform, final_mtx[ik->upper] );
+ }
+}
+
+/*
+ * Applies the typical operations that you want for an IK rig:
+ * Pose, IK, Pose(deferred), Inverses, Transform
+ */
+void skeleton_apply_standard( struct ms_skeleton *skele, struct ms_keyframe *pose, f32 transform[4][3], f32 final_mtx[][4][3] )
+{
+ skeleton_apply_pose( skele, pose, k_anim_apply_defer_ik, final_mtx );
+ skeleton_apply_ik_pass( skele, final_mtx );
+ skeleton_apply_pose( skele, pose, k_anim_apply_deffered_only, final_mtx );
+ skeleton_apply_inverses( skele, final_mtx );
+ skeleton_apply_transform( skele, transform, final_mtx );
+}
+
+void skeleton_alloc_from( struct ms_skeleton *skele, struct stack_allocator *stack,
+ struct vg_model *model, struct mdl_armature *armature )
+{
+ skele->bone_count = armature->bone_count+1;
+ skele->ik_count = 0;
+ skele->collider_count = 0;
+
+ for( u32 i=0; i<armature->bone_count; i++ )
+ {
+ struct mdl_bone *bone = &model->bones[ armature->bone_start+i ];
+ if( bone->flags & k_bone_flag_ik )
+ skele->ik_count ++;
+ if( bone->collider )
+ skele->collider_count ++;
+ }
+
+ u32 bone_size = sizeof(struct ms_skeleton_bone) * skele->bone_count,
+ ik_size = sizeof(struct ms_skeleton_ik) * skele->ik_count;
+
+ skele->bones = stack_allocate( stack, bone_size, 8, NULL );
+ skele->ik = stack_allocate( stack, ik_size, 8, NULL );
+
+ zero_buffer( skele->bones, bone_size );
+ zero_buffer( skele->ik, ik_size );
+}
+
+/* Set up a skeleton from a model. The mdl's metadata should stick around */
+void skeleton_setup( struct ms_skeleton *skele, struct vg_model *model, u32 index, struct stack_allocator *stack )
+{
+ u32 ik_count = 0, collider_count = 0;
+ skele->bone_count = 0;
+ skele->bones = NULL;
+
+ if( !model->armature_count )
+ {
+ $log( $fatal, {"No skeleton in model"} );
+ _fatal_exit();
+ }
+
+ struct mdl_armature *armature = &model->armatures[ index ];
+ skeleton_alloc_from( skele, stack, model, armature );
+
+ for( u32 i=0; i<armature->bone_count; i++ )
+ {
+ struct mdl_bone *bone = &model->bones[ armature->bone_start+i ];
+ struct ms_skeleton_bone *sb = &skele->bones[i+1];
+
+ v3_copy( bone->co, sb->co );
+ v3_copy( bone->end, sb->end );
+
+ sb->parent = bone->parent;
+ sb->name = af_str( model->packed_strings, bone->pstr_name );
+ sb->flags = bone->flags;
+ sb->collider = bone->collider;
+ sb->orig_bone = bone;
+
+ if( sb->flags & k_bone_flag_ik )
+ {
+ skele->bones[ sb->parent ].flags |= k_bone_flag_ik;
+
+ if( ik_count == skele->ik_count )
+ {
+ $log( $fatal, {"Too many ik bones, corrupt model file"} );
+ _fatal_exit();
+ }
+
+ struct ms_skeleton_ik *ik = &skele->ik[ ik_count ++ ];
+ ik->upper = i+1;
+ ik->lower = bone->parent;
+ ik->target = bone->ik_target;
+ ik->pole = bone->ik_pole;
+ }
+
+ box_copy( bone->hitbox, sb->hitbox );
+ if( bone->collider )
+ {
+ if( collider_count == skele->collider_count )
+ {
+ $log( $fatal, {"Too many collider bones"} );
+ _fatal_exit();
+ }
+ collider_count ++;
+ }
+ }
+
+ /* fill in implicit root bone */
+ v3_fill( skele->bones[0].co, 0 );
+ v3_copy( (f32[3]){0.0f,1.0f,0.0f}, skele->bones[0].end );
+ skele->bones[0].parent = 0xffffffff;
+ skele->bones[0].flags = 0;
+ skele->bones[0].name = "[root]";
+
+ skeleton_create_inverses( skele );
+ $log( $ok, {"Loaded skeleton with "}, $unsigned( skele->bone_count ), {" bones"} );
+ $log( $ok, {" "}, $unsigned( skele->collider_count), {" colliders"} );
+}
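+
+/*
+ * Usage sketch (illustrative only) tying the above together for one skinned
+ * character: set the skeleton up once, then each frame sample a strip and run
+ * the standard pass list. `model`, `anim`, `root_mtx`, `time` and the stack
+ * are hypothetical, and the fixed-size buffers assume bone_count <= 64.
+ *
+ *   struct ms_skeleton skele;
+ *   skeleton_setup( &skele, &model, 0, &stack );
+ *
+ *   struct ms_keyframe pose[ 64 ];
+ *   f32 final_mtx[ 64 ][4][3];
+ *   skeleton_sample_anim( &skele, &anim, time, pose );
+ *   skeleton_apply_standard( &skele, pose, root_mtx, final_mtx );
+ */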
+
+#if 0
+void skeleton_debug( struct ms_skeleton *skele, f32 final_mtx[][4][3] )
+{
+ for( u32 i=1; i<skele->bone_count; i ++ )
+ {
+ struct ms_skeleton_bone *sb = &skele->bones[i];
+ f32 p0[3], p1[3];
+ v3_copy( sb->co, p0 );
+ v3_add( p0, sb->end, p1 );
+
+ m4x3_mulv( final_mtx[i], p0, p0 );
+ m4x3_mulv( final_mtx[i], p1, p1 );
+
+ if( sb->flags & k_bone_flag_deform )
+ {
+ if( sb->flags & k_bone_flag_ik )
+ vg_line( p0, p1, 0xff0000ff );
+ else
+ vg_line( p0, p1, 0xffcccccc );
+ }
+ else
+ vg_line( p0, p1, 0xff00ffff );
+ }
+}
+#endif
--- /dev/null
+#pragma once
+#include "array_file.h"
+#include "model.h"
+
+#define MS_VERSION_NR 2
+#define MS_VERSION_MIN 2
+
+struct ms_scene_info
+{
+ f32 framerate;
+ u32 end_frame;
+};
+
+struct metascene
+{
+ const void *packed_strings;
+ struct ms_scene_info info;
+ struct array_file_ptr infos,
+ instances,
+ overrides,
+ strips,
+ tracks,
+ keyframes,
+ curves,
+
+ audios, /* kinda temp? */
+ audio_clips,
+ cameras;
+};
+
+struct ms_instance
+{
+ u32 pstr_name,
+ override_start,
+ override_count;
+};
+
+struct ms_override
+{
+ u32 entity_type, pstr_name;
+ struct mdl_transform transform;
+};
+
+enum ms_strip_mode
+{
+ k_ms_strip_mode_keyframes = 0x1,
+ k_ms_strip_mode_curves = 0x2,
+ k_ms_strip_mode_animation = 0x1|0x2,
+
+ k_ms_strip_mode_camera = 0x10,
+ k_ms_strip_mode_event = 0x20,
+ k_ms_strip_mode_subtitle = 0x40,
+ k_ms_strip_mode_fadeout = 0x80,
+};
+
+struct ms_strip
+{
+ u8 mode;
+ i32 offset;
+
+ union
+ {
+ struct
+ {
+ u32 start, count, length,
+ pstr_name,
+ pstr_internal_name,
+ instance_id, object_id;
+ f32 timing_offset;
+ }
+ strip;
+
+ struct
+ {
+ u32 entity_id;
+ }
+ camera;
+
+ struct
+ {
+ u32 pstr_en, res0, res1, res2;
+ u8 character;
+ }
+ subtitle;
+
+ struct
+ {
+ u32 pstr_string;
+ }
+ event;
+ };
+};
+
+struct ms_track
+{
+ u32 keyframe_start,
+ keyframe_count,
+ pstr_datapath,
+ semantic_type;
+};
+
+struct ms_curve_keyframe
+{
+ f32 co[2], l[2], r[2];
+};
+
+struct ms_keyframe
+{
+ f32 co[3], s[3];
+ f32 q[4];
+};
+
+/* skeletons
+ * ------------------------------------------------------------------------------------------------------------------ */
+
+struct ms_skeleton
+{
+ struct ms_skeleton_bone
+ {
+ f32 co[3], end[3];
+ u32 parent;
+
+ u32 flags;
+ int defer;
+
+ //ms_keyframe kf;
+ struct mdl_bone *orig_bone;
+ u32 collider; // TODO: SOA
+ f32 hitbox[2][3]; // TODO: SOA
+ const char *name; // TODO: SOA
+ }
+ *bones;
+ u32 bone_count;
+
+ struct ms_skeleton_ik
+ {
+ u32 lower, upper, target, pole;
+ f32 ia[3][3], ib[3][3];
+ }
+ *ik;
+ u32 ik_count;
+ u32 collider_count,
+ bindable_count;
+};
+
+void metascene_load( struct metascene *ms, const c8 *path, struct stack_allocator *stack );
+void keyframe_copy_pose( struct ms_keyframe *kfa, struct ms_keyframe *kfb, i32 num );
+void keyframe_rotate_around( struct ms_keyframe *kf, f32 origin[3], f32 offset[3], f32 q[4] );
+void keyframe_lerp( struct ms_keyframe *kfa, struct ms_keyframe *kfb, f32 t, struct ms_keyframe *kfd );
+void keyframe_lerp_pose( struct ms_keyframe *kfa, struct ms_keyframe *kfb, f32 t, struct ms_keyframe *kfd, i32 count );
+void skeleton_alloc_from( struct ms_skeleton *skele, struct stack_allocator *stack,
+ struct vg_model *model, struct mdl_armature *armature );
+u32 skeleton_bone_id( struct ms_skeleton *skele, const c8 *name );
+void skeleton_lerp_pose( struct ms_skeleton *skele, struct ms_keyframe *kfa, struct ms_keyframe *kfb, f32 t, struct ms_keyframe *kfd );
+void skeleton_copy_pose( struct ms_skeleton *skele, struct ms_keyframe *kfa, struct ms_keyframe *kfd );
+
+struct ms_skeletal_animation
+{
+ struct ms_strip *strip;
+ struct ms_keyframe *keyframes_base;
+ f32 framerate;
+};
+
+void skeleton_sample_anim( struct ms_skeleton *skele, struct ms_skeletal_animation *anim, f32 time, struct ms_keyframe *output );
+int skeleton_sample_anim_clamped( struct ms_skeleton *skele, struct ms_skeletal_animation *anim, f32 time, struct ms_keyframe *output );
+
+enum anim_apply
+{
+ k_anim_apply_always,
+ k_anim_apply_defer_ik,
+ k_anim_apply_deffered_only,
+ k_anim_apply_absolute
+};
+
+void skeleton_apply_pose( struct ms_skeleton *skele, struct ms_keyframe *pose, enum anim_apply passtype, f32 final_mtx[][4][3] );
+void skeleton_decompose_mtx_absolute( struct ms_skeleton *skele, struct ms_keyframe *anim, f32 final_mtx[][4][3] );
+void skeleton_inverse_for_ik( struct ms_skeleton *skele, f32 ivaxis[3], u32 id, f32 inverse[3][3] );
+void skeleton_create_inverses( struct ms_skeleton *skele );
+void skeleton_apply_transform( struct ms_skeleton *skele, f32 transform[4][3], f32 final_mtx[][4][3] );
+void skeleton_apply_inverses( struct ms_skeleton *skele, f32 final_mtx[][4][3] );
+void skeleton_apply_ik_pass( struct ms_skeleton *skele, f32 final_mtx[][4][3] );
+void skeleton_apply_standard( struct ms_skeleton *skele, struct ms_keyframe *pose, f32 transform[4][3], f32 final_mtx[][4][3] );
+void skeleton_setup( struct ms_skeleton *skele, struct vg_model *model, u32 index, struct stack_allocator *stack );
+void skeleton_debug( struct ms_skeleton *skele, f32 final_mtx[][4][3] );
--- /dev/null
+#include "common_api.h"
+#include "common_thread_api.h"
+#include "maths/common_maths.h"
+
+#include "model.h"
+#include "array_file.h"
+#include "opengl.h"
+#include "shader_props.h"
+#include <stddef.h>
+
+struct stream *vg_model_stream_pack_stream( struct vg_model_stream_context *ctx, struct mdl_file *file )
+{
+ if( !file->pack_size )
+ {
+		$log( $fatal, {"Packed file is only a header; it is not packed. "},
+		              {"Path: "}, {af_str( ctx->model->packed_strings, file->pstr_path )} );
+ _fatal_exit();
+ }
+
+ stream_seek( &ctx->stream, ctx->model->pack_base_offset + file->pack_offset );
+
+	// We never read backwards, so this just sets an upper bound to read against
+ ctx->stream.buffer_length = ctx->model->pack_base_offset + file->pack_offset + file->pack_size;
+ return &ctx->stream;
+}
+
+/* This also compiles them */
+static void vg_model_stream_materials( struct vg_model_stream_context *ctx, struct stack_allocator *stack )
+{
+ struct array_file_ptr mats_ptr;
+ af_load_array( &ctx->af, &mats_ptr, "mdl_material", stack, sizeof(struct mdl_material) );
+ ctx->model->materials = mats_ptr.data;
+ ctx->model->material_count = mats_ptr.count;
+
+ u32 size = sizeof(union shader_props) * mats_ptr.count;
+ ctx->model->shader_props = stack_allocate( stack, size, 8, "Compiled shader properties" );
+
+ /*
+ * Step 0:
+ * Acquiring the data source
+ *
+ * Step 1:
+	 * Converting into the formal KV structure.
+	 * There are 3 different modes:
+	 * v101+: old simple binary structures; requires 'generating' correct kvs
+	 * v106+: deprecated 'vg_msg', similar to kvs; only requires conversion
+	 * v110+: text KVs; direct parsing into vg_kvs
+	 *
+	 * Step 2:
+	 * The formal KV structure is then compiled into the binary union
+ */
+
+ /* step0 ----------------------------- */
+ u32 temp_frame = _start_temporary_frame();
+ {
+#if (VG_MODEL_VERSION_MIN <= 105)
+ struct array_file_ptr v101_materials;
+#endif
+#if (VG_MODEL_VERSION_MIN <= 109)
+ struct array_file_ptr v106_data;
+#endif
+ struct array_file_ptr v110_data;
+
+ if( ctx->model->version <= 105 )
+#if (VG_MODEL_VERSION_MIN <= 105)
+ af_load_array( &ctx->af, &v101_materials, "mdl_material", _temporary_stack_allocator(), sizeof(struct mdl_material_v101) );
+#else
+	{
+		$log( $fatal, {"Unsupported model version: "}, $unsigned( ctx->model->version ) );
+		_fatal_exit();
+	}
+#endif
+#if (VG_MODEL_VERSION_MIN <= 109)
+ else if( ctx->model->version <= 109 )
+ af_load_array( &ctx->af, &v106_data, "shader_data", _temporary_stack_allocator(), 1 );
+#endif
+ else
+ af_load_array( &ctx->af, &v110_data, "shader_props", _temporary_stack_allocator(), 1 );
+
+ struct keyvalues kvs;
+ keyvalues_init( &kvs, _temporary_stack_allocator() );
+
+ /* step1 ----------------------------- */
+ if( ctx->model->version <= 105 )
+ {
+#if (VG_MODEL_VERSION_MIN <= 105)
+ for( u32 i=0; i<ctx->model->material_count; i ++ )
+ {
+ struct mdl_material *mat = &ctx->model->materials[ i ];
+ struct mdl_material_v101 *old = af_arritm( &v101_materials, i );
+
+ mat->props.kv_root = keyvalues_append_frame( &kvs, 0, NULL );
+
+ keyvalues_append_string( &kvs, mat->props.kv_root, "version", "101" );
+ keyvalues_append_u32s( &kvs, mat->props.kv_root, "tex_diffuse", &old->tex_diffuse, 1 );
+
+ if( mat->shader == k_shader_cubemap )
+ {
+ keyvalues_append_u32s( &kvs, mat->props.kv_root, "cubemap", &old->tex_none0, 1 );
+ keyvalues_append_f32s( &kvs, mat->props.kv_root, "tint", old->colour, 4 );
+ }
+ else if( mat->shader == k_shader_terrain_blend )
+ {
+ keyvalues_append_f32s( &kvs, mat->props.kv_root, "sand_colour", old->colour, 4 );
+ keyvalues_append_f32s( &kvs, mat->props.kv_root, "blend_offset", old->colour1, 2 );
+ }
+ else if( mat->shader == k_shader_standard_vertex_blend )
+ {
+ keyvalues_append_f32s( &kvs, mat->props.kv_root, "blend_offset", old->colour1, 2 );
+ }
+ else if( mat->shader == k_shader_water )
+ {
+ keyvalues_append_f32s( &kvs, mat->props.kv_root, "shore_colour", old->colour, 4 );
+ keyvalues_append_f32s( &kvs, mat->props.kv_root, "deep_colour", old->colour1, 4 );
+ }
+ }
+#else
+ ASSERT_CRITICAL( 0 );
+#endif
+ }
+ else if( ctx->model->version <= 109 )
+ {
+#if (VG_MODEL_VERSION_MIN <= 109)
+ for( u32 i=0; i<ctx->model->material_count; i ++ )
+ {
+ struct mdl_material *mat = &ctx->model->materials[ i ];
+ u32 root = keyvalues_append_frame( &kvs, 0, NULL );
+ keyvalues_append_string( &kvs, root, "version", "106" );
+
+ void *buffer = NULL;
+ if( v106_data.data )
+ buffer = v106_data.data + mat->props.kvs.offset;
+
+ vg_kvs_append_from_legacy_msg2( &kvs, root, buffer, mat->props.kvs.size );
+ mat->props.kv_root = root;
+ }
+#else
+ ASSERT_CRITICAL( 0 );
+#endif
+ }
+ else
+ {
+ for( u32 i=0; i<ctx->model->material_count; i ++ )
+ {
+ struct mdl_material *mat = &ctx->model->materials[ i ];
+ u32 root = keyvalues_append_frame( &kvs, 0, NULL );
+ keyvalues_append_string( &kvs, root, "version", "110" );
+
+ const c8 *buffer = NULL;
+ if( v110_data.data )
+ {
+ buffer = v110_data.data + mat->props.kvs.offset;
+ struct stream kv_stream;
+ stream_open_buffer_read( &kv_stream, buffer, mat->props.kvs.size, 0 );
+ keyvalues_parse_stream( &kvs, root, &kv_stream );
+ }
+ mat->props.kv_root = root;
+ }
+ }
+
+ /* step2 ----------------------------- */
+ for( u32 i=0; i<ctx->model->material_count; i ++ )
+ {
+ struct mdl_material *mat = &ctx->model->materials[ i ];
+ u32 root = mat->props.kv_root;
+ union shader_props *props = &ctx->model->shader_props[ i ];
+
+ if( mat->shader == k_shader_standard ||
+ mat->shader == k_shader_standard_cutout ||
+ mat->shader == k_shader_foliage ||
+ mat->shader == k_shader_fxglow )
+ {
+ keyvalues_read_u32s( &kvs, root, "tex_diffuse", (u32[]){0}, &props->standard.tex_diffuse, 1 );
+ keyvalues_read_u32s( &kvs, root, "render_flags", (u32[]){0}, &props->standard.render_flags, 1 );
+ }
+ else if( mat->shader == k_shader_standard_vertex_blend )
+ {
+ keyvalues_read_u32s( &kvs, root, "tex_diffuse", (u32[]){0}, &props->vertex_blend.tex_diffuse, 1 );
+ keyvalues_read_f32s( &kvs, root, "blend_offset", (f32[]){ 0.5, 0.0 }, props->vertex_blend.blend_offset, 2 );
+ }
+ else if( mat->shader == k_shader_cubemap )
+ {
+ keyvalues_read_u32s( &kvs, root, "tex_diffuse", (u32[]){0}, &props->cubemapped.tex_diffuse, 1 );
+ keyvalues_read_u32s( &kvs, root, "cubemap_entity", (u32[]){0}, &props->cubemapped.cubemap_entity, 1 );
+ keyvalues_read_f32s( &kvs, root, "tint", (f32[]){1.0,1.0,1.0,1.0}, props->cubemapped.tint, 4 );
+ }
+ else if( mat->shader == k_shader_terrain_blend )
+ {
+ keyvalues_read_u32s( &kvs, root, "tex_diffuse", (u32[]){0}, &props->terrain.tex_diffuse, 1 );
+ keyvalues_read_f32s( &kvs, root, "sand_colour", (f32[]){ 0.79, 0.63, 0.48, 1.0 }, props->terrain.sand_colour, 4 );
+ keyvalues_read_f32s( &kvs, root, "blend_offset", (f32[]){ 0.5, 0.0 }, props->terrain.blend_offset, 2 );
+ }
+ else if( mat->shader == k_shader_water )
+ {
+ keyvalues_read_f32s( &kvs, root, "shore_colour", (f32[]){0.03,0.32,0.61,1.0}, props->water.shore_colour, 4 );
+ keyvalues_read_f32s( &kvs, root, "deep_colour", (f32[]){0.0,0.006,0.03,1.0}, props->water.deep_colour, 4 );
+ keyvalues_read_f32s( &kvs, root, "fog_scale", (f32[]){0.04}, &props->water.fog_scale, 1 );
+ keyvalues_read_f32s( &kvs, root, "fresnel", (f32[]){5.0}, &props->water.fresnel, 1 );
+ keyvalues_read_f32s( &kvs, root, "water_scale", (f32[]){ 0.008 }, &props->water.water_sale, 1 );
+ keyvalues_read_f32s( &kvs, root, "wave_speed", (f32[]){0.008,0.006,0.003,0.03}, props->water.wave_speed, 4 );
+ }
+ else if( mat->shader == k_shader_workshop )
+ {
+ const c8 *_shader_prop_workshop_keys[] =
+ {
+ [k_workshop_shader_part_truck1 ] = "truck1",
+ [k_workshop_shader_part_truck2 ] = "truck2",
+ [k_workshop_shader_part_wheel1 ] = "wheel1",
+ [k_workshop_shader_part_wheel2 ] = "wheel2",
+ [k_workshop_shader_part_wheel3 ] = "wheel3",
+ [k_workshop_shader_part_wheel4 ] = "wheel4",
+ [k_workshop_shader_part_edge ] = "edge",
+ [k_workshop_shader_part_griptape] = "griptape",
+ [k_workshop_shader_part_deck ] = "deck"
+ };
+
+ for( u32 j=0; j<k_workshop_shader_part_max; j ++ )
+ keyvalues_read_u32s( &kvs, root, _shader_prop_workshop_keys[j], (u32[]){0}, &props->workshop.tex_all[j], 1 );
+ }
+ }
+ }
+ _end_temporary_frame( temp_frame );
+}
+
+void vg_model_stream_metadata( struct vg_model_stream_context *ctx, struct stack_allocator *stack )
+{
+ struct array_file_ptr strings;
+ af_load_array( &ctx->af, &strings, "strings", stack, 1 );
+ ctx->model->packed_strings = strings.data;
+ ctx->model->flags |= VG_MODEL_CPU_METADATA;
+
+ struct array_file_meta *pack = af_find_array( &ctx->af, "pack" );
+ if( pack ) ctx->model->pack_base_offset = pack->file_offset;
+ else ctx->model->pack_base_offset = 0;
+
+ struct array_file_ptr ptr;
+ af_load_array( &ctx->af, &ptr, "mdl_mesh", stack, sizeof(struct mdl_mesh) );
+ ctx->model->meshes = ptr.data;
+ ctx->model->mesh_count = ptr.count;
+
+ af_load_array( &ctx->af, &ptr, "mdl_submesh", stack, sizeof(struct mdl_submesh) );
+ ctx->model->submeshes = ptr.data;
+ ctx->model->submesh_count = ptr.count;
+
+ af_load_array( &ctx->af, &ptr, "mdl_texture", stack, sizeof(union mdl_texture) );
+ ctx->model->textures = ptr.data;
+ ctx->model->texture_count = ptr.count;
+
+ af_load_array( &ctx->af, &ptr, "mdl_armature", stack, sizeof(struct mdl_armature) );
+ ctx->model->armatures = ptr.data;
+ ctx->model->armature_count = ptr.count;
+
+ af_load_array( &ctx->af, &ptr, "mdl_bone", stack, sizeof(struct mdl_bone) );
+ ctx->model->bones = ptr.data;
+ ctx->model->bone_count = ptr.count;
+
+ vg_model_stream_materials( ctx, stack );
+}
+
+void vg_model_stream_meshes_cpu( struct vg_model_stream_context *ctx, struct stack_allocator *stack )
+{
+ ASSERT_CRITICAL( ctx->model->flags & VG_MODEL_CPU_METADATA );
+ ctx->model->flags |= VG_MODEL_CPU_MESHES;
+
+ struct array_file_ptr ptr;
+ af_load_array( &ctx->af, &ptr, "mdl_vert", stack, sizeof(struct mdl_vert) );
+ ctx->model->verts = ptr.data;
+ ctx->model->vert_count = ptr.count;
+
+ af_load_array( &ctx->af, &ptr, "mdl_indice", stack, sizeof(u32) );
+ ctx->model->indices = ptr.data;
+ ctx->model->indice_count = ptr.count;
+}
+
+struct model_upload_task
+{
+ struct vg_model *model;
+ struct mdl_vert *vert_buffer;
+ u32 *indice_buffer;
+};
+
+static void vg_model_upload_task( struct task *task )
+{
+ struct model_upload_task *in_args = task_buffer(task);
+ ASSERT_CRITICAL( _thread_has_flags( _get_thread_id(), THREAD_FLAG_MAIN ) );
+
+ glGenVertexArrays( 1, &in_args->model->vao );
+ glBindVertexArray( in_args->model->vao );
+ glGenBuffers( 1, &in_args->model->vbo );
+ glGenBuffers( 1, &in_args->model->ebo );
+
+ u32 stride = sizeof(struct mdl_vert);
+ glBindBuffer( GL_ARRAY_BUFFER, in_args->model->vbo );
+ glBufferData( GL_ARRAY_BUFFER, in_args->model->vert_count*stride, in_args->vert_buffer, GL_STATIC_DRAW );
+
+ glBindVertexArray( in_args->model->vao );
+ glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, in_args->model->ebo );
+ glBufferData( GL_ELEMENT_ARRAY_BUFFER, in_args->model->indice_count*sizeof(u32),
+ in_args->indice_buffer, GL_STATIC_DRAW );
+
+ /* 0: coordinates */
+ glVertexAttribPointer( 0, 3, GL_FLOAT, GL_FALSE, stride, (void*)0 );
+ glEnableVertexAttribArray( 0 );
+
+ /* 1: normal */
+ glVertexAttribPointer( 1, 3, GL_FLOAT, GL_FALSE, stride, (void *)offsetof(struct mdl_vert, norm) );
+ glEnableVertexAttribArray( 1 );
+
+ /* 2: uv */
+ glVertexAttribPointer( 2, 2, GL_FLOAT, GL_FALSE, stride, (void *)offsetof(struct mdl_vert, uv) );
+ glEnableVertexAttribArray( 2 );
+
+ /* 3: colour */
+ glVertexAttribPointer( 3, 4, GL_UNSIGNED_BYTE, GL_TRUE, stride, (void *)offsetof(struct mdl_vert, colour) );
+ glEnableVertexAttribArray( 3 );
+
+ /* 4: weights */
+ glVertexAttribPointer( 4, 4, GL_UNSIGNED_SHORT, GL_TRUE, stride, (void *)offsetof(struct mdl_vert, weights) );
+ glEnableVertexAttribArray( 4 );
+
+ /* 5: groups */
+ glVertexAttribIPointer( 5, 4, GL_UNSIGNED_BYTE, stride, (void *)offsetof(struct mdl_vert, groups) );
+ glEnableVertexAttribArray( 5 );
+}
+
+void vg_model_stream_meshes_gpu( struct vg_model_stream_context *ctx, u32 *fixup_table )
+{
+ ASSERT_CRITICAL( _thread_has_flags( _get_thread_id(), THREAD_FLAG_ASYNC ) );
+ ASSERT_CRITICAL( ctx->model->flags & VG_MODEL_CPU_METADATA );
+ ctx->model->flags |= VG_MODEL_GPU_MESHES;
+
+	/* NOTE: We could check here if we already have CPU meshes and use those buffers.
+	 *       In practice only a single call site ever uses both paths. */
+
+ struct array_file_meta *arr_vertices = af_find_array( &ctx->af, "mdl_vert" );
+ struct array_file_meta *arr_indices = af_find_array( &ctx->af, "mdl_indice" );
+
+ if( arr_vertices && arr_indices )
+ {
+ u32 size_verts = PAD_TO_8(sizeof(struct mdl_vert)*arr_vertices->item_count),
+ size_indices = PAD_TO_8(sizeof(u32)*arr_indices->item_count),
+ size_hdr = PAD_TO_8(sizeof(struct model_upload_task)),
+ total = size_hdr + size_verts + size_indices;
+
+ struct task *upload_task = _task_new( k_thread_main, total, 0, "Model upload to GPU task" );
+ struct model_upload_task *args = task_buffer( upload_task );
+
+ args->model = ctx->model;
+ args->vert_buffer = ((void *)args) + size_hdr;
+ args->indice_buffer = ((void *)args) + size_hdr + size_verts;
+ ctx->model->vert_count = arr_vertices->item_count;
+ ctx->model->indice_count = arr_indices->item_count;
+
+ af_load_array_file_buffer( &ctx->af, arr_vertices, args->vert_buffer, sizeof(struct mdl_vert) );
+ af_load_array_file_buffer( &ctx->af, arr_indices, args->indice_buffer, sizeof(u32) );
+
+ if( fixup_table )
+ {
+ for( u32 i=0; i<ctx->model->vert_count; i ++ )
+ {
+ struct mdl_vert *vert = &args->vert_buffer[i];
+ for( u32 j=0; j<4; j++ )
+ vert->groups[j] = fixup_table[vert->groups[j]];
+ }
+ }
+
+ /*
+ * Unpack the indices (if there are meshes)
+ * ---------------------------------------------------------
+ */
+ if( ctx->model->submesh_count )
+ {
+ struct mdl_submesh *sm = &ctx->model->submeshes[ 0 ];
+ u32 offset = sm->vertex_count;
+
+ for( u32 i=1; i<ctx->model->submesh_count; i++ )
+ {
+ struct mdl_submesh *sm = &ctx->model->submeshes[ i ];
+ u32 *indices = args->indice_buffer + sm->indice_start;
+
+ for( u32 j=0; j<sm->indice_count; j++ )
+ indices[j] += offset;
+ offset += sm->vertex_count;
+ }
+ }
+
+ task_send( upload_task, vg_model_upload_task );
+ }
+ else
+ {
+ $log( $fatal, {"No vertex/indice data in model file"} );
+ _fatal_exit();
+ }
+}
+
+void vg_model_stream_textures_gpu( struct vg_model_stream_context *ctx )
+{
+ ASSERT_CRITICAL( ctx->model->flags & VG_MODEL_CPU_METADATA );
+ ctx->model->flags |= VG_MODEL_GPU_TEXTURES;
+ for( u32 i=0; i<ctx->model->texture_count; i ++ )
+ {
+ union mdl_texture *tex = &ctx->model->textures[ i ];
+ struct mdl_file pack_info = tex->file;
+ _vg_tex_load_stream( &tex->tex, vg_model_stream_pack_stream( ctx, &pack_info ),
+ VG_TEX_REPEAT | VG_TEX_NEAREST | VG_TEX_NOMIP );
+ }
+}
+
+bool vg_model_stream_open( struct vg_model_stream_context *ctx, struct vg_model *model, const c8 *path )
+{
+ zero_buffer( ctx, sizeof(struct vg_model_stream_context) );
+ zero_buffer( model, sizeof(struct vg_model) );
+ if( stream_open_file( &ctx->stream, path, k_stream_read ) )
+ {
+ ctx->model = model;
+ ctx->temp_frame = _start_temporary_frame();
+ if( !af_open_stream( &ctx->af, &ctx->stream, VG_MODEL_VERSION_MIN, VG_MODEL_VERSION_NR, _temporary_stack_allocator()) )
+ {
+ stream_close( &ctx->stream );
+ _end_temporary_frame( ctx->temp_frame );
+ return 0;
+ }
+ ctx->model->version = ctx->af.header.version;
+ return 1;
+ }
+ else return 0;
+}
+
+void vg_model_stream_close( struct vg_model_stream_context *ctx )
+{
+ stream_close( &ctx->stream );
+ _end_temporary_frame( ctx->temp_frame );
+}
+
+bool vg_model_load( struct vg_model *model, u32 model_flags, const c8 *path, struct stack_allocator *stack )
+{
+ bool success = 0;
+ model_flags |= VG_MODEL_CPU_METADATA;
+
+ struct vg_model_stream_context ctx;
+ if( vg_model_stream_open( &ctx, model, path ) )
+ {
+ if( model_flags & VG_MODEL_CPU_METADATA )
+ vg_model_stream_metadata( &ctx, stack );
+
+ if( model_flags & VG_MODEL_CPU_MESHES )
+ vg_model_stream_meshes_cpu( &ctx, stack );
+
+ ASSERT_CRITICAL( _thread_has_flags( _get_thread_id(), THREAD_FLAG_ASYNC ) );
+ if( model_flags & VG_MODEL_GPU_MESHES )
+ vg_model_stream_meshes_gpu( &ctx, NULL );
+
+ if( model_flags & VG_MODEL_GPU_TEXTURES )
+ vg_model_stream_textures_gpu( &ctx );
+
+ vg_model_stream_close( &ctx );
+ success = 1;
+ }
+ return success;
+}
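+
+/*
+ * Usage sketch (illustrative only): the common engine path loads metadata and
+ * pushes meshes/textures to the GPU in one call. The path and stack are
+ * hypothetical; the call must come from a thread flagged THREAD_FLAG_ASYNC.
+ *
+ *   struct vg_model board;
+ *   if( !vg_model_load( &board, VG_MODEL_ENGINE_STANDARD, "models/board.mdl", &world_stack ) )
+ *      $log( $error, {"Failed to load board model"} );
+ */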
+
+void vg_model_unload_gpu( struct vg_model *model )
+{
+ if( model->flags & VG_MODEL_GPU_MESHES )
+ {
+ model->flags &= ~(u32)(VG_MODEL_GPU_MESHES);
+ glDeleteVertexArrays( 1, &model->vao );
+ glDeleteBuffers( 1, &model->ebo );
+ glDeleteBuffers( 1, &model->vbo );
+ }
+
+ if( model->flags & VG_MODEL_GPU_TEXTURES )
+ for( u32 i=0; i<model->texture_count; i ++ )
+ vg_tex_delete( &model->textures[i].tex );
+}
+
+void mdl_transform_m4x3( struct mdl_transform *transform, f32 mtx[4][3] )
+{
+ q_m3x3( transform->q, mtx );
+ v3_muls( mtx[0], transform->s[0], mtx[0] );
+ v3_muls( mtx[1], transform->s[1], mtx[1] );
+ v3_muls( mtx[2], transform->s[2], mtx[2] );
+ v3_copy( transform->co, mtx[3] );
+}
+
+void mdl_transform_identity( struct mdl_transform *transform )
+{
+ v3_fill( transform->co, 0 );
+ q_identity( transform->q );
+ v3_fill( transform->s, 1.0f );
+}
+
+void mdl_transform_vector( struct mdl_transform *transform, f32 vec[3], f32 dest[3] )
+{
+ v3_mul( transform->s, vec, dest );
+ q_mulv( transform->q, dest, dest );
+}
+
+void mdl_transform_point( struct mdl_transform *transform, f32 co[3], f32 dest[3] )
+{
+ mdl_transform_vector( transform, co, dest );
+ v3_add( transform->co, dest, dest );
+}
+
+void mdl_transform_mul( struct mdl_transform *a, struct mdl_transform *b, struct mdl_transform *d )
+{
+ mdl_transform_point( a, b->co, d->co );
+ q_mul( a->q, b->q, d->q );
+ q_normalize( d->q );
+ v3_mul( a->s, b->s, d->s );
+}
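+
+/*
+ * Usage sketch (illustrative only): compose a parent and child transform, then
+ * move a local-space point into world space. `parent` and `child` are
+ * hypothetical transforms filled in elsewhere.
+ *
+ *   struct mdl_transform world;
+ *   mdl_transform_mul( &parent, &child, &world );
+ *
+ *   f32 local[3] = { 0.0f, 1.0f, 0.0f }, world_co[3];
+ *   mdl_transform_point( &world, local, world_co );
+ */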
+
+void vg_model_bind_mesh( struct vg_model *model )
+{
+ glBindVertexArray( model->vao );
+}
+
+void vg_model_draw_elements( u32 start, u32 count )
+{
+ glDrawElements( GL_TRIANGLES, count, GL_UNSIGNED_INT, (void *)(start*sizeof(u32)) );
+}
+
+void vg_model_draw_submesh( struct mdl_submesh *sm )
+{
+ vg_model_draw_elements( sm->indice_start, sm->indice_count );
+}
+
+i32 vg_model_get_mesh_index( struct vg_model *model, const c8 *name )
+{
+ u32 hash = buffer_djb2( name, 0 );
+ for( u32 i=0; i<model->mesh_count; i++ )
+ {
+ struct mdl_mesh *mesh = &model->meshes[ i ];
+ if( af_str_eq( model->packed_strings, mesh->pstr_name, name, hash ) )
+ return i;
+ }
+ return -1;
+}
+
+i32 vg_model_get_submesh_index( struct vg_model *model, const c8 *mesh_name )
+{
+ i32 mesh_index = vg_model_get_mesh_index( model, mesh_name );
+ if( mesh_index == -1 )
+ return -1;
+
+ struct mdl_mesh *mesh = &model->meshes[ mesh_index ];
+ if( !mesh->submesh_count )
+ return -1;
+ return mesh->submesh_start;
+}
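+
+/*
+ * Usage sketch (illustrative only): draw one named mesh from a model loaded
+ * with GPU meshes. The mesh name is hypothetical; a real caller would bind a
+ * shader and textures first.
+ *
+ *   i32 sm_index = vg_model_get_submesh_index( &model, "wheel_front" );
+ *   if( sm_index != -1 )
+ *   {
+ *      vg_model_bind_mesh( &model );
+ *      vg_model_draw_submesh( &model.submeshes[ sm_index ] );
+ *   }
+ */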
--- /dev/null
+#pragma once
+#include "vg_tex.h"
+#include "shader_props.h"
+#include "array_file.h"
+
+#define VG_MODEL_VERSION_MIN 110
+#define VG_MODEL_VERSION_NR 110
+
+enum mdl_shader
+{
+ k_shader_standard = 0,
+ k_shader_standard_cutout = 1,
+ k_shader_terrain_blend = 2,
+ k_shader_standard_vertex_blend = 3,
+ k_shader_water = 4,
+ k_shader_invisible = 5,
+ k_shader_boundary = 6,
+ k_shader_fxglow = 7,
+ k_shader_cubemap = 8,
+ k_shader_walking = 9,
+ k_shader_foliage = 10,
+ k_shader_workshop = 11,
+ k_shader_override = 30000
+};
+
+enum mdl_surface_prop
+{
+ k_surface_prop_concrete = 0,
+ k_surface_prop_wood = 1,
+ k_surface_prop_grass = 2,
+ k_surface_prop_tiles = 3,
+ k_surface_prop_metal = 4,
+ k_surface_prop_snow = 5,
+ k_surface_prop_sand = 6
+};
+
+enum material_flag
+{
+ k_material_flag_skate_target = 0x0001,
+ k_material_flag_collision = 0x0002,
+ k_material_flag_grow_grass = 0x0004,
+ k_material_flag_grindable = 0x0008,
+ k_material_flag_invisible = 0x0010,
+ k_material_flag_boundary = 0x0020,
+ k_material_flag_preview_visibile = 0x0040,
+ k_material_flag_walking = 0x0080,
+
+ k_material_flag_ghosts =
+ k_material_flag_boundary|
+ k_material_flag_invisible|
+ k_material_flag_walking
+};
+
+#pragma pack(push,1)
+
+/* 48 byte */
+struct mdl_vert
+{
+ f32 co[3], /* 3*32 */
+ norm[3]; /* 3*32 */
+ f32 uv[2]; /* 2*32 */
+
+ u8 colour[4]; /* 4*8 */
+ u16 weights[4];/* 4*16 */
+ u8 groups[4]; /* 4*8 */
+};
+
+#pragma pack(pop)
+
+struct mdl_transform
+{
+ f32 co[3], s[3];
+ f32 q[4];
+};
+
+void mdl_transform_identity( struct mdl_transform *transform );
+void mdl_transform_vector( struct mdl_transform *transform, f32 vec[3], f32 dest[3] );
+void mdl_transform_point( struct mdl_transform *transform, f32 co[3], f32 dest[3] );
+void mdl_transform_mul( struct mdl_transform *a, struct mdl_transform *b, struct mdl_transform *d );
+
+#if (VG_MODEL_VERSION_MIN <= 105)
+struct mdl_material_v101
+{
+ u32 pstr_name,
+ shader,
+ flags,
+ surface_prop;
+
+ f32 colour[4],
+ colour1[4];
+
+ u32 tex_diffuse, /* Indexes start from 1. 0 if missing. */
+ tex_none0,
+ tex_none1;
+};
+#endif
+
+struct mdl_material
+{
+ u32 pstr_name,
+ shader,
+ flags,
+ surface_prop;
+
+ union
+ {
+ struct
+ {
+ u32 offset, size; /* indexes shader data */
+ }
+ kvs;
+ u32 kv_root; /* runtime */
+ }
+ props;
+};
+
+struct mdl_bone
+{
+ f32 co[3], end[3];
+ u32 parent,
+ collider,
+ ik_target,
+ ik_pole,
+ flags,
+ pstr_name;
+
+ f32 hitbox[2][3];
+ f32 conevx[3], conevy[3], coneva[3];
+ f32 conet;
+};
+
+enum bone_flag
+{
+ k_bone_flag_deform = 0x00000001,
+ k_bone_flag_ik = 0x00000002,
+ k_bone_flag_cone_constraint = 0x00000004
+};
+
+enum bone_collider
+{
+ k_bone_collider_none = 0,
+ k_bone_collider_box = 1,
+ k_bone_collider_capsule = 2
+};
+
+struct mdl_armature
+{
+ struct mdl_transform transform;
+ u32 bone_start,
+ bone_count,
+ anim_start_OBSOLETE_107, // obsolete 107+
+ anim_count_OBSOLETE_107, // .
+ pstr_name; // v107+
+};
+
+struct mdl_submesh
+{
+ u32 indice_start,
+ indice_count,
+ vertex_start,
+ vertex_count;
+
+ f32 bbx[2][3];
+ u16 material_id, flags;
+};
+
+enum esubmesh_flags
+{
+ k_submesh_flag_none = 0x0000,
+ k_submesh_flag_consumed = 0x0001
+};
+
+struct mdl_mesh
+{
+ struct mdl_transform transform;
+ u32 submesh_start,
+ submesh_count,
+ pstr_name,
+ entity_id, /* upper 16 bits: type, lower 16 bits: index. hgn: 11.06.2025 Is this still used??? */
+ armature_id;
+};
+
+struct mdl_file
+{
+ u32 pstr_path,
+ pack_offset,
+ pack_size;
+};
+
+union mdl_texture
+{
+ struct mdl_file file;
+ struct vg_tex tex;
+};
+
+struct vg_model
+{
+ u32 version;
+ u32 flags;
+
+ /* VG_MODEL_CPU_METADATA ---------------------- */
+ const void *packed_strings;
+ union shader_props *shader_props;
+
+ struct mdl_mesh *meshes;
+ u32 mesh_count;
+
+ struct mdl_submesh *submeshes;
+ u32 submesh_count;
+
+ struct mdl_material *materials;
+ u32 material_count;
+
+ union mdl_texture *textures;
+ u32 texture_count;
+
+ struct mdl_armature *armatures;
+ u32 armature_count;
+
+ struct mdl_bone *bones;
+ u32 bone_count;
+
+ /* VG_MODEL_CPU_MESHES ---------------------- */
+ struct mdl_vert *verts;
+ u32 vert_count;
+ u32 *indices;
+ u32 indice_count;
+
+ u32 pack_base_offset;
+
+ /* VG_MODEL_GPU_MESHES ----------------------- */
+ GLuint vao, vbo, ebo;
+};
+
+#define VG_MODEL_CPU_METADATA 0x4
+#define VG_MODEL_CPU_MESHES 0x8
+#define VG_MODEL_GPU_TEXTURES 0x1
+#define VG_MODEL_GPU_MESHES 0x2
+#define VG_MODEL_ENGINE_STANDARD (VG_MODEL_CPU_METADATA|VG_MODEL_GPU_TEXTURES|VG_MODEL_GPU_MESHES)
+#define VG_MODEL_ENGINE_PROCEDURAL_SOURCE (VG_MODEL_CPU_METADATA|VG_MODEL_GPU_TEXTURES|VG_MODEL_CPU_MESHES)
+
+bool vg_model_load( struct vg_model *model, u32 model_flags, const c8 *path, struct stack_allocator *stack );
+void vg_model_unload_gpu( struct vg_model *model );
+
+/* Sub functions for each part of the load process
+ * --------------------------------------------------------------------------------------------------------------- */
+struct vg_model_stream_context
+{
+ struct stream stream;
+ struct array_file_context af;
+ struct vg_model *model;
+ u32 temp_frame;
+};
+
+bool vg_model_stream_open( struct vg_model_stream_context *ctx, struct vg_model *model, const c8 *path );
+void vg_model_stream_close( struct vg_model_stream_context *ctx );
+
+/* Parts which you might want (currently they all require metadata to be explicitly loaded) */
+void vg_model_stream_metadata( struct vg_model_stream_context *ctx, struct stack_allocator *stack );
+void vg_model_stream_meshes_cpu( struct vg_model_stream_context *ctx, struct stack_allocator *stack );
+
+void vg_model_stream_meshes_gpu( struct vg_model_stream_context *ctx, u32 *fixup_table );
+void vg_model_stream_textures_gpu( struct vg_model_stream_context *ctx );
+
+struct stream *vg_model_stream_pack_stream( struct vg_model_stream_context *ctx, struct mdl_file *file );
+
+/* Rendering operations
+ * ----------------------------------------------------------------------------------------------------------------- */
+void vg_model_bind_mesh( struct vg_model *model );
+void vg_model_draw_elements( u32 start, u32 count );
+void vg_model_draw_submesh( struct mdl_submesh *sm );
+i32 vg_model_get_mesh_index( struct vg_model *model, const c8 *name );
+i32 vg_model_get_submesh_index( struct vg_model *model, const c8 *mesh_name );
+void mdl_transform_m4x3( struct mdl_transform *transform, f32 mtx[4][3] );
--- /dev/null
+add model.c
+add metascene.c
+add array_file.c
+include ""
--- /dev/null
+#pragma once
+
+enum material_render_flag
+{
+ k_material_render_additive = 0x20
+};
+
+enum workshop_shader_part
+{
+ k_workshop_shader_part_truck1,
+ k_workshop_shader_part_truck2,
+ k_workshop_shader_part_wheel1,
+ k_workshop_shader_part_wheel2,
+ k_workshop_shader_part_wheel3,
+ k_workshop_shader_part_wheel4,
+ k_workshop_shader_part_edge,
+ k_workshop_shader_part_griptape,
+ k_workshop_shader_part_deck,
+ k_workshop_shader_part_max
+};
+
+union shader_props
+{
+ struct shader_props_standard
+ {
+ u32 tex_diffuse;
+ u32 render_flags;
+ }
+ standard;
+
+ struct shader_props_terrain
+ {
+ u32 tex_diffuse;
+ f32 blend_offset[2];
+ f32 sand_colour[4];
+ }
+ terrain;
+
+ struct shader_props_vertex_blend
+ {
+ u32 tex_diffuse;
+ f32 blend_offset[2];
+ }
+ vertex_blend;
+
+ struct shader_props_water
+ {
+ f32 shore_colour[4];
+ f32 deep_colour[4];
+ f32 fog_scale;
+ f32 fresnel;
+ f32 water_sale;
+ f32 wave_speed[4];
+ }
+ water;
+
+ struct shader_props_cubemapped
+ {
+ u32 tex_diffuse;
+ u32 cubemap_entity;
+ f32 tint[4];
+ }
+ cubemapped;
+
+ struct shader_props_workshop
+ {
+ u32 tex_all[k_workshop_shader_part_max];
+ }
+ workshop;
+};
+
+extern const char *_shader_prop_workshop_keys[k_workshop_shader_part_max];
--- /dev/null
+#include "common_api.h"
+#include "common_thread_api.h"
+#include "vg_tex.h"
+
+#define QOI_OP_INDEX 0x00 /* 00xxxxxx */
+#define QOI_OP_DIFF 0x40 /* 01xxxxxx */
+#define QOI_OP_LUMA 0x80 /* 10xxxxxx */
+#define QOI_OP_RUN 0xc0 /* 11xxxxxx */
+#define QOI_OP_RGB 0xfe /* 11111110 */
+#define QOI_OP_RGBA 0xff /* 11111111 */
+#define QOI_MASK_2 0xc0 /* 11000000 */
+
+#define QOI_COLOR_HASH(C) (C.rgba[0]*3 + C.rgba[1]*5 + C.rgba[2]*7 + C.rgba[3]*11)
+#define QOI_MAGIC \
+ (((u32)'q') | ((u32)'o') << 8 | \
+ ((u32)'i') << 16 | ((u32)'f') << 24)
+
+static const u8 qoi_padding[8] = {0,0,0,0,0,0,0,1};
+
+#define cpu_to_big32 big32_to_cpu
+static inline u32 big32_to_cpu( u32 x )
+{
+ return ((x & 0xFF) << 24) | ((x & 0xFF00) << 8) | ((x & 0xFF0000) >> 8) | (x >> 24);
+}
+
+bool vg_qoi_validate( const struct qoi_desc *desc )
+{
+ if( (desc->width == 0) || (desc->height == 0) ||
+ (desc->width >= 2048) || (desc->height >= 2048) )
+ {
+ $log( $error, {"QOI file is invalid; Unpermitted size: "}, $unsigned( desc->width ), {" by "}, $unsigned( desc->height ) );
+ return 0;
+ }
+
+ if( !(desc->channels == 3 || desc->channels == 4) )
+ {
+ $log( $error, {"QOI file is invalid; Only 3 or 4 channels allowed, file has: "}, $unsigned( desc->channels ) );
+ return 0;
+ }
+ return 1;
+}
+
+/* Initialize the stream context and return the number of bytes required to store the final RGB/A image data. */
+u32 vg_qoi_stream_init( struct qoi_desc *desc, struct stream *stream )
+{
+ stream_read( stream, desc, sizeof(struct qoi_desc) );
+ if( desc->magic != QOI_MAGIC )
+ {
+ $log( $error, {"QOI file is invalid; Magic Number incorrect."} );
+ return 0;
+ }
+
+ desc->width = big32_to_cpu( desc->width );
+ desc->height = big32_to_cpu( desc->height );
+
+ if( vg_qoi_validate( desc ) )
+ return desc->width * desc->height * desc->channels;
+ else return 0;
+}
+
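+/* Decode the QOI chunk stream into `pixels`; the buffer must hold
+ * width*height*channels bytes (the value returned by vg_qoi_stream_init).
+ * When v_flip is set, rows are written bottom-up. */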
+void vg_qoi_stream_decode( struct qoi_desc *desc, struct stream *stream, u8 *pixels, bool v_flip )
+{
+ union qoi_rgba_t index[64], px;
+ zero_buffer( index, sizeof(union qoi_rgba_t)*64 );
+ zero_buffer( &px, sizeof(union qoi_rgba_t) );
+ px.rgba[3] = 255;
+
+ u32 run=0;
+ for( u32 y=0; y<desc->height; y ++ )
+ {
+ for( u32 x=0; x<desc->width; x ++ )
+ {
+ if( run > 0 )
+ run --;
+ else
+ {
+ u8 b1;
+ stream_read( stream, &b1, 1 );
+
+ if( b1 == QOI_OP_RGB )
+ stream_read( stream, px.rgba, 3 );
+ else if( b1 == QOI_OP_RGBA )
+ stream_read( stream, px.rgba, 4 );
+ else if( (b1 & QOI_MASK_2) == QOI_OP_INDEX )
+ px = index[b1];
+ else if( (b1 & QOI_MASK_2) == QOI_OP_DIFF )
+ {
+ px.rgba[0] += (i32)((b1 >> 4) & 0x03) - 2;
+ px.rgba[1] += (i32)((b1 >> 2) & 0x03) - 2;
+ px.rgba[2] += (i32)( b1 & 0x03) - 2;
+ }
+ else if( (b1 & QOI_MASK_2) == QOI_OP_LUMA )
+ {
+ u8 b2;
+ stream_read( stream, &b2, 1 );
+ i32 vg = (i32)(b1 & 0x3f) - 32;
+ px.rgba[0] += vg - 8 + (i32)((b2 >> 4) & 0x0f);
+ px.rgba[1] += vg;
+ px.rgba[2] += vg - 8 + (i32)(b2 & 0x0f);
+ }
+ else if( (b1 & QOI_MASK_2) == QOI_OP_RUN )
+ run = (b1 & 0x3f);
+ index[ QOI_COLOR_HASH(px) % 64 ] = px;
+ }
+
+ u32 row = v_flip? desc->height-(y+1): y;
+ for( u32 i=0; i < desc->channels; i ++ )
+ pixels[ ((row*desc->width) + x)*desc->channels + i ] = px.rgba[i];
+ }
+ }
+}
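+
+/* Example (illustrative sketch, not engine code): reading a .qoi file into a
+ * pixel buffer. `my_alloc`/`my_free` stand in for whichever allocator the
+ * caller uses; they are placeholders, not real API.
+ *
+ *    struct stream file;
+ *    if( stream_open_file( &file, "image.qoi", k_stream_read ) )
+ *    {
+ *       struct qoi_desc desc;
+ *       u32 size = vg_qoi_stream_init( &desc, &file );
+ *       if( size )
+ *       {
+ *          u8 *pixels = my_alloc( size );
+ *          vg_qoi_stream_decode( &desc, &file, pixels, 0 );
+ *          // ... use pixels, then my_free( pixels )
+ *       }
+ *       stream_close( &file );
+ *    }
+ */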
+
+u32 vg_query_qoi_max_compressed_size( const struct qoi_desc *desc )
+{
+ return desc->width * desc->height * (desc->channels + 1) + sizeof(struct qoi_desc) + sizeof(qoi_padding);
+}
+
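+/* Encode `pixels` (width*height*channels bytes, laid out as described by `desc`)
+ * as a QOI stream: writes the file header, chunk stream and end padding to
+ * `stream`. Returns 1 on success, 0 if the descriptor fails validation. */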
+u32 vg_qoi_stream_encode( const struct qoi_desc *desc, const u8 *pixels, struct stream *stream, bool v_flip )
+{
+ if( !vg_qoi_validate( desc ) )
+ return 0;
+
+ struct qoi_desc file_header = *desc;
+ file_header.magic = QOI_MAGIC;
+ file_header.width = cpu_to_big32( file_header.width );
+ file_header.height = cpu_to_big32( file_header.height );
+ stream_write( stream, &file_header, sizeof(struct qoi_desc) );
+
+ union qoi_rgba_t index[64];
+ zero_buffer( index, sizeof(union qoi_rgba_t)*64 );
+ union qoi_rgba_t px_prev = { .rgba = { 0, 0, 0, 255 } };
+ union qoi_rgba_t px = px_prev;
+
+ u32 run = 0;
+ for( u32 y=0; y<desc->height; y ++ )
+ {
+ for( u32 x=0; x<desc->width; x ++ )
+ {
+ u32 row = v_flip? desc->height-(y+1): y;
+ for( u32 i=0; i < desc->channels; i ++ )
+ px.rgba[i] = pixels[ ((row*desc->width) + x)*desc->channels + i ];
+
+ if( px.v == px_prev.v )
+ {
+ run ++;
+ if( run == 62 || ((y+1 == desc->height) && (x+1 == desc->width)) )
+ {
+ u8 b1 = QOI_OP_RUN | (run - 1);
+ stream_write( stream, &b1, 1 );
+ run = 0;
+ }
+ }
+ else
+ {
+ if( run > 0 )
+ {
+ u8 b1 = QOI_OP_RUN | (run - 1);
+ stream_write( stream, &b1, 1 );
+ run = 0;
+ }
+
+ u32 index_pos = QOI_COLOR_HASH( px ) % 64;
+ if( index[ index_pos ].v == px.v )
+ {
+ u8 b1 = QOI_OP_INDEX | index_pos;
+ stream_write( stream, &b1, 1 );
+ }
+ else
+ {
+ index[ index_pos ] = px;
+ if( px.rgba[3] == px_prev.rgba[3] )
+ {
+ i8 vr = px.rgba[0] - px_prev.rgba[0],
+ vg = px.rgba[1] - px_prev.rgba[1],
+ vb = px.rgba[2] - px_prev.rgba[2],
+ vg_r = vr - vg,
+ vg_b = vb - vg;
+
+ if( vr > -3 && vr < 2 &&
+ vg > -3 && vg < 2 &&
+ vb > -3 && vb < 2 )
+ {
+ stream_write( stream, (u8[]){ QOI_OP_DIFF | (vr + 2) << 4 | (vg + 2) << 2 | (vb + 2) }, 1 );
+ }
+ else if( vg_r > -9 && vg_r < 8 &&
+ vg > -33 && vg < 32 &&
+ vg_b > -9 && vg_b < 8 )
+ {
+ stream_write( stream, (u8[]){ QOI_OP_LUMA | (vg + 32), (vg_r + 8) << 4 | (vg_b + 8) }, 2 );
+ }
+ else
+ {
+ stream_write( stream, (u8[]){ QOI_OP_RGB }, 1 );
+ stream_write( stream, px.rgba, 3 );
+ }
+ }
+ else
+ {
+ stream_write( stream, (u8 []){ QOI_OP_RGBA }, 1 );
+ stream_write( stream, px.rgba, 4 );
+ }
+ }
+ }
+ px_prev = px;
+ }
+ }
+ stream_write( stream, qoi_padding, sizeof(qoi_padding) );
+ return 1;
+}
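+
+/* Example (illustrative sketch): writing an RGBA buffer out as a .qoi file.
+ * `k_stream_write` is assumed here as the write-mode counterpart of
+ * `k_stream_read`; adjust to whatever mode the stream API actually provides.
+ *
+ *    struct qoi_desc desc = { .width = w, .height = h,
+ *                             .channels = 4, .colorspace = QOI_SRGB };
+ *    struct stream file;
+ *    if( stream_open_file( &file, "out.qoi", k_stream_write ) )
+ *    {
+ *       vg_qoi_stream_encode( &desc, pixels, &file, 0 );
+ *       stream_close( &file );
+ *    }
+ */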
+
+/* VG_PART
+ * ------------------------------------------------------------------------------------------------------------------ */
+
+struct
+{
+ GLuint error2d, errorcube;
+}
+static _vg_tex;
+
+void _vg_tex_init(void)
+{
+ ASSERT_CRITICAL( _thread_has_flags( _get_thread_id(), THREAD_FLAG_OPENGL ) );
+ $log( $info, {"[INIT] _vg_tex_init"} );
+
+ static u8 const_vg_tex2d_err[] =
+ {
+ 0xff,0x00,0xff,0xff, 0x00,0x00,0x00,0xff, 0xff,0x00,0xff,0xff, 0x00,0x00,0x00,0xff,
+ 0x00,0x00,0x00,0xff, 0xff,0x00,0xff,0xff, 0x00,0x00,0x00,0xff, 0xff,0x00,0xff,0xff,
+ 0xff,0x00,0xff,0xff, 0x00,0x00,0x00,0xff, 0xff,0x00,0xff,0xff, 0x00,0x00,0x00,0xff,
+ 0x00,0x00,0x00,0xff, 0xff,0x00,0xff,0xff, 0x00,0x00,0x00,0xff, 0xff,0x00,0xff,0xff,
+ };
+
+ glGenTextures( 1, &_vg_tex.error2d );
+ glBindTexture( GL_TEXTURE_2D, _vg_tex.error2d );
+ glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, 4, 4, 0, GL_RGBA, GL_UNSIGNED_BYTE, const_vg_tex2d_err );
+ glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST );
+ glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST );
+ glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT );
+ glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT );
+
+ glGenTextures( 1, &_vg_tex.errorcube );
+ glBindTexture( GL_TEXTURE_CUBE_MAP, _vg_tex.errorcube );
+ glTexParameteri( GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
+ glTexParameteri( GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
+ glTexParameteri( GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE );
+ glTexParameteri( GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE );
+ glTexParameteri( GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE );
+
+ for( u32 j=0; j<6; j ++ )
+ {
+ glTexImage2D( GL_TEXTURE_CUBE_MAP_POSITIVE_X + j, 0, GL_RGBA, 4, 4,
+ 0, GL_RGBA, GL_UNSIGNED_BYTE, const_vg_tex2d_err );
+ }
+}
+
+struct tex_upload_task
+{
+ struct vg_tex *tex;
+ u32 width, height, channels, flags;
+ u8 image_buffer[];
+};
+
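+/* Task body that runs on the main (OpenGL) thread: creates the GL texture
+ * object from the decoded pixels carried in the task buffer, or aliases the
+ * built-in error textures when VG_TEX_ERROR is set. */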
+static void _vg_tex_upload( struct task *task )
+{
+ struct tex_upload_task *in_args = task_buffer( task );
+ ASSERT_CRITICAL( _vg_tex.errorcube && _vg_tex.error2d );
+ u32 flags = in_args->flags;
+ struct vg_tex *tex = in_args->tex;
+
+ if( flags & VG_TEX_ERROR )
+ {
+ tex->name = (flags & VG_TEX_CUBEMAP)? _vg_tex.errorcube: _vg_tex.error2d;
+ tex->flags = (flags & VG_TEX_CUBEMAP) | VG_TEX_ERROR | VG_TEX_COMPLETE;
+ }
+ else
+ {
+ u32 pixel_format = 0;
+ if( in_args->channels == 3 ) pixel_format = GL_RGB;
+ else if( in_args->channels == 4 ) pixel_format = GL_RGBA;
+ else
+ {
+ $log( $fatal, {"Can't upload texture with "}, $unsigned( in_args->channels ), {" channels."} );
+ _fatal_exit();
+ }
+
+ glGenTextures( 1, &tex->name );
+ u32 filter_min = 0,
+ filter_mag = 0;
+ if( flags & VG_TEX_LINEAR )
+ {
+ if( flags & VG_TEX_NOMIP ) filter_min = GL_LINEAR;
+ else filter_min = GL_LINEAR_MIPMAP_LINEAR;
+ filter_mag = GL_LINEAR;
+ }
+ else
+ {
+ ASSERT_CRITICAL( flags & VG_TEX_NEAREST );
+ filter_min = GL_NEAREST;
+ filter_mag = GL_NEAREST;
+ }
+
+ if( flags & VG_TEX_CUBEMAP )
+ {
+ u32 w = in_args->width,
+ h = in_args->height/6;
+
+ glBindTexture( GL_TEXTURE_CUBE_MAP, tex->name );
+ glTexParameteri( GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, filter_min );
+ glTexParameteri( GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, filter_mag );
+ glTexParameteri( GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE ); /* can this be anything else? */
+ glTexParameteri( GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE );
+ glTexParameteri( GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE );
+
+ for( u32 j=0; j<6; j ++ )
+ {
+ u32 offset = w*h*j*in_args->channels;
+ glTexImage2D( GL_TEXTURE_CUBE_MAP_POSITIVE_X + j, 0, pixel_format,
+ w, h,
+ 0, pixel_format, GL_UNSIGNED_BYTE, in_args->image_buffer + offset );
+ }
+
+ if( !(flags & VG_TEX_NOMIP) )
+ glGenerateMipmap( GL_TEXTURE_CUBE_MAP );
+ }
+ else
+ {
+ glBindTexture( GL_TEXTURE_2D, tex->name );
+ glTexImage2D( GL_TEXTURE_2D, 0, pixel_format, in_args->width, in_args->height,
+ 0, pixel_format, GL_UNSIGNED_BYTE, in_args->image_buffer );
+
+ glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, filter_min );
+ glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, filter_mag );
+
+ u32 wrap_s = 0,
+ wrap_t = 0;
+
+ if( flags & VG_TEX_CLAMP )
+ {
+ wrap_s = GL_CLAMP_TO_EDGE;
+ wrap_t = GL_CLAMP_TO_EDGE;
+ }
+ else
+ {
+ ASSERT_CRITICAL( flags & VG_TEX_REPEAT );
+ wrap_s = GL_REPEAT;
+ wrap_t = GL_REPEAT;
+ }
+
+ glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, wrap_s );
+ glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, wrap_t );
+
+ if( !(flags & VG_TEX_NOMIP) )
+ glGenerateMipmap( GL_TEXTURE_2D );
+ }
+ tex->flags = flags | VG_TEX_COMPLETE;
+ }
+}
+
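+/* Decode a QOI image from `in_stream` on the async thread, then queue a GL
+ * upload task for the main thread. Returns 0 if the stream is not a valid
+ * QOI image; the caller is expected to fall back to the error texture. */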
+bool _vg_tex_load_stream( struct vg_tex *out_tex, struct stream *in_stream, u32 flags )
+{
+ ASSERT_CRITICAL( _thread_has_flags( _get_thread_id(), THREAD_FLAG_ASYNC ) );
+
+ struct qoi_desc qoi;
+ u32 size = vg_qoi_stream_init( &qoi, in_stream );
+ if( size )
+ {
+ _async_push_groups( ASYNC_GROUP_OPENGL, 0 );
+
+ struct task *upload_task = _task_new( k_thread_main, sizeof( struct tex_upload_task ) + size, 0, "Texture upload task" );
+ struct tex_upload_task *args = task_buffer( upload_task );
+ vg_qoi_stream_decode( &qoi, in_stream, args->image_buffer, flags & VG_TEX_FLIP_V? 1: 0 );
+ args->tex = out_tex;
+ args->width = qoi.width;
+ args->height = qoi.height;
+ args->channels = qoi.channels;
+ args->flags = flags;
+ task_send( upload_task, _vg_tex_upload );
+ _async_pop_groups();
+ return 1;
+ }
+ else
+ return 0;
+}
+
+void _vg_tex_load( struct vg_tex *out_tex, const c8 *path, u32 flags )
+{
+ ASSERT_CRITICAL( _thread_has_flags( _get_thread_id(), THREAD_FLAG_ASYNC ) );
+
+ bool error = 0;
+ struct stream file;
+ if( stream_open_file( &file, path, k_stream_read ) )
+ {
+ if( !_vg_tex_load_stream( out_tex, &file, flags ) )
+ error = 1;
+ stream_close( &file );
+ }
+ else
+ error = 1;
+
+ if( error )
+ {
+ _async_push_groups( ASYNC_GROUP_OPENGL, 0 );
+ struct task *upload_task = _task_new( k_thread_main, sizeof( struct tex_upload_task ), 0, "Texture upload task" );
+ struct tex_upload_task *args = task_buffer( upload_task );
+ args->tex = out_tex;
+ args->width = 0;
+ args->height = 0;
+ args->channels = 0;
+ args->flags = VG_TEX_ERROR;
+ task_send( upload_task, _vg_tex_upload );
+ _async_pop_groups();
+ }
+}
+
+u32 vg_tex_name( GLuint target, struct vg_tex *tex )
+{
+ if( !tex )
+ {
+ return (target == GL_TEXTURE_2D)? _vg_tex.error2d: _vg_tex.errorcube;
+ }
+ if( tex->flags & VG_TEX_COMPLETE ) return tex->name;
+ else return (target == GL_TEXTURE_2D)? _vg_tex.error2d: _vg_tex.errorcube;
+}
+
+void vg_tex_bind( GLuint target, struct vg_tex *tex, u32 slot )
+{
+ glActiveTexture( GL_TEXTURE0 + slot );
+ glBindTexture( target, vg_tex_name( target, tex ) );
+}
+
+void vg_tex_delete( struct vg_tex *tex )
+{
+ ASSERT_CRITICAL( _thread_has_flags( _get_thread_id(), THREAD_FLAG_OPENGL ) );
+ ASSERT_CRITICAL( tex->flags & VG_TEX_COMPLETE );
+ if( !(tex->flags & VG_TEX_ERROR) )
+ glDeleteTextures( 1, &tex->name );
+ zero_buffer( tex, sizeof(struct vg_tex) );
+}
--- /dev/null
+#pragma once
+#include "opengl.h"
+
+/* TODO: Include Okaypeg alongside QOI. */
+
+struct vg_tex
+{
+ u32 name;
+ u32 flags;
+};
+
+#pragma pack(push,1)
+union qoi_rgba_t
+{
+ u8 rgba[4];
+ u32 v;
+};
+
+struct qoi_desc
+{
+ u32 magic;
+ u32 width;
+ u32 height;
+ u8 channels;
+ u8 colorspace;
+};
+#pragma pack(pop)
+
+# define QOI_SRGB 0
+# define QOI_LINEAR 1
+
+/* Reading qois */
+u32 vg_qoi_stream_init( struct qoi_desc *desc, struct stream *file );
+void vg_qoi_stream_decode( struct qoi_desc *desc, struct stream *file, u8 *dest_buffer, bool v_flip );
+
+/* Writing qois */
+u32 vg_query_qoi_max_compressed_size( const struct qoi_desc *desc );
+u32 vg_qoi_stream_encode( const struct qoi_desc *desc, const u8 *pixels, struct stream *stream, bool v_flip );
+
+# define VG_TEX_LINEAR 0x1
+# define VG_TEX_NEAREST 0x2
+# define VG_TEX_REPEAT 0x4
+# define VG_TEX_CLAMP 0x8
+# define VG_TEX_NOMIP 0x10
+# define VG_TEX_CUBEMAP 0x20
+# define VG_TEX_ERROR 0x40
+# define VG_TEX_COMPLETE 0x80
+# define VG_TEX_FLIP_V 0x100
+# define VG_TEX_FRAMEBUFFER_ATTACHMENT 0x200
+# define VG_TEX_PRIVATE 0x400 /* used on renderbuffers... */
+
+u32 vg_tex_name( GLuint target, struct vg_tex *tex );
+void vg_tex_bind( GLuint target, struct vg_tex *tex, u32 slot );
+void vg_tex_delete( struct vg_tex *tex );
+void _vg_tex_load( struct vg_tex *out_tex, const c8 *path, u32 flags );
+bool _vg_tex_load_stream( struct vg_tex *out_tex, struct stream *in_stream, u32 flags );
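+
+/* Example (illustrative sketch): typical load-then-bind flow. The texture is
+ * requested from an async-flagged thread and bound later from the render path
+ * once the main thread has processed the upload task. The texture name, path
+ * and sampler slot 0 are assumptions of this sketch.
+ *
+ *    static struct vg_tex my_tex;                      // hypothetical
+ *
+ *    // async thread:
+ *    _vg_tex_load( &my_tex, "textures/example.qoi", VG_TEX_LINEAR|VG_TEX_REPEAT );
+ *
+ *    // main/OpenGL thread, per draw:
+ *    vg_tex_bind( GL_TEXTURE_2D, &my_tex, 0 );         // error tex until complete
+ */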
--- /dev/null
+add vg_tex.c
+include ""
+
+hook
+{
+ event START
+ function _vg_tex_init
+}