mkdir -p bin/vg.metacompiler-linux-x86_64-zig-cc/ &&
taskset -c 0-1 zig cc -I. -I./include/ -fsanitize=address -lasan -Wall -Wno-unused-function -O0 -ggdb \
- -std=c99 -D_DEFAULT_SOURCE \
+ -std=c11 -D_DEFAULT_SOURCE \
-include "common_api.h" \
source/terminal/main.c \
source/foundation/options.c \
layer_mask console
}
+ thread
+ {
+ name main
+ queue_size_m 40
+ }
+ thread
+ {
+ name async
+ queue_size_m 40
+ }
+ thread
+ {
+ name audio
+ }
+ thread
+ {
+ name exitor
+ queue_size_m 1
+ }
+
append graphics.kv
append glfw3.kv
#define $error 0x10
#define $fatal 0x20
#define $shell 0x40
+#define $raw 0x80
/* One day we will replace this with a proper pre-processor. */
#define $TO_STRING( S ) $TO_STRING_1( S )
#define $TO_STRING_1( S ) #S
#define $line __FILE__ ":" $TO_STRING(__LINE__)
-#define $log( C, ... ) $v_string( _log_event( C, $line ), __VA_ARGS__, {"\n"} )
+#define $log( C, ... ) $v_string( _log_event( C, $line ), __VA_ARGS__ ); _log_end_event();
struct stream *_log_event( u32 type, const c8 *code_location );
+void _log_init(void);
+void _log_end_event(void);
void _log_set_journal_path( const c8 *path );
-struct stream *_get_console_stream();
+void _log_add_listener( void (*fn)(const c8 *line, u32 length, u32 type), u32 filter );
/* Keyvalues
* ------------------------------------------------------------------------------------------------------------------ */
-struct semaphore;
-struct mutex;
-struct task_queue;
-
-struct semaphore *semaphore_new( struct stack_allocator *stack, u32 initial_value );
-void semaphore_delete( struct semaphore *s );
-void semaphore_post( struct semaphore *s );
-void semaphore_wait( struct semaphore *s );
-
-struct mutex *mutex_new( struct stack_allocator *stack );
-void mutex_delete( struct mutex *m );
-void mutex_lock( struct mutex *m );
-void mutex_unlock( struct mutex *m );
-
-bool _thread_has_flags( u32 flags );
-void _thread_set_flags( u32 flags );
-const c8 *_thread_prefix(void);
-
-void *task_new( struct task_queue *queue, u32 buffer_size, u32 async_flags, const c8 *debug_info );
-void task_send( struct task_queue *queue, void *buffer, void (*fn)(void) );
-
-struct task_queue *task_queue_new( struct stack_allocator *stack );
-void task_queue_end( struct task_queue *queue, bool immediate );
-bool task_queue_has_work( struct task_queue *queue );
-bool task_queue_process_next( struct task_queue *queue );
-bool task_queue_checksize( struct task_queue *queue, u32 bytes );
+#include "generated/threads.h"
+
+/* Static description of one engine thread, generated by the metacompiler from the
+ * 'thread' keyvalue blocks. */
+struct thread_info
+{
+   const c8 *name;
+   u32 queue_size_m;
+   u32 flags;
+};
+struct thread_info *_get_thread_info( enum thread_id thread );
+
+/* Allocation behaviour for _task_new(): CRITICAL aborts instead of waiting when the queue
+ * is full; NONBLOCKING returns NULL instead of waiting. */
+#define ASYNC_CRITICAL 0x1
+#define ASYNC_NONBLOCKING 0x2
+
+void _set_thread_id( enum thread_id id );
+enum thread_id _get_thread_id(void);
+bool _thread_has_flags( enum thread_id id, u32 flags );
+
+/* Allocate a task destined for another thread; pair every successful call with task_send()
+ * (pass fn == NULL to cancel). */
+struct task *_task_new( enum thread_id thread, u32 buffer_size, u32 async_flags, const c8 *debug_info );
+void task_send( struct task *task, void (*fn)( struct task *task ) );
+
+void *task_buffer( struct task *task );
+u32 task_buffer_size( struct task *task );
+
+bool _task_queue_process( bool blocking );
+bool _task_queue_can_fit( enum thread_id thread, u32 bytes );
+
+void _async_init(void);
+
+/* Scoped "group" bitmasks used to track outstanding task counts per subsystem. */
+void _async_push_groups( u16 groups, bool exclusive );
+void _async_pop_groups(void);
+u16 _async_get_groups(void);
+i16 _async_group_count( u16 group );
IMPL void _engine_console_ui(void);
+IMPL void _engine_console_init(void);
IMPL void _engine_ui_init(void);
IMPL void _engine_ui_pre_render(void);
#include "generated/console.c"
-struct
-{
- struct queue_allocator input_history, log_history;
-}
-_console;
-
-struct history_item
-{
- u32 repeat_count;
- const c8 *entry;
-};
-
i32 _console_exec_ccmd( struct console_arguments *args )
{
const c8 *path = console_get_argument( args, 0 );
#include "graphics_api.h"
#include "engine_interface.h"
+/* Console state: ring allocators for the typed-input history and the captured log lines. */
+struct
+{
+   struct queue_allocator input_history, log_history;
+}
+_console;
+
+/* One captured log line; buffer holds the NUL-terminated text (flexible array member). */
+struct log_history_item
+{
+   u32 type;
+   c8 buffer[];
+};
+
static c8 _input_buffer[ 1024 ];
void _engine_console_ui(void)
i16 kerning[3];
_font_get_kerning( kerning );
i16 history_box[4] = { 0, 0, _engine.w, kerning[1] * 32 };
-
- for( u32 i=0; i<32; i ++ )
- _graphics_line_rect( (i16[]){ 0, kerning[1] * i, _engine.w, kerning[1] }, (union colour){{0,255,0,255}} );
+
+   /* Render the most recent captured log lines, newest at the bottom row (31), walking
+    * backwards through the history ring. */
+   if( _console.log_history.allocation_count )
+   {
+      u32 item_offset = _console.log_history.head_offset;
+      for( u32 i=0; i<32; i ++ )
+      {
+         struct log_history_item *item = queue_data( &_console.log_history, item_offset );
+         /* FIX: an array cannot be initialized from a compound literal; use a plain
+          * brace initializer (the old form was a compile error). */
+         i16 line_rect[4] = { 0, kerning[1] * (31-i), _engine.w, kerning[1] };
+         _ui_text( line_rect, item->buffer, k_ui_align_x_left, (union colour){{128,128,128,255}} );
+
+         if( !queue_previous( &_console.log_history, item_offset, &item_offset ) )
+            break;
+      }
+   }
+
_graphics_line_rect( history_box, (union colour){{0,255,255,255}} );
- enum textbox_action action = _ui_textbox( (i16[]){ 0, kerning[1]*32, _engine.w, kerning[1] }, _input_buffer, sizeof(_input_buffer), 1, UI_AUTOFOCUS );
+ enum textbox_action action = _ui_textbox( (i16[]){ 0, kerning[1]*32, _engine.w, kerning[1] },
+ _input_buffer, sizeof(_input_buffer), 1, UI_AUTOFOCUS );
if( action == k_textbox_enter )
{
_input_buffer[0] = '\0';
}
}
+
+/* Log callback: copies each finished log line into the console's history ring, evicting the
+ * oldest entries until the new line fits. */
+static void _log_listener( const c8 *line, u32 length, u32 type )
+{
+   u32 required = sizeof( struct log_history_item ) + length+1;
+again:;
+   struct log_history_item *item = queue_alloc( &_console.log_history, required );
+   if( !item )
+   {
+      /* FIX: if the line cannot fit even in an EMPTY ring, evicting forever would spin;
+       * drop the line instead of looping on queue_pop(). */
+      if( !_console.log_history.allocation_count )
+         return;
+      queue_pop( &_console.log_history );
+      goto again;
+   }
+
+   item->type = type;
+   /* line is NUL-terminated by the logger, so length+1 copies the terminator too. */
+   buffer_copy( line, 0, item->buffer, length+1 );
+}
+
+/* Hook the console into the logger and allocate its history rings (2 KB input, 4 KB log).
+ * The $raw type is deliberately excluded from the listener filter. */
+void _engine_console_init(void)
+{
+   _log_add_listener( _log_listener, ($info | $ok | $warning | $error | $fatal | $shell) );
+   queue_init( &_console.input_history, _heap_allocate( BYTES_KB(2) ), BYTES_KB(2) );
+   queue_init( &_console.log_history, _heap_allocate( BYTES_KB(4) ), BYTES_KB(4) );
+}
i32 main( i32 argc, const c8 *argv[] )
{
_exit_init();
+ _log_init();
_options_init( argc, argv );
const c8 *arg;
_engine_ui_init();
_input_init();
_console_init();
+ _engine_console_init();
L_new_frame:;
f64 now = glfwGetTime();
+/* Resolve a queue offset to the caller-visible payload pointer. Offsets now address the
+ * queue_item header record, so step over it to reach the data. */
void *queue_data( struct queue_allocator *queue, u32 offset )
{
-   return queue->buffer + offset;
+   struct queue_item *item = queue->buffer + offset;
+   return item->data;
}
void *queue_tail_data( struct queue_allocator *queue )
else return NULL;
}
+/* Return the payload of the item at the queue head, or NULL when the queue is empty. */
+void *queue_head_data( struct queue_allocator *queue )
+{
+   if( !queue->allocation_count )
+      return NULL;
+
+   return queue_data( queue, queue->head_offset );
+}
+
u32 queue_offset( struct queue_allocator *queue, void *pointer )
{
return pointer - queue->buffer;
+#include "common_thread_api.h"
+#include "generated/threads.c"
+
+#include <threads.h>
+
+/* Per-thread stack of async group bitmasks; index 0 is the implicit empty base set
+ * (zero-initialized static storage). */
+struct thread_context
+{
+   u16 async_groups[ 8 ];
+   u32 async_group_depth;
+}
+_thread_contexts[ k_thread_count ];
+
+/* Look up the generated thread_info record; hard-asserts on an out-of-range id. */
+struct thread_info *_get_thread_info( enum thread_id thread )
+{
+   ASSERT_CRITICAL( thread < k_thread_count );
+   return &_thread_infos[ thread ];
+}
+
+/* True when the thread has ALL of the given flag bits set. */
+bool _thread_has_flags( enum thread_id thread, u32 flags )
+{
+   return (_get_thread_info( thread )->flags & flags) == flags;
+}
+
+/* Push a group bitmask onto the calling thread's stack. Unless 'exclusive', the new set
+ * also inherits the groups already active below it. Pair with _async_pop_groups(). */
+void _async_push_groups( u16 groups, bool exclusive )
+{
+   struct thread_context *context = &_thread_contexts[ _get_thread_id() ];
+
+   /* FIX: we write async_groups[ depth+1 ] below, so the NEXT slot must be in range. The
+    * old check (depth < count) allowed a write one past the end of the array. */
+   ASSERT_CRITICAL( context->async_group_depth+1 < ARRAY_COUNT( context->async_groups ) );
+   context->async_group_depth ++;
+   if( !exclusive )
+      groups |= context->async_groups[ context->async_group_depth-1 ];
+   context->async_groups[ context->async_group_depth ] = groups;
+}
+
+/* Pop the group set pushed by the matching _async_push_groups() call. */
+void _async_pop_groups(void)
+{
+   struct thread_context *context = &_thread_contexts[ _get_thread_id() ];
+   ASSERT_CRITICAL( context->async_group_depth );
+   context->async_group_depth --;
+}
+
+/* Return the group bitmask currently active on the calling thread (0 at depth zero,
+ * since the context array is zero-initialized). */
+u16 _async_get_groups(void)
+{
+   struct thread_context *context = &_thread_contexts[ _get_thread_id() ];
+   return context->async_groups[ context->async_group_depth ];
+}
+
+/* Global per-group outstanding-task counters, guarded by count_lock. */
+struct
+{
+   i16 group_counts[ 16 ];
+   mtx_t count_lock;
+}
+_async;
+
+/* One ring-buffer backed task queue per engine thread; only threads configured with a
+ * queue_size_m get backing storage (see _async_init). */
+struct task_queue
+{
+   u32 count;
+
+   /* blocking_signal: producers wait for ring space; work_signal: consumer waits for tasks. */
+   cnd_t blocking_signal, work_signal;
+   /* lock guards count and the ring; data_lock is held for the whole
+    * _task_new()..task_send() allocation window. */
+   mtx_t lock, data_lock;
+   struct queue_allocator queue;
+
+   struct task *allocating_task;
+}
+static _task_queues[ k_thread_count ];
+
+struct task
+{
+   const c8 *alloc_debug_info;
+
+   /* target_thread is written by _task_new(); task_send() overwrites the union with the
+    * callback, so target_thread is only meaningful during the allocation window. */
+   union
+   {
+      void (*fn)( struct task *task );
+      enum thread_id target_thread;
+   };
+
+   u32 buffer_size;
+   u16 groups, unused0;
+
+   /* Trailing payload. NOTE(review): a flexible array member inside a union is a compiler
+    * extension rather than standard C11 — confirm all target compilers accept it. */
+   union
+   {
+      u64 _force_8byte_align[];
+      u8 buffer[];
+   };
+};
+
+/* Each OS thread records its engine thread id in TLS so the queue/group helpers can
+ * identify the calling thread without arguments. */
+_Thread_local enum thread_id _thread_id;
+
+void _set_thread_id( enum thread_id id )
+{
+   _thread_id = id;
+}
+
+enum thread_id _get_thread_id(void)
+{
+   return _thread_id;
+}
+
+/* Initialize the group-count lock and, for every thread whose generated thread_info has a
+ * non-zero queue_size_m, the thread's task queue (locks, condition variables and a
+ * heap-allocated ring of queue_size_m megabytes). */
+void _async_init( void )
+{
+   ASSERT_CRITICAL( mtx_init( &_async.count_lock, mtx_plain ) == thrd_success );
+
+   for( u32 i=0; i<k_thread_count; i ++ )
+   {
+      struct thread_info *thread_info = _get_thread_info( i );
+
+      if( thread_info->queue_size_m )
+      {
+         struct task_queue *queue = &_task_queues[ i ];
+         ASSERT_CRITICAL( mtx_init( &queue->lock, mtx_plain ) == thrd_success );
+         ASSERT_CRITICAL( mtx_init( &queue->data_lock, mtx_plain ) == thrd_success );
+         ASSERT_CRITICAL( cnd_init( &queue->blocking_signal ) == thrd_success );
+         ASSERT_CRITICAL( cnd_init( &queue->work_signal ) == thrd_success );
+         u32 bytes = BYTES_MB(thread_info->queue_size_m);
+         queue->queue.buffer = _heap_allocate( bytes );
+         queue->queue.size = bytes;
+      }
+   }
+}
+
+/* Adjust the outstanding-task counter of every group set in the bitmask by 'dir' (+1/-1).
+ * NOTE(review): the $log below runs while count_lock is held — verify no log listener can
+ * re-enter the async system, and confirm this per-change warning is intended to ship and
+ * not debug tracing. */
+static void _async_group_increment( u16 groups, i16 dir )
+{
+   if( !groups )
+      return;
+
+   ASSERT_CRITICAL( mtx_lock( &_async.count_lock ) == thrd_success );
+   for( u16 i=0; i<16; i ++ )
+   {
+      if( (groups >> i) & 0x1 )
+      {
+         _async.group_counts[i] += dir;
+         /* Sanity bounds: counters must never go negative or grow unboundedly. */
+         ASSERT_CRITICAL( _async.group_counts[i] >= 0 );
+         ASSERT_CRITICAL( _async.group_counts[i] <= 2048 );
+         $log( $warning, {"The task count for group "}, $unsigned(i), {" has "}, {dir>0? "increased": "decreased"},
+               {" to "}, $unsigned(_async.group_counts[i]) );
+      }
+   }
+   ASSERT_CRITICAL( mtx_unlock( &_async.count_lock ) == thrd_success );
+}
+
+/* Read the outstanding-task counter for a single group bit. 'group' must be a non-zero
+ * mask; only its lowest set bit is consulted. */
+i16 _async_group_count( u16 group )
+{
+   ASSERT_CRITICAL( group );
+   u32 index = __builtin_ctz( (u32)group );
+   ASSERT_CRITICAL( mtx_lock( &_async.count_lock ) == thrd_success );
+   i16 count = _async.group_counts[ index ];
+   ASSERT_CRITICAL( mtx_unlock( &_async.count_lock ) == thrd_success );
+   return count;
+}
+
+/* Map a thread id to its task queue. Valid ids only; threads configured without a queue
+ * return a record with zero-sized storage. */
+static struct task_queue *_get_thread_task_queue( enum thread_id thread )
+{
+   ASSERT_CRITICAL( thread < k_thread_count );
+
+   return &_task_queues[ thread ];
+}
+
+/* True when a task with this payload size could EVER fit in the thread's ring — a
+ * capacity check only; current occupancy is not considered. */
+bool _task_queue_can_fit( enum thread_id thread, u32 bytes )
+{
+   struct task_queue *queue = _get_thread_task_queue( thread );
+   u32 total_size = sizeof(struct task) + bytes;
+   return total_size <= queue->queue.size;
+}
+
+/* Allocate a task on another thread's queue. On success the queue's data_lock stays held
+ * until the matching task_send(); only one allocation window may be open per queue.
+ * Returns NULL only for ASYNC_NONBLOCKING allocations that could not fit. */
+struct task *_task_new( enum thread_id target_thread, u32 buffer_size, u32 async_flags, const c8 *debug_info )
+{
+   /* Sending to yourself would deadlock the blocking paths. */
+   ASSERT_CRITICAL( target_thread != _get_thread_id() );
+
+   struct task_queue *queue = _get_thread_task_queue( target_thread );
+   struct queue_allocator *ring = &queue->queue;
+   u32 total_size = sizeof(struct task) + buffer_size;
+   ASSERT_CRITICAL( total_size <= queue->queue.size );
+
+   ASSERT_CRITICAL( mtx_lock( &queue->data_lock ) == thrd_success );
+   if( queue->allocating_task )
+   {
+      $log( $fatal, {"Overlapping async allocations. \n"
+                     " Previous allocation began at: "}, {queue->allocating_task->alloc_debug_info}, {"\n"
+                     " Overlapping call at: "}, {debug_info} );
+      _fatal_exit();
+   }
+   ASSERT_CRITICAL( mtx_lock( &queue->lock ) == thrd_success );
+
+   struct task *task = queue_alloc( ring, total_size );
+   while( ((async_flags & ASYNC_CRITICAL) || !(async_flags & ASYNC_NONBLOCKING)) && !task )
+   {
+      if( async_flags & ASYNC_CRITICAL )
+      {
+         $log( $fatal, { "Too many tasks allocated on this queue, so we cant make this (critical allocation)"} );
+         _fatal_exit();
+      }
+
+      /* Blocking allocation: wait for the consumer to free ring space. */
+      ASSERT_CRITICAL( cnd_wait( &queue->blocking_signal, &queue->lock ) == thrd_success );
+      task = queue_alloc( ring, total_size );
+   }
+
+   if( task )
+   {
+      queue->allocating_task = task;
+      task->target_thread = target_thread;
+      task->alloc_debug_info = debug_info;
+      task->buffer_size = buffer_size;
+      task->groups = _async_get_groups();
+      _async_group_increment( task->groups, +1 );
+
+      ASSERT_CRITICAL( mtx_unlock( &queue->lock ) == thrd_success );
+      return task;
+   }
+   else
+   {
+      ASSERT_CRITICAL( mtx_unlock( &queue->lock ) == thrd_success );
+      /* FIX: this failure path returns without a matching task_send(), so the data_lock
+       * taken above must be released here — the old code leaked it, deadlocking the next
+       * allocation on this queue. */
+      ASSERT_CRITICAL( mtx_unlock( &queue->data_lock ) == thrd_success );
+      return NULL;
+   }
+}
+
+/* Accessors for the caller-owned payload region trailing the task header. */
+void *task_buffer( struct task *task )
+{
+   return task->buffer;
+}
+
+u32 task_buffer_size( struct task *task )
+{
+   return task->buffer_size;
+}
+
+/* Commit (fn != NULL) or cancel (fn == NULL) a task allocated by _task_new(), closing the
+ * allocation window and waking the consumer thread. */
+void task_send( struct task *task, void (*fn)( struct task *task ) )
+{
+   /* FIX: validate the pointer before dereferencing it — the old code read
+    * task->target_thread first and asserted afterwards. */
+   ASSERT_CRITICAL( task );
+
+   struct task_queue *queue = _get_thread_task_queue( task->target_thread );
+   ASSERT_CRITICAL( queue->allocating_task == task );
+
+   /* FIX: fn shares a union slot with target_thread, so a cancelled send must explicitly
+    * clear it; the old code left the stale thread id in place, which the consumer would
+    * treat as a non-NULL function pointer and call through garbage. target_thread must not
+    * be read after this store. */
+   task->fn = fn;
+   if( !fn )
+      _async_group_increment( task->groups, -1 );
+   queue->allocating_task = NULL;
+
+   ASSERT_CRITICAL( mtx_lock( &queue->lock ) == thrd_success );
+   queue->count ++;
+   ASSERT_CRITICAL( mtx_unlock( &queue->lock ) == thrd_success );
+
+   /* Releasing data_lock ends the allocation window opened in _task_new(). */
+   ASSERT_CRITICAL( mtx_unlock( &queue->data_lock ) == thrd_success );
+   ASSERT_CRITICAL( cnd_signal( &queue->work_signal ) == thrd_success );
+}
+
+/* Pop and run one task from the calling thread's own queue; returns true when a task was
+ * consumed. When 'blocking', sleeps on work_signal until work arrives. */
+bool _task_queue_process( bool blocking )
+{
+   struct task_queue *queue = _get_thread_task_queue( _get_thread_id() );
+   ASSERT_CRITICAL( mtx_lock( &queue->lock ) == thrd_success );
+
+   while( blocking && (queue->count == 0) )
+      ASSERT_CRITICAL( cnd_wait( &queue->work_signal, &queue->lock ) == thrd_success );
+
+   /* NOTE(review): the task pointer is used after the lock is dropped — this assumes the
+    * owning thread is the queue's only consumer; confirm. */
+   struct task *task = queue_tail_data( &queue->queue );
+   ASSERT_CRITICAL( mtx_unlock( &queue->lock ) == thrd_success );
+
+   if( task )
+   {
+      /* task fn can be NULL if it was cancelled (so this is a NOP). Makes code easier if we do this instead of
+       * reverting the queue to cancel. */
+      if( task->fn )
+      {
+         /* Run the callback inside the task's group scope so nested allocations inherit it. */
+         _async_push_groups( task->groups, 0 );
+         task->fn( task );
+         _async_pop_groups();
+         _async_group_increment( task->groups, -1 );
+      }
+
+      ASSERT_CRITICAL( mtx_lock( &queue->lock ) == thrd_success );
+      queue_pop( &queue->queue );
+      queue->count --;
+      ASSERT_CRITICAL( mtx_unlock( &queue->lock ) == thrd_success );
+
+      /* Wake any producer blocked in _task_new() waiting for ring space. */
+      ASSERT_CRITICAL( cnd_signal( &queue->blocking_signal ) == thrd_success );
+      return 1;
+   }
+   else
+      return 0;
+}
+/* Return a pointer to a keyvalue's value text, or NULL for the null offset / frame nodes.
+ * out_length, when provided, is now ALWAYS written (zero on failure). */
c8 *keyvalues_value( struct keyvalues *kvs, u32 kv_offset, u32 *out_length )
{
-   if( kv_offset == 0 )
-      return NULL;
-   if( keyvalues_type( kvs, kv_offset ) == k_keyvalue_type_frame )
-      return NULL;
+   c8 *value = NULL;
+   u32 length = 0;
+
+   if( kv_offset == 0 ) {}
+   else if( keyvalues_type( kvs, kv_offset ) == k_keyvalue_type_frame ) {}
   else
   {
      struct keyvalue *kv = stack_pointer( kvs->stack, kv_offset );
-      if( out_length )
-         *out_length = kv->value.length;
-      return stack_pointer( kvs->stack, kv->key_offset + kv->value.offset_from_key );
+      length = kv->value.length;
+      value = stack_pointer( kvs->stack, kv->key_offset + kv->value.offset_from_key );
   }
+   if( out_length )
+      *out_length = length;
+   return value;
}
u32 keyvalues_get_next( struct keyvalues *kvs, u32 kv_offset )
#include <stdio.h>
#include <string.h>
+#include <threads.h>
static struct stream _log_stream;
static struct stream _stdout_stream;
static struct stream _journal_stream;
+/* _lock serializes a whole log event (_log_event().._log_end_event()); _writing_event is
+ * true between those two calls so continuation lines can be re-indented. */
+static mtx_t _lock;
+static bool _writing_event;
+
+/* Line-assembly state shared by the log stream and its listeners. Lines wrap at 120
+ * characters (one byte reserved for the NUL terminator). */
+struct
+{
+   c8 line[ 121 ];
+   u32 line_length;
+
+   /* True while skipping an ANSI escape sequence. */
+   bool wait;
+   u32 current_type;
+
+   struct log_line_callback
+   {
+      u32 filter;
+      void( *fn )( const c8 *line, u32 length, u32 type );
+   }
+   callbacks[ 8 ];
+   u32 callback_count;
+}
+_listeners;
+
+/* Register a per-line callback; it fires for events whose type matches 'filter'.
+ * Listeners cannot be removed. */
+void _log_add_listener( void (*fn)(const c8 *line, u32 length, u32 type), u32 filter )
+{
+   ASSERT_CRITICAL( _listeners.callback_count < ARRAY_COUNT( _listeners.callbacks ) );
+   struct log_line_callback *cb = &_listeners.callbacks[ _listeners.callback_count ++ ];
+   cb->fn = fn;
+   cb->filter = filter;
+}
+/* Procedural write hook for the log stream: assembles the byte stream into lines, strips
+ * ANSI escape sequences, and on each completed (or wrapped) line writes it to the journal,
+ * the registered listeners and stdout. */
static u32 _log_stream_passthrough( struct stream *stream, const void *buffer, u32 length )
{
-   stream_write( &_stdout_stream, buffer, length );
-   if( _journal_stream.flags & k_stream_write )
+   for( u32 i=0; i<length; i ++ )
   {
-      static bool wait = 0;
-      for( u32 i=0; i<length; i ++ )
+      u8 c = ((u8 *)buffer)[i];
+      if( _listeners.wait )
      {
-         u8 c = ((u8 *)buffer)[i];
-         if( wait )
-         {
-            if( c == 'm' )
-               wait = 0;
-         }
+         /* Inside an escape sequence; 'm' terminates it. */
+         if( c == 'm' )
+            _listeners.wait = 0;
+      }
+      else
+      {
+         if( c == 0x1B )
+            _listeners.wait = 1;
         else
         {
-            if( c == 0x1B )
-               wait = 1;
-            else
-               stream_write( &_journal_stream, (c8 []){ c }, 1 );
+            bool nl = ( c == '\n' );
+            _listeners.line[ _listeners.line_length ++ ] = c;
+
+            bool wrap = (_listeners.line_length == (ARRAY_COUNT( _listeners.line )-1));
+            if( wrap || nl )
+            {
+               _listeners.line[ _listeners.line_length ] = 0;
+
+               if( _journal_stream.flags & k_stream_write )
+                  stream_write( &_journal_stream, _listeners.line, _listeners.line_length );
+
+               for( u32 j=0; j<_listeners.callback_count; j ++ )
+               {
+                  struct log_line_callback *cb = &_listeners.callbacks[j];
+                  if( cb->filter & _listeners.current_type )
+                     cb->fn( _listeners.line, _listeners.line_length, _listeners.current_type );
+               }
+               stream_write( &_stdout_stream, _listeners.line, _listeners.line_length );
+               _listeners.line_length = 0;
+            }
+
+            /* Continuation lines of a multi-line event are padded to align under the
+             * 23-column event header written by _log_event(). */
+            if( _writing_event && (nl||wrap) )
+            {
+               while( _listeners.line_length < 23 )
+                  _listeners.line[ _listeners.line_length ++ ] = ' ';
+               _listeners.line[ _listeners.line_length ++ ] = '.';
+            }
         }
      }
   }
+
   stream->offset += length;
   return length;
}
_journal_stream.flags = k_stream_posix | k_stream_write;
}
-struct stream *_get_console_stream()
-{
- static bool init = 0;
- if( !init )
- {
- init = 1;
- _stdout_stream.posix_stream = stdout;
- _stdout_stream.offset = 0;
- _stdout_stream.buffer_length = 0;
- _stdout_stream.flags = k_stream_posix | k_stream_write;
-
- _log_stream.flags = k_stream_procedural | k_stream_write;
- _log_stream.buffer_length = 0;
- _log_stream.offset = 0;
- _log_stream.write_procedure = _log_stream_passthrough;
- }
- return &_log_stream;
-}
-
struct stream *_log_event( u32 type, const c8 *code_location )
{
- struct stream *output = _get_console_stream();
+ ASSERT_CRITICAL( mtx_lock( &_lock ) == thrd_success );
+ _listeners.current_type = type;
+
+ struct stream *output = &_log_stream;
string_append( output, KBLK, 0 );
u32 line_start = output->offset;
}
string_append( output, code_location, 0 );
- while( (output->offset-line_start) < 32 ) string_append_c8( output, ' ' );
+ while( (output->offset-line_start) < 20 ) string_append_c8( output, ' ' );
if( type == $error ) string_append( output, KRED "ERR|", 0 );
else if( type == $warning ) string_append( output, KYEL "WRN|", 0 );
else if( type == $ok ) string_append( output, KGRN "OK |", 0 );
else if( type == $fatal ) string_append( output, KRED "!!!|", 0 );
else if( type == $shell ) string_append( output, KBLU "SHL|", 0 );
string_append( output, KNRM, 0 );
+
+ _writing_event = 1;
return output;
}
+
+/* Close the event opened by _log_event(): clear the continuation flag first (so the
+ * trailing newline is not padded), terminate the line, then release the log lock. */
+void _log_end_event(void)
+{
+   _writing_event = 0;
+   string_append( &_log_stream, "\n", 0 );
+   ASSERT_CRITICAL( mtx_unlock( &_lock ) == thrd_success );
+}
+
+/* Set up the log mutex and the stdout/procedural log streams. Must run before the first
+ * $log — main() calls this right after _exit_init(). */
+void _log_init(void)
+{
+   ASSERT_CRITICAL( mtx_init( &_lock, mtx_plain ) == thrd_success );
+   _stdout_stream.posix_stream = stdout;
+   _stdout_stream.offset = 0;
+   _stdout_stream.buffer_length = 0;
+   _stdout_stream.flags = k_stream_posix | k_stream_write;
+   /* The log stream routes every write through _log_stream_passthrough. */
+   _log_stream.flags = k_stream_procedural | k_stream_write;
+   _log_stream.buffer_length = 0;
+   _log_stream.offset = 0;
+   _log_stream.write_procedure = _log_stream_passthrough;
+}
void _options_check_end(void)
{
- struct stream *console = _get_console_stream();
if( _option_long( "help", "Helps you" ) )
{
for( u32 i=0; i<_options.option_count; i ++ )
struct option *option = &_options.options[i];
const c8 *desc = option->desc? option->desc: "";
- u32 base_offset = console->offset;
+ struct stream *log = _log_event( $raw, NULL );
+
+ u32 base_offset = log->offset;
if( option->type == k_option_type_flag || option->type == k_option_type_option )
{
- string_append( console, "-", 0 );
- string_append_c8( console, option->alias_c );
+ $v_string( log, {"-"}, {(const c8[]){ option->alias_c, 0 }} );
if( option->type == k_option_type_option )
- string_append( console, " <value>", 0 );
+ $v_string( log, {" <value>"} );
}
if( option->type == k_option_type_long_flag || option->type == k_option_type_long_option )
{
- string_append( console, "--", 0 );
- string_append( console, option->alias, 0 );
+ $v_string( log, {"--"}, {option->alias} );
if( option->type == k_option_type_long_option )
- string_append( console, "=<value>", 0 );
+ $v_string( log, {"=<value>"} );
}
- while( console->offset < base_offset + 60 )
- string_append_c8( console, ' ' );
-
- string_append( console, desc, 0 );
- string_append_c8( console, '\n' );
+ while( log->offset < base_offset + 60 )
+ string_append_c8( log, ' ' );
+ string_append( log, desc, 0 );
+ string_append_c8( log, '\n' );
+ _log_end_event();
}
_normal_exit();
}
{
if( !(argument->used & (0x1<<j)) )
{
- string_append( console, "Unknown option '", 0 );
- string_append_c8( console, argument->name[j] );
- string_append( console, "'\n", 0 );
+ $log( $error, {"Unknown option '"}, {(c8[]){argument->name[j],0}}, {"'\n"} );
}
}
}
else
{
- string_append( console, "Unknown option '", 0 );
- string_append( console, argument->name, 0 );
- string_append( console, "'\n", 0 );
+ $log( $error, {"Unknown option '"}, {argument->name}, {"'\n"} );
}
errors = 1;
}
if( stream->flags & k_stream_posix )
return length;
- if( (stream->offset + length) > stream->buffer_length )
+ if( (stream->offset + length) >= stream->buffer_length )
return stream->buffer_length - stream->offset;
else return length;
}
i32 main( i32 argc, const c8 *argv[] )
{
_exit_init();
-
+ _log_init();
_options_init( argc, argv );
_terminal_init();
_options_check_end();
}
static _input;
+/* Metacompiler state for 'thread' keyvalue blocks: accumulates the generated enum entries
+ * and thread_info initializers (emitted to generated/threads.h and generated/threads.c). */
+struct
+{
+   bool using;
+   struct stream enums, structures;
+}
+static _threads;
+
struct
{
bool using;
k_target_input,
k_target_shader,
k_target_cvar,
- k_target_ccmd
+ k_target_ccmd,
+ k_target_thread
}
target;
u32 feature_count;
}
else if( compare_buffers( block_key, 0, "shader", 0 ))
context.target = k_target_shader;
+ else if( compare_buffers( block_key, 0, "thread", 0 ))
+ {
+ _threads.using = 1;
+ context.target = k_target_thread;
+ const c8 *name = keyvalues_read_string( kvs, block, "name", NULL );
+ ASSERT_CRITICAL( name );
+
+ $v_string( &_threads.enums, {" k_thread_"},{name}, {",\n"} );
+ $v_string( &_threads.structures, {" [k_thread_"},{name},{"]=\n {\n"} );
+ $v_string( &_threads.structures, {" .name = \""}, {name}, {"\",\n"} );
+
+ u32 queue_size_m = 0;
+ if( keyvalues_read_u32s( kvs, block, "queue_size_m", NULL, &queue_size_m, 1 ) )
+ $v_string( &_threads.structures, {" .queue_size_m = "}, $unsigned(queue_size_m), {",\n"} );
+
+ $v_string( &_threads.structures, {" },\n"} );
+ }
else
{
$log( $warning, {"We don't have a compiler definition for block '"}, {block_key}, {"'"} );
stream_open_buffer_write( &_console.command_definitions, _heap_allocate(size), size, options );
stream_open_buffer_write( &_console.command_prototypes, _heap_allocate(size), size, options );
+ stream_open_buffer_write( &_threads.enums, _heap_allocate(size), size, options );
+ stream_open_buffer_write( &_threads.structures, _heap_allocate(size), size, options );
+
if( platform == k_platform_linux ) _metacompiler.enabled_features[0] = "linux";
if( platform == k_platform_windows ) _metacompiler.enabled_features[0] = "windows";
stream_close( &input_source );
}
+ if( _threads.using )
+ {
+ struct stream thread_header, thread_source;
+ ASSERT_CRITICAL( stream_open_file( &thread_header, "generated/threads.h", k_stream_write ) );
+ $v_string( &thread_header, {"enum thread_id\n{\n"},
+ {string_get( &_threads.enums )},
+ {" k_thread_count\n};\n"} );
+ stream_close( &thread_header );
+
+ ASSERT_CRITICAL( stream_open_file( &thread_source, "generated/threads.c", k_stream_write ) );
+ $v_string( &thread_source, {"struct thread_info _thread_infos[k_thread_count] = \n{\n"},
+ {string_get( &_threads.structures )}, {"};\n"} );
+ stream_close( &thread_source );
+ }
+
if( _console.using )
{
struct stream console_header, console_source;
stream_open_stack( &command_string, _temporary_stack_allocator(), k_stream_null_terminate );
$v_string( &command_string, {"taskset -c 0-"}, $unsigned(processors),
- {" zig cc -Wall -Wno-unused-function -std=c99 -D_DEFAULT_SOURCE -lm \\\n"},
+ {" zig cc -Wall -Wno-unused-function -std=c11 -D_DEFAULT_SOURCE -lm \\\n"},
{" -include \"common_api.h\" \\\n"} );
if( use_tsan ) $v_string( &command_string, {" -fsanitize=thread -lasan \\\n"} );