/* Storage for the global async-call system state (declared extern in the
 * header — TODO confirm; this TU provides the definition). */
struct vg_async vg_async;

/* Queries defined elsewhere in the engine: which thread we are running on,
 * and the engine's current lifecycle status. */
enum vg_thread_purpose vg_thread_purpose(void);
enum engine_status _vg_engine_status(void);
9 * Allocate an asynchronous call with a bit of memory
11 vg_async_item
*vg_async_alloc( u32 size
)
13 /* ditch out here if engine crashed. this serves as the 'quit checking' */
14 if( _vg_engine_status() == k_engine_status_crashed
){
15 longjmp( vg
.env_loader_exit
, 1 );
18 SDL_AtomicLock( &vg_async
.sl_index
);
20 u32 total_allocation
= vg_align8(size
) + vg_align8(sizeof(vg_async_item
)),
21 remaining
= vg_linear_remaining( vg_async
.buffer
),
22 capacity
= vg_linear_get_capacity( vg_async
.buffer
);
24 if( total_allocation
> capacity
){
25 SDL_AtomicUnlock( &vg_async
.sl_index
);
26 vg_error( "Requested: %umb. Buffer size: %umb\n",
27 (total_allocation
/1024)/1024,
28 (capacity
/1024)/1024 );
30 vg_fatal_error( "async alloc invalid size\n" );
33 if( total_allocation
> remaining
){
34 SDL_AtomicUnlock( &vg_async
.sl_index
);
35 SDL_SemWait( vg_async
.sem_wait_for_flush
);
36 SDL_AtomicLock( &vg_async
.sl_index
);
38 remaining
= vg_linear_remaining( vg_async
.buffer
);
39 capacity
= vg_linear_get_capacity( vg_async
.buffer
);
42 void *block
= vg_linear_alloc( vg_async
.buffer
, total_allocation
);
44 vg_async_item
*entry
= block
;
47 if( size
) entry
->payload
= ((u8
*)block
) + vg_align8(sizeof(vg_async_item
));
48 else entry
->payload
= NULL
;
51 entry
->fn_runner
= NULL
;
54 vg_async
.end
->next
= entry
;
57 vg_async
.start
= entry
;
61 SDL_AtomicUnlock( &vg_async
.sl_index
);
67 * Wait until the current stack of async calls is completely flushed out
69 void vg_async_stall(void)
71 vg_assert_thread(k_thread_purpose_loader
);
73 vg_info( "async_stall: %d\n", SDL_SemValue( vg_async
.sem_wait_for_flush
) );
75 SDL_SemWait( vg_async
.sem_wait_for_flush
);
/*
 * Mark the call as being filled and ready to go.
 *
 * Loader-thread only. `item` must have come from vg_async_alloc(); `runner`
 * is what the consumer thread will invoke with (payload, size). Setting
 * fn_runner under the spinlock is what publishes the item as executable.
 */
void vg_async_dispatch( vg_async_item *item,
                        void (*runner)( void *payload, u32 size ) )
{
    vg_assert_thread(k_thread_purpose_loader);

    /* Drain a pending flush signal — presumably so the semaphore reflects
     * "work outstanding" again after this dispatch; confirm intent against
     * vg_run_async_checked()'s post. */
    if( SDL_SemValue(vg_async.sem_wait_for_flush) )
        SDL_SemWait(vg_async.sem_wait_for_flush);

    SDL_AtomicLock( &vg_async.sl_index );
    item->fn_runner = runner;
    SDL_AtomicUnlock( &vg_async.sl_index );
}
94 * Make a simple async call without allocating extra.
96 void vg_async_call( void (*runner
)( void *payload
, u32 size
),
97 void *payload
, u32 size
)
99 vg_assert_thread(k_thread_purpose_loader
);
100 vg_async_item
*call
= vg_async_alloc(0);
101 call
->payload
= payload
;
103 vg_async_dispatch( call
, runner
);
107 * Run as much of the async buffer as possible
109 void vg_run_async_checked(void)
111 SDL_AtomicLock( &vg_async
.sl_index
);
113 while( vg_async
.start
){
114 vg_async_item
*entry
= vg_async
.start
;
116 if( entry
->fn_runner
){
117 entry
->fn_runner( entry
->payload
, entry
->size
);
118 vg_async
.start
= entry
->next
;
120 if( vg_async
.start
== NULL
){
123 vg_linear_clear( vg_async
.buffer
);
125 if( !SDL_SemValue( vg_async
.sem_wait_for_flush
) ){
126 SDL_SemPost( vg_async
.sem_wait_for_flush
);
131 SDL_AtomicUnlock( &vg_async
.sl_index
);
135 /* TODO: if exceed max frametime.... */
138 if( !SDL_SemValue( vg_async
.sem_wait_for_flush
) ){
139 SDL_SemPost( vg_async
.sem_wait_for_flush
);
142 SDL_AtomicUnlock( &vg_async
.sl_index
);
145 void vg_async_init(void)
147 vg_async
.sem_wait_for_flush
= SDL_CreateSemaphore(0);
148 vg_async
.buffer
= vg_create_linear_allocator( NULL
, 50*1024*1024,