add vg_engine.c
add vg_ui.c
add vg_input.c
+add vg_asset.c
ccmd
{
layer_mask ui
}
+shader
+{
+ name invisible
+}
+
shader
{
name blit
}
add vg_model.c
+add vg_material.c
add vg_entity.c
add vg_metascene.c
add vg_af.c
#pragma once
+#if 0
enum material_render_flag
{
k_material_render_additive = 0x20
};
extern const char *_shader_prop_workshop_keys[k_workshop_shader_part_max];
+#endif
--- /dev/null
+#include "vg_asset.h"
+#include "SDL3/SDL.h"
+
/* Map a pool item handle to its dense storage index (used to address the
 * parallel hashes/keys arrays and any caller-side payload array). */
u32 vg_asset_index( struct vg_asset_list *list, u16 item )
{
   return pool_index( &list->pool, item );
}
+
/* Allocate backing storage for a list of up to asset_count items: pool nodes,
 * per-item key hashes, fixed 128-byte key strings, and the guarding mutex.
 * NOTE(review): _heap_allocate results are not checked here — presumably it
 * aborts internally on failure; confirm. The key stride (128) must match the
 * c8 (*keys)[128] declaration in vg_asset.h. */
void vg_allocate_asset_list( struct vg_asset_list *list, u16 asset_count )
{
   list->pool.nodes = _heap_allocate( sizeof(struct pool_node) * (u32)asset_count );
   list->pool.count = asset_count;
   list->hashes = _heap_allocate( sizeof(u32)*(u32)asset_count );
   list->keys = _heap_allocate( 128*(u32)asset_count );
   list->mutex = SDL_CreateMutex();
   ASSERT_CRITICAL( list->mutex );
}
+
+u16 vg_asset_get( struct vg_asset_list *list, const c8 *key )
+{
+ u32 hash = u32_max( 1, buffer_djb2( key, 0 ) );
+ SDL_LockMutex( list->mutex );
+ u16 n = list->active_chain.head;
+ while(n)
+ {
+ u32 index = pool_index( &list->pool, n );
+ if( list->hashes[index] == hash )
+ {
+ if( compare_buffers( key, 0, list->keys, 0 ) )
+ {
+ pool_reference( &list->pool, n, +1 );
+ return n;
+ }
+ }
+ n = pool_next( &list->pool, n, 1 );
+ }
+ SDL_UnlockMutex( list->mutex );
+ return 0;
+}
+
/* Register a new keyed asset and take the initial reference on it.
 * NOTE(review): no duplicate-key check is made here; callers are expected to
 * try vg_asset_get() first (as _vg_material_load does) — confirm intended. */
u16 vg_asset_create( struct vg_asset_list *list, const c8 *key )
{
   SDL_LockMutex( list->mutex );
   /* list must have been set up with vg_allocate_asset_list first */
   ASSERT_CRITICAL( list->pool.nodes );
   u16 n = pool_allocate( &list->pool, &list->free_chain, &list->active_chain );
   u32 index = pool_index( &list->pool, n );
   /* copy is asserted so an over-long key fails loudly instead of truncating */
   ASSERT_CRITICAL( buffer_copy( key, 0, list->keys[index], sizeof(list->keys[index]) ) );
   /* hash clamped to >=1: hash value 0 is reserved for anonymous items */
   list->hashes[index] = u32_max( 1, buffer_djb2( list->keys[index], 0 ) );
   pool_reference( &list->pool, n, +1 );
   SDL_UnlockMutex( list->mutex );
   return n;
}
+
/* Register an unnamed asset and take the initial reference on it. The hash is
 * set to 0 so vg_asset_get() can never return it (get clamps hashes to >=1);
 * the placeholder key exists only for diagnostics. */
u16 vg_asset_create_anonymous( struct vg_asset_list *list )
{
   SDL_LockMutex( list->mutex );
   /* list must have been set up with vg_allocate_asset_list first */
   ASSERT_CRITICAL( list->pool.nodes );
   u16 n = pool_allocate( &list->pool, &list->free_chain, &list->active_chain );
   u32 index = pool_index( &list->pool, n );
   list->hashes[index] = 0;
   buffer_copy( "[anonymous]", 0, list->keys[index], sizeof(list->keys[index]) );
   pool_reference( &list->pool, n, +1 );
   SDL_UnlockMutex( list->mutex );
   return n;
}
+
/* Drop one reference to 'item'. When the count reaches zero the node is
 * returned to the free chain and 1 is returned, telling the caller to tear
 * down whatever payload the item indexed; otherwise returns 0. */
b8 vg_asset_release( struct vg_asset_list *list, u16 item )
{
   SDL_LockMutex( list->mutex );
   if( pool_reference( &list->pool, item, -1 ) == 0 )
   {
      pool_free( &list->pool, &list->free_chain, &list->active_chain, item );
      SDL_UnlockMutex( list->mutex );
      return 1;
   }
   SDL_UnlockMutex( list->mutex );
   return 0;
}
--- /dev/null
+#pragma once
+#include "foundation.h"
+#include "shader_props.h"
+
/* Generic reference-counted registry of named assets. Items are pool handles
 * (u16, 0 = invalid); all operations are serialized by 'mutex'. */
struct vg_asset_list
{
   struct pool_allocator pool;                  /* backing item pool */
   struct pool_chain free_chain, active_chain;  /* intrusive free/in-use lists */
   void *mutex;                                 /* SDL mutex guarding the list */

   u32 *hashes;        /* djb2 hash per item; 0 = anonymous (never matched) */
   c8 (*keys) [ 128 ]; /* fixed 128-byte key string per item */
};

/* Allocate backing storage for at most asset_count items. */
void vg_allocate_asset_list( struct vg_asset_list *list, u16 asset_count );

/* Handle -> dense storage index. */
u32 vg_asset_index( struct vg_asset_list *list, u16 item );
/* Find by key; takes a reference on success, returns 0 when not found. */
u16 vg_asset_get( struct vg_asset_list *list, const c8 *key );
/* Register a keyed item with one initial reference. */
u16 vg_asset_create( struct vg_asset_list *list, const c8 *key );
/* Register an unnamed item (never returned by vg_asset_get). */
u16 vg_asset_create_anonymous( struct vg_asset_list *list );
/* Drop a reference; returns 1 when the item was freed (caller cleans up). */
b8 vg_asset_release( struct vg_asset_list *list, u16 item );
#include "vg_shader.h"
#include "vg_render.h"
#include "vg_tex.h"
+#include "vg_material.h"
#include "vg_lines.h"
#include "vg_framebuffer.h"
#include "vg_console.h"
_vg_render_init();
_shader_init();
_vg_tex_init();
+ _vg_material_init();
_vg_lines_init();
if( _vg_engine_hooks.start ) _vg_engine_hooks.start();
--- /dev/null
+#include "vg_asset.h"
+#include "vg_material.h"
+#include "vg_shader.h"
+
+// FIXME!!!!!!!!!!!!!!!!!!!! QSORT
+#include <stdlib.h>
+
/* Module-private singleton: the material registry plus its dense payload
 * array (addressed via vg_asset_index on the same asset list). */
struct
{
   struct vg_asset_list asset_list;
   struct vg_material *assets;
}
_vg_material;

/* Allocate storage for up to VG_ASSET_MATERIALS_MAX resident materials. */
void _vg_material_init(void)
{
   _vg_material.assets = _heap_allocate( sizeof(struct vg_material) * VG_ASSET_MATERIALS_MAX );
   vg_allocate_asset_list( &_vg_material.asset_list, VG_ASSET_MATERIALS_MAX );
}
+
+i32 compar( const void *a, const void *b )
+{
+ return ((struct sort_index *)a)->value - ((struct sort_index *)b)->value;
+}
+
+void index_sort( struct sort_index *indices, u32 indice_count )
+{
+ qsort( indices, indice_count, sizeof(struct sort_index), compar );
+}
+
+u16 _vg_material_load( const c8 *path, struct stack_allocator *stack )
+{
+ u16 id = vg_asset_get( &_vg_material.asset_list, path );
+ if( id )
+ return id;
+
+ $log( $info, {"Loading material asset: "}, {path} );
+ id = vg_asset_create( &_vg_material.asset_list, path );
+
+ struct vg_material *material = &_vg_material.assets[ vg_asset_index( &_vg_material.asset_list, id ) ];
+ zero_buffer( material, sizeof(struct vg_material) );
+ if( !keyvalues_read_file( &material->kvs, path, stack ) )
+ {
+ $log( $error, {"Failed to read material file"} );
+ return 0;
+ }
+
+ u32 tilemap_block = keyvalues_get( &material->kvs, 0, "tilemap", 0 );
+ if( tilemap_block )
+ {
+ material->flags |= VG_MATERIAL_TILEMAPPED;
+ material->tilemap.set_count = 0;
+ u32 tileset_it = 0;
+ while( keyvalues_foreach( &material->kvs, &tileset_it, tilemap_block, "tileset" ) )
+ material->tilemap.set_count ++;
+
+ material->tilemap.sets = stack_allocate( stack, sizeof(struct tex_tileset) * material->tilemap.set_count, 8, "Tilesets" );
+
+ i32 image_size[2];
+ keyvalues_read_i32s( &material->kvs, tilemap_block, "size", (i32[]){1024,1024}, image_size, 2 );
+ material->tilemap.image_size[0] = image_size[0];
+ material->tilemap.image_size[1] = image_size[1];
+
+ material->tilemap.tiles = stack_allocate( stack, 0, 8, "Tiles" );
+ struct sort_index *sort_indices = stack_allocate( _temporary_stack_allocator(), 0, 8, "" );
+
+ tileset_it = 0;
+ u32 i = 0;
+ u32 total_tiles = 0;
+ while( keyvalues_foreach( &material->kvs, &tileset_it, tilemap_block, "tileset" ) )
+ {
+ struct tex_tileset *tileset = &material->tilemap.sets[i ++];
+ tileset->alias = keyvalues_read_string( &material->kvs, tileset_it, "name", "null" );
+
+ i32 grid[2], size[2], co[2];
+ keyvalues_read_i32s( &material->kvs, tileset_it, "grid", (i32[]){1,1}, grid, 2 );
+ ASSERT_CRITICAL( (grid[0] >= 1) && (grid[1] >= 1) &&
+ (grid[0] <= 64) && (grid[1] <=64) );
+
+ keyvalues_read_i32s( &material->kvs, tileset_it, "co", (i32[]){0,0}, co, 2 );
+ keyvalues_read_i32s( &material->kvs, tileset_it, "size", (i32[]){0,0}, size, 2 );
+ ASSERT_CRITICAL( (size[0] >= 1) && (size[1] >= 1) &&
+ (size[0] <= 1024) && (size[1] <=1024) );
+
+ tileset->co[0] = co[0];
+ tileset->co[1] = co[1];
+ tileset->grid[0] = grid[0];
+ tileset->grid[1] = grid[1];
+ tileset->size[0] = size[0];
+ tileset->size[1] = size[1];
+ tileset->tile_start = total_tiles;
+ tileset->tile_count = grid[0]*grid[1];
+
+ stack_extend_last( stack, material->tilemap.tiles, sizeof(struct tex_tile)*tileset->tile_count );
+ stack_extend_last( _temporary_stack_allocator(), sort_indices, sizeof(struct sort_index)*tileset->tile_count );
+
+ for( u32 j=0; j<tileset->tile_count; j ++ )
+ {
+ u32 index = total_tiles + j;
+ sort_indices[ index ].index = index;
+ sort_indices[ index ].value = size[1];
+ material->tilemap.tiles[ index ].size[0] = size[0];
+ material->tilemap.tiles[ index ].size[1] = size[1];
+ }
+
+ total_tiles += tileset->tile_count;
+ }
+
+ /* Pack rectangles into their places (working out the pixel_root s) */
+ index_sort( sort_indices, total_tiles );
+ material->tilemap.sheet_size[0] = 256;
+ material->tilemap.sheet_size[1] = 256;
+ $log( $info, {"total tiles: "}, $signed( total_tiles ) );
+
+ try_again:
+ {
+ i32 co[2] = {0,0},
+ rh = 0;
+ for( u32 j=0; j<total_tiles; j ++ )
+ {
+ struct tex_tile *tile = &material->tilemap.tiles[ sort_indices[(total_tiles-j)-1].index ];
+ i32 tw = tile->size[0] + VG_TEX_TILEMAP_PADDING*2,
+ th = tile->size[1] + VG_TEX_TILEMAP_PADDING*2;
+ if( rh == 0 ) rh = th;
+
+ if( co[0] + tw > material->tilemap.sheet_size[0] )
+ {
+ co[0] = 0;
+ co[1] += rh;
+ rh = th;
+
+ if( co[1] + th > material->tilemap.sheet_size[1] )
+ {
+ if( material->tilemap.sheet_size[0] >= 2048 ||
+ material->tilemap.sheet_size[1] >= 2048 )
+ {
+ $log( $error, {"Tile mapping failed because sheet became too large (>2048)"} );
+ ASSERT_CRITICAL( 0 );
+ }
+ material->tilemap.sheet_size[0] *= 2;
+ material->tilemap.sheet_size[1] *= 2;
+ goto try_again;
+ }
+ }
+
+ tile->pixel_root[0] = co[0]+VG_TEX_TILEMAP_PADDING;
+ tile->pixel_root[1] = co[1]+VG_TEX_TILEMAP_PADDING;
+ co[0] += tw;
+ }
+ }
+ }
+
+
+ u32 shader_block = keyvalues_get_child( &material->kvs, 0, 0 );
+ if( shader_block )
+ {
+ const c8 *shader_alias = keyvalues_key( &material->kvs, shader_block, NULL );
+ material->shader_id = _get_shader_id( shader_alias );
+
+ if( material->shader_id == -1 )
+ {
+ $log( $error, {"Unkown shader alias '"}, {shader_alias}, {"'"} );
+ return 0;
+ }
+
+ struct shader *shader = _get_shader( material->shader_id );
+ for( u32 i=0; i<shader->uniform_count; i ++ )
+ {
+ const struct shader_uniform *uniform = _get_shader_uniform( material->shader_id, i );
+ if( uniform->expose )
+ {
+ ASSERT_CRITICAL( uniform->type == k_uniform_type_sampler2D );
+ const c8 *image_path = keyvalues_read_string( &material->kvs, shader_block, uniform->alias, NULL );
+ u32 texture_id = _vg_texture_load( image_path, material->flags & VG_MATERIAL_TILEMAPPED? &material->tilemap: NULL );
+ ASSERT_CRITICAL( uniform->property_offset < VG_MATERIAL_PROPERTIES_MAX );
+ material->properties._u32[ uniform->property_offset ] = texture_id;
+ }
+ }
+ }
+ else
+ {
+ $log( $error, {"No shader block found in material file!"} );
+ return 0;
+ }
+
+ return id;
+}
+
/* Return a pointer to the raw property storage slot for 'uniform_id'. The
 * slot is a union cell: interpret as u32 for samplers, f32 for scalars. */
void *_vg_material_property( struct vg_material *material, enum shader_uniform_id uniform_id )
{
   const struct shader_uniform *uniform = _shader_uniform( uniform_id );
   return (void *)(&material->properties._f32[ uniform->property_offset ]);
}

/* Convenience: read a property slot as a texture id (u32). */
u32 _vg_material_texture( struct vg_material *material, enum shader_uniform_id uniform_id )
{
   return ((u32 *)_vg_material_property( material, uniform_id ))[0];
}
+
/* Resolve a material handle to its payload; NULL for the invalid handle 0. */
struct vg_material *_vg_material_get( u16 id )
{
   if( id == 0 )
      return NULL;
   else
      return &_vg_material.assets[ vg_asset_index( &_vg_material.asset_list, id ) ];
}

/* Drop one reference to a material. */
void _vg_material_release( u16 id )
{
   if( vg_asset_release( &_vg_material.asset_list, id ) )
   {
      /* nothing to do?? */
      /* NOTE(review): stack-allocated tilemap data and kvs are owned by the
       * loader's caller, so there may genuinely be nothing to free — confirm. */
   }
}
+
/* Linear search for a tileset by alias; returns its index, or -1 when absent.
 * For materials without a "tilemap" block set_count is 0 and this returns -1. */
i32 _vg_material_get_tileset( u16 id, const c8 *name )
{
   ASSERT_CRITICAL( id );
   struct vg_material *mat = _vg_material_get( id );
   for( i32 i=0; i<mat->tilemap.set_count; i ++ )
   {
      struct tex_tileset *tileset = &mat->tilemap.sets[ i ];
      if( compare_buffers( tileset->alias, 0, name, 0 ) )
         return i;
   }

   return -1;
}
+
/* Return tileset 'index' of a tilemapped material. Asserts a valid handle,
 * a non-empty tilemap and a non-negative index (upper bound is not checked). */
struct tex_tileset *_vg_material_tileset( u16 id, i32 index )
{
   struct vg_material *mat = _vg_material_get( id );
   ASSERT_CRITICAL( id );
   ASSERT_CRITICAL( mat->tilemap.set_count );
   ASSERT_CRITICAL( index >= 0 );
   return &mat->tilemap.sets[ index ];
}
+
/* Return tile 'index' within a tileset; tiles are stored in one flat array
 * per material, offset by the tileset's tile_start. */
struct tex_tile *_vg_material_tileset_tile( u16 id, u32 tileset_id, u32 index )
{
   ASSERT_CRITICAL( id );
   struct vg_material *mat = _vg_material_get( id );
   struct tex_tileset *tileset = _vg_material_tileset( id, tileset_id );
   return &mat->tilemap.tiles[ tileset->tile_start + index ];
}
+
/* Compute the normalized UV delta from a tileset's frame 0 to 'frame' in
 * packed-sheet space (for animating by shifting UVs).
 * NOTE(review): the Y component is negated — presumably to match the V-flip
 * applied when tilemapped vertices are emitted; confirm against the mesh
 * upload path. */
void _vg_material_tileset_offset( u16 id, u32 tileset_id, u32 frame, f32 out_offset[2] )
{
   ASSERT_CRITICAL( id );
   struct vg_material *mat = _vg_material_get( id );
   struct tex_tileset *tileset = _vg_material_tileset( id, tileset_id );
   struct tex_tile *tile_0 = &mat->tilemap.tiles[ tileset->tile_start ],
                   *tile_n = &mat->tilemap.tiles[ tileset->tile_start + frame ];

   i16 dy = tile_n->pixel_root[1] - tile_0->pixel_root[1],
       dx = tile_n->pixel_root[0] - tile_0->pixel_root[0];
   out_offset[0] = (f32)dx / (f32)mat->tilemap.sheet_size[0];
   out_offset[1] = (f32)dy / -(f32)mat->tilemap.sheet_size[1];
}
--- /dev/null
+#pragma once
+#include "foundation.h"
+#include "vg_tex.h"
+#include "vg_shader.h"
+
#define VG_ASSET_MATERIALS_MAX 64      /* registry capacity */
#define VG_MATERIAL_TILEMAPPED 0x20    /* material uses a packed tile sheet */
#define VG_MATERIAL_ADDITIVE 0x40      /* additive blending */
#define VG_MATERIAL_PROPERTIES_MAX 16  /* uniform property slots per material */

/* A loaded material: shader binding, source keyvalues (kept resident for
 * string lifetimes), optional tilemap, and raw uniform property storage. */
struct vg_material
{
   u16 shader_id, flags;       /* shader index; VG_MATERIAL_* flag bits */
   struct keyvalues kvs;       /* parsed material file */
   struct tex_tilemap tilemap; /* valid only when VG_MATERIAL_TILEMAPPED set */
   /* property slots indexed by shader_uniform.property_offset: samplers
    * store texture ids in _u32, scalar uniforms use _f32 */
   union
   {
      u32 _u32[ VG_MATERIAL_PROPERTIES_MAX ];
      f32 _f32[ VG_MATERIAL_PROPERTIES_MAX ];
   }
   properties;
};

/* Allocate the material registry (call once at engine start). */
void _vg_material_init(void);
/* Load or re-reference a material file; returns handle, 0 on failure. */
u16 _vg_material_load( const c8 *path, struct stack_allocator *stack );
/* Handle -> payload; NULL for handle 0. */
struct vg_material *_vg_material_get( u16 id );
/* Drop one reference. */
void _vg_material_release( u16 id );

/* Tilemap queries (material must be VG_MATERIAL_TILEMAPPED). */
i32 _vg_material_get_tileset( u16 id, const c8 *name );
struct tex_tileset *_vg_material_tileset( u16 id, i32 index );
struct tex_tile *_vg_material_tileset_tile( u16 id, u32 tileset, u32 index );
void _vg_material_tileset_offset( u16 id, u32 tileset_id, u32 frame, f32 out_offset[2] );
/* Raw property slot access (union cell: u32 for samplers, f32 for scalars). */
void *_vg_material_property( struct vg_material *material, enum shader_uniform_id uniform_id );
u32 _vg_material_texture( struct vg_material *material, enum shader_uniform_id uniform_id );
#include "vg_opengl.h"
#include "shader_props.h"
#include <stddef.h>
+#include "vg_material.h"
/* Return the model's pack stream positioned for reading 'file'. A pack size
 * of 0 means the entry is only a header stub with no payload — fatal. */
struct stream *vg_model_stream_pack_stream( struct vg_model_stream_context *ctx, struct mdl_file *file )
{
   if( !file->pack_size )
   {
      $log( $fatal, {"Packed file is only a header; it is not packed\n"},
            {"Path: "}, {af_str( ctx->model->packed_strings, file->pstr_path )} );
      _fatal_exit();
   }
   return &ctx->stream;
}
/* Resolve material assets for every surface of the model, then propagate the
 * resulting material ids onto submeshes. (Replaces the old in-model material
 * compiler; materials now live in standalone files via _vg_material_load.) */
void vg_model_stream_materials( struct vg_model_stream_context *ctx, struct stack_allocator *stack )
{
   for( u32 i=0; i<ctx->model->surface_count; i ++ )
   {
      struct mdl_surface *surface = &ctx->model->surfaces[ i ];
      const c8 *material_path = af_str( ctx->model->packed_strings, surface->pstr_material );
      /* 0 on failure; submeshes referencing it then get material_id 0 */
      surface->material_id = _vg_material_load( material_path, stack );
   }

   for( u32 i=0; i<ctx->model->submesh_count; i ++ )
   {
      struct mdl_submesh *sm = &ctx->model->submeshes[ i ];
      /* surface_index is 1-based; 0 means 'no surface' */
      if( sm->surface_index )
      {
         struct mdl_surface *surface = &ctx->model->surfaces[ sm->surface_index-1 ];
         sm->material_id = surface->material_id;
      }
      else
         sm->material_id = 0;
   }
}
void vg_model_stream_metadata( struct vg_model_stream_context *ctx, struct stack_allocator *stack )
ctx->model->submeshes = ptr.data;
ctx->model->submesh_count = ptr.count;
- af_load_array( &ctx->af, &ptr, "mdl_texture", stack, sizeof(union mdl_texture) );
- ctx->model->textures = ptr.data;
- ctx->model->texture_count = ptr.count;
-
af_load_array( &ctx->af, &ptr, "mdl_armature", stack, sizeof(struct mdl_armature) );
ctx->model->armatures = ptr.data;
ctx->model->armature_count = ptr.count;
ctx->model->bones = ptr.data;
ctx->model->bone_count = ptr.count;
- vg_model_stream_materials( ctx, stack );
+ af_load_array( &ctx->af, &ptr, "mdl_surface", stack, sizeof(struct mdl_surface) );
+ ctx->model->surfaces = ptr.data;
+ ctx->model->surface_count = ptr.count;
}
void vg_model_stream_meshes_cpu( struct vg_model_stream_context *ctx, struct stack_allocator *stack )
struct vg_model *model;
struct mdl_vert *vert_buffer;
u32 *indice_buffer;
+ u32 vertex_count, indice_count;
};
static void vg_model_upload_task( struct task *task )
struct model_upload_task *in_args = task_buffer(task);
ASSERT_CRITICAL( _thread_has_flags( _get_thread_id(), THREAD_FLAG_MAIN ) );
+ $log( $info, {"Upload mesh, verts: "}, $unsigned(in_args->vertex_count), {" indices: "}, $unsigned(in_args->indice_count) );
+
glGenVertexArrays( 1, &in_args->model->vao );
glBindVertexArray( in_args->model->vao );
glGenBuffers( 1, &in_args->model->vbo );
u32 stride = sizeof(struct mdl_vert);
glBindBuffer( GL_ARRAY_BUFFER, in_args->model->vbo );
- glBufferData( GL_ARRAY_BUFFER, in_args->model->vert_count*stride, in_args->vert_buffer, GL_STATIC_DRAW );
+ glBufferData( GL_ARRAY_BUFFER, in_args->vertex_count*stride, in_args->vert_buffer, GL_STATIC_DRAW );
glBindVertexArray( in_args->model->vao );
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, in_args->model->ebo );
- glBufferData( GL_ELEMENT_ARRAY_BUFFER, in_args->model->indice_count*sizeof(u32),
+ glBufferData( GL_ELEMENT_ARRAY_BUFFER, in_args->indice_count*sizeof(u32),
in_args->indice_buffer, GL_STATIC_DRAW );
/* 0: coordinates */
glEnableVertexAttribArray( 5 );
}
-void vg_model_stream_meshes_gpu( struct vg_model_stream_context *ctx, u32 *fixup_table )
/* Append a copy of 'vert' to the upload task's vertex buffer, remapping its
 * bone group indices through 'fixup_table' when one is supplied. Returns the
 * index of the emitted vertex. */
u32 vg_model_emit_vertex( struct model_upload_task *task, struct mdl_vert *vert, u32 *fixup_table )
{
   u32 index = task->vertex_count;
   struct mdl_vert *dest = &task->vert_buffer[index];
   *dest = *vert;
   if( fixup_table )
      for( u32 j=0; j<4; j++ )
         dest->groups[j] = fixup_table[dest->groups[j]];
   task->vertex_count ++;
   return index;
}
- struct array_file_meta *arr_vertices = af_find_array( &ctx->af, "mdl_vert" );
- struct array_file_meta *arr_indices = af_find_array( &ctx->af, "mdl_indice" );
/* Append one index to the upload task's index buffer. */
void vg_model_emit_indice( struct model_upload_task *task, u32 indice )
{
   task->indice_buffer[ task->indice_count ++ ] = indice;
}
- if( arr_vertices && arr_indices )
+void vg_model_upload_gpu( struct vg_model_stream_context *ctx, struct stack_allocator *stack, u32 *fixup_table )
+{
+ ASSERT_CRITICAL( ctx->model->verts );
+ ASSERT_CRITICAL( ctx->model->indices );
+ ASSERT_CRITICAL( ctx->model->indice_count > ctx->model->vert_count );
+
+ u32 batches_size = sizeof(struct mdl_batch) * ctx->model->submesh_count;
+ ctx->model->batches = stack_allocate( stack, batches_size, 8, "Batches" );
+ zero_buffer( ctx->model->batches, batches_size );
+
+ /* Vertex buffer is allocated to same length as indices, as we may be making more vertexes for the GPU.
+ * We currently never exceed the indice amount if we say max 1 emitted vert per indice (all unique triangles, say) */
+ u32 size_verts = PAD_TO_8( sizeof(struct mdl_vert) * ctx->model->indice_count ),
+ size_indices = PAD_TO_8( sizeof(u32) * ctx->model->indice_count ),
+ size_hdr = PAD_TO_8( sizeof(struct model_upload_task)),
+ total = size_hdr + size_verts + size_indices;
+ struct task *upload_task = _task_new( k_thread_main, total, 0, "Model upload to GPU task" );
+ struct model_upload_task *args = task_buffer( upload_task );
+ args->model = ctx->model;
+ args->vertex_count = 0;
+ args->indice_count = 0;
+ args->vert_buffer = ((void *)args) + size_hdr;
+ args->indice_buffer = ((void *)args) + size_hdr + size_verts;
+
+ i32 triangles_ok = 0,
+ triangles_total = 0;
+
+ for( u32 mi=0; mi<ctx->model->submesh_count; mi ++ )
{
- u32 size_verts = PAD_TO_8(sizeof(struct mdl_vert)*arr_vertices->item_count),
- size_indices = PAD_TO_8(sizeof(u32)*arr_indices->item_count),
- size_hdr = PAD_TO_8(sizeof(struct model_upload_task)),
- total = size_hdr + size_verts + size_indices;
+ struct mdl_submesh *sm = &ctx->model->submeshes[ mi ];
+ struct mdl_batch *batch = &ctx->model->batches[ mi ];
- struct task *upload_task = _task_new( k_thread_main, total, 0, "Model upload to GPU task" );
- struct model_upload_task *args = task_buffer( upload_task );
+ b8 tilemapped = 0;
+ struct vg_material *material = NULL;
- args->model = ctx->model;
- args->vert_buffer = ((void *)args) + size_hdr;
- args->indice_buffer = ((void *)args) + size_hdr + size_verts;
- ctx->model->vert_count = arr_vertices->item_count;
- ctx->model->indice_count = arr_indices->item_count;
-
- af_load_array_file_buffer( &ctx->af, arr_vertices, args->vert_buffer, sizeof(struct mdl_vert) );
- af_load_array_file_buffer( &ctx->af, arr_indices, args->indice_buffer, sizeof(u32) );
-
- if( fixup_table )
+ if( sm->material_id )
{
- for( u32 i=0; i<ctx->model->vert_count; i ++ )
- {
- struct mdl_vert *vert = &args->vert_buffer[i];
- for( u32 j=0; j<4; j++ )
- vert->groups[j] = fixup_table[vert->groups[j]];
- }
+ material = _vg_material_get( sm->material_id );
+ if( material->flags & VG_MATERIAL_TILEMAPPED )
+ tilemapped = 1;
}
- /*
- * Unpack the indices (if there are meshes)
- * ---------------------------------------------------------
- */
- if( ctx->model->submesh_count )
+ if( tilemapped )
{
- struct mdl_submesh *sm = &ctx->model->submeshes[ 0 ];
- u32 offset = sm->vertex_count;
+ batch->offset = args->indice_count * sizeof(u32);
+ batch->count = 0;
- for( u32 i=1; i<ctx->model->submesh_count; i++ )
+ for( u32 j=0; j<sm->indice_count/3; j ++ )
{
- struct mdl_submesh *sm = &ctx->model->submeshes[ i ];
- u32 *indices = args->indice_buffer + sm->indice_start;
+ u32 i0 = sm->vertex_start + ctx->model->indices[sm->indice_start + j*3+0],
+ i1 = sm->vertex_start + ctx->model->indices[sm->indice_start + j*3+1],
+ i2 = sm->vertex_start + ctx->model->indices[sm->indice_start + j*3+2];
+ struct mdl_vert vs[3] = { ctx->model->verts[ i0 ],
+ ctx->model->verts[ i1 ],
+ ctx->model->verts[ i2 ] };
+ for( u32 l=0; l<3; l ++ )
+ vs[l].uv[1] = 1.0f - vs[l].uv[1];
+
+ b8 passed = 0;
+
+ f32 uv_centroid[2] = {0,0};
+ v2_add( vs[0].uv, vs[1].uv, uv_centroid );
+ v2_add( uv_centroid, vs[2].uv, uv_centroid );
+ v2_muls( uv_centroid, 1.0f/3.0f, uv_centroid );
+ for( u32 k=0; k<material->tilemap.set_count; k ++ )
+ {
+ struct tex_tileset *set = &material->tilemap.sets[k];
+ f32 uv_rect[4] = { (f32)set->co[0] / (f32)material->tilemap.image_size[0],
+ (f32)set->co[1] / (f32)material->tilemap.image_size[1],
+ (f32)(set->size[0]*set->grid[0]) / (f32)material->tilemap.image_size[0],
+ (f32)(set->size[1]*set->grid[1]) / (f32)material->tilemap.image_size[1] };
+ f32 v0[2];
+ v2_sub( uv_centroid, uv_rect, v0 );
+ if( (v0[0] >= 0.0f) && (v0[1] >= 0.0f) && (v0[0] <= uv_rect[2]) && (v0[1] <= uv_rect[3]) )
+ {
+ i32 tile_y = (v0[1]/uv_rect[3]) * set->grid[1],
+ tile_x = (v0[0]/uv_rect[2]) * set->grid[0],
+ tile_i = tile_y*set->grid[0] + tile_x;
+ struct tex_tile *tile = &material->tilemap.tiles[ set->tile_start + tile_i ];
+
+ f32 sheet_uv[2] = { (f32)tile->pixel_root[0]/(f32)material->tilemap.sheet_size[0],
+ (f32)tile->pixel_root[1]/(f32)material->tilemap.sheet_size[1] };
+
+ f32 image_uv[2] = { (f32)uv_rect[0] + (f32)(set->size[0]*tile_x)/(f32)material->tilemap.image_size[0],
+ (f32)uv_rect[1] + (f32)(set->size[0]*tile_y)/(f32)material->tilemap.image_size[1] };
+ f32 scale_factor[2] = { (f32)material->tilemap.image_size[0]/(f32)material->tilemap.sheet_size[0],
+ (f32)material->tilemap.image_size[1]/(f32)material->tilemap.sheet_size[1] };
+ v2_mul( image_uv, scale_factor, image_uv );
+
+ f32 uv_offset[2];
+ v2_sub( sheet_uv, image_uv, uv_offset );
+
+
+ for( u32 l=0; l<3; l ++ )
+ {
+ vs[l].uv[0] = vs[l].uv[0] * scale_factor[0] + uv_offset[0];
+ vs[l].uv[1] = 1.0f - (vs[l].uv[1] * scale_factor[1] + uv_offset[1]);
+ u32 new_vert = vg_model_emit_vertex( args, &vs[l], fixup_table );
+ vg_model_emit_indice( args, new_vert );
+ }
+
+ batch->count += 3;
+ passed = 1;
+ break;
+ }
+ }
- for( u32 j=0; j<sm->indice_count; j++ )
- indices[j] += offset;
- offset += sm->vertex_count;
+ triangles_total ++;
+ if( passed )
+ triangles_ok ++;
+ else
+ {
+ /* just emit the broken face... */
+ for( u32 l=0; l<3; l ++ )
+ {
+ u32 new_vert = vg_model_emit_vertex( args, &vs[l], fixup_table );
+ vg_model_emit_indice( args, new_vert );
+ }
+ }
}
}
+ else
+ {
+ u32 vertex_offset = args->vertex_count;
+ batch->offset = args->indice_count * sizeof(u32);
+ batch->count = sm->indice_count;
+
+ for( u32 j=0; j<sm->vertex_count; j ++ )
+ vg_model_emit_vertex( args, &ctx->model->verts[ sm->vertex_start + j ], fixup_table );
- task_send( upload_task, vg_model_upload_task );
+ /* TODO: why is this like this? Were indices stored in u16 before and needed unpacking? */
+ for( u32 j=0; j<sm->indice_count; j ++ )
+ vg_model_emit_indice( args, vertex_offset + ctx->model->indices[ sm->indice_start + j ] );
+ }
}
- else
+
+ if( triangles_ok != triangles_total )
{
- $log( $fatal, {"No vertex/indice data in model file"} );
- _fatal_exit();
+ $log( $warning, {"Only "}, $signed( triangles_ok ), {" of "}, $signed( triangles_total ),
+ {" triangles were mapped across the tilemap"} );
}
+
+ task_send( upload_task, vg_model_upload_task );
}
-void vg_model_stream_textures_gpu( struct vg_model_stream_context *ctx )
/* Upload meshes to the GPU from an async loader thread. When the model keeps
 * no CPU copy, the mesh data is streamed into a temporary frame just long
 * enough to build the upload task (which copies it into its own buffer). */
void vg_model_stream_meshes_gpu( struct vg_model_stream_context *ctx, struct stack_allocator *stack, u32 *fixup_table )
{
   ASSERT_CRITICAL( _thread_has_flags( _get_thread_id(), THREAD_FLAG_ASYNC ) );
   ASSERT_CRITICAL( ctx->model->flags & VG_MODEL_CPU_METADATA );
   ctx->model->flags |= VG_MODEL_GPU_MESHES;

   if( ctx->model->flags & VG_MODEL_CPU_MESHES )
      vg_model_upload_gpu( ctx, stack, fixup_table );
   else
   {
      u32 temp_frame = _start_temporary_frame();
      {
         vg_model_stream_meshes_cpu( ctx, _temporary_stack_allocator() );
         vg_model_upload_gpu( ctx, stack, fixup_table );
         /* the vert/indice pointers would dangle into the temp frame */
         ctx->model->flags &= ~(u32)VG_MODEL_CPU_MESHES;
         ctx->model->verts = NULL;
         ctx->model->indices = NULL;
      }
      _end_temporary_frame( temp_frame );
   }
}
if( model_flags & VG_MODEL_CPU_MESHES )
vg_model_stream_meshes_cpu( &ctx, stack );
+ if( model_flags & VG_MODEL_LOAD_MATERIALS )
+ vg_model_stream_materials( &ctx, stack );
+
ASSERT_CRITICAL( _thread_has_flags( _get_thread_id(), THREAD_FLAG_ASYNC ) );
if( model_flags & VG_MODEL_GPU_MESHES )
- vg_model_stream_meshes_gpu( &ctx, NULL );
-
- if( model_flags & VG_MODEL_GPU_TEXTURES )
- vg_model_stream_textures_gpu( &ctx );
+ vg_model_stream_meshes_gpu( &ctx, stack, NULL );
vg_model_stream_close( &ctx );
success = 1;
glDeleteBuffers( 1, &model->vbo );
}
- if( model->flags & VG_MODEL_GPU_TEXTURES )
- for( u32 i=0; i<model->texture_count; i ++ )
- vg_tex_delete( &model->textures[i].tex );
+ for( u32 i=0; i<model->surface_count; i ++ )
+ _vg_material_release( model->surfaces[i].material_id );
}
void mdl_transform_m4x3( struct mdl_transform *transform, f32 mtx[4][3] )
glBindVertexArray( model->vao );
}
+#if 0
void vg_model_draw_elements( u32 start, u32 count )
{
glDrawElements( GL_TRIANGLES, count, GL_UNSIGNED_INT, (void *)(start*sizeof(u32)) );
}
+#endif
-void vg_model_draw_submesh( struct mdl_submesh *sm )
+void vg_model_draw_batch( struct mdl_batch *batch )
{
- vg_model_draw_elements( sm->indice_start, sm->indice_count );
+ glDrawElements( GL_TRIANGLES, batch->count, GL_UNSIGNED_INT, (void *)(u64)batch->offset );
}
i32 vg_model_get_mesh_index( struct vg_model *model, const c8 *name )
return mesh->submesh_start;
}
-void vg_model_bind_texture( struct vg_model *model, GLuint target, u32 tex_id, u32 slot )
+u16 vg_model_get_surface_material( struct vg_model *model, u32 surface_index )
{
- if( tex_id )
- {
- union mdl_texture *mdl_tex = &model->textures[ tex_id -1 ];
- vg_tex_bind( target, &mdl_tex->tex, slot );
- }
- else
- vg_tex_bind( target, NULL, slot );
+ struct mdl_surface *surface = &model->surfaces[ surface_index ];
+ return surface->material_id;
}
#include "shader_props.h"
#include "vg_af.h"
-#define VG_MODEL_VERSION_MIN 110
-#define VG_MODEL_VERSION_NR 110
+#define VG_MODEL_VERSION_MIN 111
+#define VG_MODEL_VERSION_NR 111
union entity_id
{
};
};
+#if 0
enum mdl_shader
{
k_shader_standard = 0,
k_shader_pipe = 12,
k_shader_override = 30000
};
+#endif
enum mdl_surface_prop
{
k_surface_prop_sand = 6
};
+#if 0
enum material_flag
{
k_material_flag_skate_target = 0x0001,
k_material_flag_invisible|
k_material_flag_walking,
- k_material_flag_spritesheet = 0x1000
+ k_material_flag_spritesheet = 0x1000,
+ k_material_flag_external = 0x2000,
+
+ k_surface_flag_missing_material = 0x4000
};
+#endif
#pragma pack(push,1)
void mdl_transform_point( struct mdl_transform *transform, f32 co[3], f32 dest[3] );
void mdl_transform_mul( struct mdl_transform *a, struct mdl_transform *b, struct mdl_transform *d );
-#if (VG_MODEL_VERSION_MIN <= 105)
-struct mdl_material_v101
-{
- u32 pstr_name,
- shader,
- flags,
- surface_prop;
-
- f32 colour[4],
- colour1[4];
-
- u32 tex_diffuse, /* Indexes start from 1. 0 if missing. */
- tex_none0,
- tex_none1;
-};
-#endif
-
-struct mdl_material
-{
- u32 pstr_name,
- shader,
- flags,
- surface_prop;
-
- union
- {
- struct
- {
- u32 offset, size; /* indexes shader data */
- }
- kvs;
- u32 kv_root; /* runtime */
- }
- props;
-};
-
struct mdl_bone
{
f32 co[3], end[3];
vertex_count;
f32 bbx[2][3];
- u16 material_id, flags;
+ union{ u16 surface_index; u16 material_id; };
+ u16 flags;
};
enum esubmesh_flags
{
k_submesh_flag_none = 0x0000,
- k_submesh_flag_consumed = 0x0001
+ k_submesh_flag_consumed = 0x0001,
+ k_submesh_flag_warned = 0x0002
};
struct mdl_mesh
pack_size;
};
-union mdl_texture
+/* THIS SHOULD ACTUALLY BE CALLED: MATERIAL REFERENCE */
+struct mdl_surface
{
- struct mdl_file file;
- struct vg_tex tex;
+ union{ u32 pstr_material; u16 material_id; };
+ u32 flags;
+};
+
+struct mdl_batch
+{
+ u32 count, offset;
};
struct vg_model
/* VG_MODEL_CPU_METADATA ---------------------- */
const void *packed_strings;
- union shader_props *shader_props;
+ /* model metadata */
struct mdl_mesh *meshes;
u32 mesh_count;
struct mdl_submesh *submeshes;
u32 submesh_count;
- struct mdl_material *materials;
- u32 material_count;
+ /* by default this corresponds to each submesh */
+ struct mdl_batch *batches;
+ u32 batch_count;
- union mdl_texture *textures;
- u32 texture_count;
+ struct mdl_surface *surfaces;
+ u32 surface_count;
struct mdl_armature *armatures;
u32 armature_count;
GLuint vao, vbo, ebo;
};
-#define VG_MODEL_CPU_METADATA 0x4
-#define VG_MODEL_CPU_MESHES 0x8
-# define VG_MODEL_GPU_TEXTURES 0x1
+# define VG_MODEL_CPU_METADATA 0x4
+# define VG_MODEL_CPU_MESHES 0x8
# define VG_MODEL_GPU_MESHES 0x2
-# define VG_MODEL_ENGINE_STANDARD (VG_MODEL_CPU_METADATA|VG_MODEL_GPU_TEXTURES|VG_MODEL_GPU_MESHES)
-# define VG_MODEL_ENGINE_PROCEDURAL_SOURCE (VG_MODEL_CPU_METADATA|VG_MODEL_GPU_TEXTURES|VG_MODEL_CPU_MESHES)
+# define VG_MODEL_LOAD_MATERIALS 0x100
+# define VG_MODEL_ENGINE_STANDARD (VG_MODEL_CPU_METADATA|VG_MODEL_LOAD_MATERIALS|VG_MODEL_GPU_MESHES)
+# define VG_MODEL_ENGINE_PROCEDURAL_SOURCE (VG_MODEL_CPU_METADATA|VG_MODEL_LOAD_MATERIALS|VG_MODEL_CPU_MESHES)
b8 vg_model_load( struct vg_model *model, u32 model_flags, const c8 *path, struct stack_allocator *stack );
void vg_model_unload_gpu( struct vg_model *model );
/* Parts which you might want (currently they all require metadata to be explicitly loaded) */
void vg_model_stream_metadata( struct vg_model_stream_context *ctx, struct stack_allocator *stack );
+void vg_model_stream_materials( struct vg_model_stream_context *ctx, struct stack_allocator *stack );
void vg_model_stream_meshes_cpu( struct vg_model_stream_context *ctx, struct stack_allocator *stack );
-
-void vg_model_stream_meshes_gpu( struct vg_model_stream_context *ctx, u32 *fixup_table );
-void vg_model_stream_textures_gpu( struct vg_model_stream_context *ctx );
+void vg_model_stream_meshes_gpu( struct vg_model_stream_context *ctx, struct stack_allocator *stack, u32 *fixup_table );
struct stream *vg_model_stream_pack_stream( struct vg_model_stream_context *ctx, struct mdl_file *file );
/* Rendering operations
* ----------------------------------------------------------------------------------------------------------------- */
void vg_model_bind_mesh( struct vg_model *model );
-void vg_model_bind_texture( struct vg_model *model, GLuint target, u32 tex_id, u32 slot );
-void vg_model_draw_elements( u32 start, u32 count );
-void vg_model_draw_submesh( struct mdl_submesh *sm );
+void vg_model_draw_batch( struct mdl_batch *batch );
i32 vg_model_get_mesh_index( struct vg_model *model, const c8 *name );
i32 vg_model_get_submesh_index( struct vg_model *model, const c8 *mesh_name );
void mdl_transform_m4x3( struct mdl_transform *transform, f32 mtx[4][3] );
+
+u16 vg_model_get_surface_material( struct vg_model *model, u32 surface_index );
GLuint _shader_programs[ k_shader_count ];
GLuint _uniform_locations[ k_shader_uniform_count ];
-struct shader
-{
- const c8 *name;
- u32 subshader_count, uniform_start, uniform_count;
-
- struct subshader
- {
- enum subshader_type
- {
- k_subshader_vertex,
- k_subshader_fragment,
- k_subshader_geometry
- }
- type;
- u32 source_count;
- const c8 **source_list;
- const c8 *source_static;
- const c8 *source_uniforms;
- }
- *subshaders;
-};
-
#include "generated/shaders.c"
static void _shaders_compile( b8 recompile )
for( u32 i=0; i<k_shader_count; i ++ )
{
struct shader *shader = &_shader_definitions[ i ];
+ if( shader->subshader_count < 2 )
+ {
+ $log( $warning, {"Shader '"}, {shader->name}, {"' has less than 2 subshaders ("},
+ $unsigned(shader->subshader_count), {")"} );
+ continue;
+ }
+
GLuint new_program = glCreateProgram();
GLuint sub_programs[3];
for( u32 j=0; j<shader->uniform_count; j ++ )
{
u32 index = shader->uniform_start+j;
- _uniform_locations[ index ] = glGetUniformLocation( new_program, _uniform_aliases[ index ] );
+ _uniform_locations[ index ] = glGetUniformLocation( new_program, _uniform_infos[ index ].alias );
}
$log( $ok, {"Shader complete: "}, {shader->name} );
goto cleanup;
_shaders_compile(1);
return 0;
}
+
+struct shader *_get_shader( enum shader_id shader_id )
+{
+ return &_shader_definitions[ shader_id ];
+}
+
+enum shader_id _get_shader_id( const c8 *name )
+{
+ for( u32 i=0; i<k_shader_count; i ++ )
+ {
+ struct shader *shader = &_shader_definitions[i];
+ if( compare_buffers( shader->name, 0, name, 0 ) )
+ return i;
+ }
+ return -1;
+}
+
+const struct shader_uniform *_get_shader_uniform( enum shader_id shader_id, u32 index )
+{
+ return &_uniform_infos[ _get_shader( shader_id )->uniform_start + index ];
+}
+
+const struct shader_uniform *_shader_uniform( enum shader_uniform_id uniform_id )
+{
+ return &_uniform_infos[ uniform_id ];
+}
+#pragma once
#include "vg_opengl.h"
#include "generated/shaders.h"
+struct shader
+{
+ const c8 *name;
+ u32 subshader_count, uniform_start, uniform_count;
+
+ struct subshader
+ {
+ enum subshader_type
+ {
+ k_subshader_vertex,
+ k_subshader_fragment,
+ k_subshader_geometry
+ }
+ type;
+ u32 source_count;
+ const c8 **source_list;
+ const c8 *source_static;
+ const c8 *source_uniforms;
+ }
+ *subshaders;
+};
+
+struct shader_uniform
+{
+ const c8 *alias;
+ enum expose_type
+ {
+ k_uniform_expose_none,
+ k_uniform_expose_image,
+ k_uniform_expose_colour
+ }
+ expose;
+ u32 property_offset;
+ enum uniform_type
+ {
+ k_uniform_type_int,
+ k_uniform_type_vec2,
+ k_uniform_type_vec3,
+ k_uniform_type_vec4,
+ k_uniform_type_bool,
+ k_uniform_type_mat2,
+ k_uniform_type_mat3,
+ k_uniform_type_mat4,
+ k_uniform_type_float,
+ k_uniform_type_mat4x3,
+ k_uniform_type_sampler2D,
+ k_uniform_type_usampler3D,
+ k_uniform_type_samplerCube,
+ k_uniform_type_samplerBuffer
+ }
+ type;
+};
+
void _shader_init(void);
GLuint compile_opengl_subshader( GLint type, const c8 *sources[], u32 source_count, b8 critical, const c8 *name );
b8 link_opengl_program( GLuint program, b8 critical );
+
+struct shader *_get_shader( enum shader_id shader_id );
+enum shader_id _get_shader_id( const c8 *name );
+const struct shader_uniform *_get_shader_uniform( enum shader_id shader_id, u32 index );
+const struct shader_uniform *_shader_uniform( enum shader_uniform_id uniform_id );
+
void _shader_bind( enum shader_id id );
#include "foundation.h"
#include "vg_tex.h"
#include "vg_async.h"
+#include "vg_asset.h"
#define QOI_OP_INDEX 0x00 /* 00xxxxxx */
#define QOI_OP_DIFF 0x40 /* 01xxxxxx */
struct
{
GLuint error2d, errorcube;
+
+ struct vg_tex *assets;
+ struct vg_asset_list asset_list;
}
static _vg_tex;
ASSERT_CRITICAL( _thread_has_flags( _get_thread_id(), THREAD_FLAG_OPENGL ) );
$log( $info, {"[INIT] _vg_tex_init"} );
- static u8 const_vg_tex2d_err[] =
+ static u8 checker[] =
{
0xff,0x00,0xff,0xff, 0x00,0x00,0x00,0xff, 0xff,0x00,0xff,0xff, 0x00,0x00,0x00,0xff,
0x00,0x00,0x00,0xff, 0xff,0x00,0xff,0xff, 0x00,0x00,0x00,0xff, 0xff,0x00,0xff,0xff,
glGenTextures( 1, &_vg_tex.error2d );
glBindTexture( GL_TEXTURE_2D, _vg_tex.error2d );
- glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, 4, 4, 0, GL_RGBA, GL_UNSIGNED_BYTE, const_vg_tex2d_err );
+ glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, 4, 4, 0, GL_RGBA, GL_UNSIGNED_BYTE, checker );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT );
glTexParameteri( GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE );
for( u32 j=0; j<6; j ++ )
- {
- glTexImage2D( GL_TEXTURE_CUBE_MAP_POSITIVE_X + j, 0, GL_RGBA, 4, 4,
- 0, GL_RGBA, GL_UNSIGNED_BYTE, const_vg_tex2d_err );
- }
+ glTexImage2D( GL_TEXTURE_CUBE_MAP_POSITIVE_X + j, 0, GL_RGBA, 4, 4, 0, GL_RGBA, GL_UNSIGNED_BYTE, checker );
+
+ _vg_tex.assets = _heap_allocate( sizeof(struct vg_tex) * VG_ASSET_TEXTURES_MAX );
+ vg_allocate_asset_list( &_vg_tex.asset_list, VG_ASSET_TEXTURES_MAX );
}
struct tex_upload_task
glDeleteTextures( 1, &tex->name );
zero_buffer( tex, sizeof(struct vg_tex) );
}
+
+u16 _vg_texture_load( const c8 *path, struct tex_tilemap *tilemap )
+{
+ ASSERT_CRITICAL( _thread_has_flags( _get_thread_id(), THREAD_FLAG_ASYNC ) );
+ if( path == NULL )
+ return 0;
+
+ u16 id;
+ if( tilemap )
+ {
+ id = vg_asset_create_anonymous( &_vg_tex.asset_list );
+ struct vg_tex *tex = &_vg_tex.assets[ vg_asset_index( &_vg_tex.asset_list, id ) ];
+ //_vg_tex_load( tex, path, VG_TEX_REPEAT | VG_TEX_LINEAR | VG_TEX_NOMIP | VG_TEX_FLIP_V );
+
+ struct stream file;
+ ASSERT_CRITICAL( stream_open_file( &file, path, k_stream_read ) );
+
+ struct qoi_desc qoi;
+ u32 size = vg_qoi_stream_init( &qoi, &file );
+ ASSERT_CRITICAL( size );
+ ASSERT_CRITICAL( tilemap->image_size[0] == qoi.width );
+ ASSERT_CRITICAL( tilemap->image_size[1] == qoi.height );
+
+ u32 temp_frame = _start_temporary_frame();
+ u8 *src_buffer = _temporary_allocate( size, 16 );
+ vg_qoi_stream_decode( &qoi, &file, src_buffer, 0 );
+ stream_close( &file );
+
+ _async_push_groups( ASYNC_GROUP_OPENGL, 0 );
+ u32 upload_size = (u32)tilemap->sheet_size[0] * (u32)tilemap->sheet_size[1] * 4;
+ struct task *upload_task = _task_new( k_thread_main, sizeof( struct tex_upload_task ) + upload_size,
+ 0, "Texture upload task" );
+ struct tex_upload_task *args = task_buffer( upload_task );
+ args->tex = tex;
+ args->width = tilemap->sheet_size[0];
+ args->height = tilemap->sheet_size[1];
+ args->channels = 4;
+ args->flags = VG_TEX_REPEAT | VG_TEX_LINEAR | VG_TEX_NOMIP;
+
+ zero_buffer( args->image_buffer, upload_size );
+
+ for( u32 j=0; j < tilemap->set_count; j ++ )
+ {
+ struct tex_tileset *tileset = &tilemap->sets[j];
+ for( i32 tile_y = 0; tile_y < tileset->grid[1]; tile_y ++ )
+ {
+ for( i32 tile_x = 0; tile_x < tileset->grid[0]; tile_x ++ )
+ {
+ struct tex_tile *tile = &tilemap->tiles[ tileset->tile_start + (tile_y*tileset->grid[0] + tile_x) ];
+
+ for( i32 oy = -VG_TEX_TILEMAP_PADDING; oy < tileset->size[1] +VG_TEX_TILEMAP_PADDING; oy ++ )
+ {
+ for( i32 ox = -VG_TEX_TILEMAP_PADDING; ox < tileset->size[0] +VG_TEX_TILEMAP_PADDING; ox ++ )
+ {
+ i32 dst_x = tile->pixel_root[0] + ox,
+ dst_y = tile->pixel_root[1] + oy,
+ src_x = tileset->co[0] + tile_x*tileset->size[0] + i32_clamp( ox, 0, tileset->size[0]-1 ),
+ src_y = tileset->co[1] + tile_y*tileset->size[1] + i32_clamp( oy, 0, tileset->size[1]-1 ),
+ dst_i = (((tilemap->sheet_size[1] - dst_y) -1)*tilemap->sheet_size[0] + dst_x)*4,
+ src_i = (src_y*tilemap->image_size[0] + src_x)*4;
+
+ if( src_buffer[ src_i + 3 ] > 0 )
+ {
+ for( u32 k=0; k<3; k ++ )
+ args->image_buffer[ dst_i + k ] = src_buffer[ src_i + k ];
+ }
+ else
+ {
+ i32 v[4] = {0,0,0,0};
+ for( i32 dy=-1; dy<=1; dy ++ )
+ {
+ for( i32 dx=-1; dx<=1; dx ++ )
+ {
+ i32 samp_x = i32_clamp( src_x+dx, 0, tilemap->image_size[0]-1 ),
+ samp_y = i32_clamp( src_y+dy, 0, tilemap->image_size[1]-1 ),
+ samp_i = (samp_y*tilemap->image_size[0] + samp_x)*4;
+
+ if( src_buffer[ samp_i + 3 ] > 0 )
+ {
+ for( u32 k=0; k<3; k ++ )
+ v[k] += (i32)src_buffer[ samp_i + k ];
+ v[3] ++;
+ }
+ }
+ }
+ for( u32 k=0; k<3; k ++ )
+ args->image_buffer[ dst_i + k ] = v[3]? (u8)((f32)v[k] / (f32)v[3]): 0;
+ }
+ args->image_buffer[ dst_i + 3 ] = src_buffer[ src_i + 3 ];
+ }
+ }
+ }
+ }
+ }
+ _end_temporary_frame( temp_frame );
+
+ task_send( upload_task, _vg_tex_upload );
+ _async_pop_groups();
+ }
+ else
+ {
+ /* NOTE(review): get-then-create is not atomic — each call locks internally,
+ * but two loaders racing on the same path can both miss here and create
+ * duplicate entries. Confirm texture loads are serialized, or hold one
+ * lock across the lookup and create. */
+ id = vg_asset_get( &_vg_tex.asset_list, path );
+ if( id == 0 )
+ {
+ $log( $info, {"Loading texture asset: "}, {path} );
+ id = vg_asset_create( &_vg_tex.asset_list, path );
+ struct vg_tex *tex = &_vg_tex.assets[ vg_asset_index( &_vg_tex.asset_list, id ) ];
+ _vg_tex_load( tex, path, VG_TEX_REPEAT | VG_TEX_LINEAR | VG_TEX_NOMIP | VG_TEX_FLIP_V );
+ }
+ }
+
+ return id;
+}
+
+void _vg_texture_release( u16 id )
+{
+ if( id )
+ {
+ if( vg_asset_release( &_vg_tex.asset_list, id ) )
+ {
+ vg_tex_delete( &_vg_tex.assets[ vg_asset_index( &_vg_tex.asset_list, id ) ] );
+ }
+ }
+}
+
+void _vg_texture_bind( u16 id, GLuint target, u32 slot )
+{
+ /* id 0 means "no texture" (see _vg_texture_load returning 0 for NULL path);
+ * bind NULL so vg_tex_bind falls back to the error texture, matching the
+ * behaviour of the removed vg_model_bind_texture. */
+ if( id )
+ {
+ struct vg_tex *tex = &_vg_tex.assets[ vg_asset_index( &_vg_tex.asset_list, id ) ];
+ vg_tex_bind( target, tex, slot );
+ }
+ else
+ vg_tex_bind( target, NULL, slot );
+}
#include "foundation.h"
#include "vg_opengl.h"
+#define VG_ASSET_TEXTURES_MAX 256
+#define VG_TEX_TILEMAP_PADDING 1
+
struct vg_tex
{
u32 name;
u32 flags;
};
+struct tex_tilemap
+{
+ i16 image_size[2], sheet_size[2];
+
+ struct tex_tileset
+ {
+ const c8 *alias;
+ u32 tile_start, tile_count;
+
+ i16 co[2], size[2], grid[2];
+ }
+ *sets;
+ u32 set_count;
+
+ struct tex_tile
+ {
+ i16 size[2]; // for sorting and packing: TODO: Remove this and have it be temporary
+ i16 pixel_root[2];
+ }
+ *tiles;
+};
+
#pragma pack(push,1)
union qoi_rgba_t
{
void vg_tex_delete( struct vg_tex *tex );
void _vg_tex_load( struct vg_tex *out_tex, const c8 *path, u32 flags );
b8 _vg_tex_load_stream( struct vg_tex *out_tex, struct stream *in_stream, u32 flags );
+
+u16 _vg_texture_load( const c8 *path, struct tex_tilemap *tilemap );
+void _vg_texture_release( u16 id );
+void _vg_texture_bind( u16 id, GLuint target, u32 slot );
#include "foundation.h"
+#if 0
void pool_init( struct pool_allocator *pool, struct pool_node *nodes, u16 node_count, struct pool_chain *full_chain )
{
pool->nodes = nodes;
full_chain->count = node_count;
full_chain->unused0 = 0;
}
+#endif
+
+u16 pool_allocate( struct pool_allocator *pool, struct pool_chain *free_chain, struct pool_chain *active_chain )
+{
+ u16 id = 0;
+ if( free_chain->tail )
+ {
+ id = free_chain->tail;
+ pool_switch( pool, free_chain, active_chain, id );
+ }
+ else
+ {
+ ASSERT_CRITICAL( active_chain->count < pool->count );
+ id = active_chain->count + 1;
+ struct pool_node *pnode = &pool->nodes[ id -1 ];
+ zero_buffer( pnode, sizeof(struct pool_node) );
+ pool_switch( pool, NULL, active_chain, id );
+ }
+
+ return id;
+}
+
+void pool_free( struct pool_allocator *pool, struct pool_chain *free_chain, struct pool_chain *active_chain, u16 id )
+{
+ pool_switch( pool, active_chain, free_chain, id );
+}
u32 pool_index( struct pool_allocator *pool, u16 pool_id )
{
struct pool_node *nodes;
u32 count;
};
-void pool_init( struct pool_allocator *pool, struct pool_node *nodes, u16 node_count, struct pool_chain *full_chain );
+//void pool_init( struct pool_allocator *pool, struct pool_node *nodes, u16 node_count, struct pool_chain *full_chain );
u32 pool_index( struct pool_allocator *pool, u16 pool_id );
u16 pool_reference( struct pool_allocator *pool, u16 pool_id, b8 increment );
u16 pool_next( struct pool_allocator *pool, u16 pool_id, b8 right );
+
+u16 pool_allocate( struct pool_allocator *pool, struct pool_chain *free_chain, struct pool_chain *active_chain );
+void pool_free( struct pool_allocator *pool, struct pool_chain *free_chain, struct pool_chain *active_chain, u16 id );
+
void pool_switch( struct pool_allocator *pool, struct pool_chain *source, struct pool_chain *dest, u16 which );
/* Queue
}
else
{
if( c==' '||c=='\t'||c=='\r'||c=='\n'||c=='{'||c=='}' )
is_control_character = 1;
}
$log( $warning, {"Performance: I/O file stream opened in main thread. This will cause frame stalls!"} )
}
#endif
+
+ if( !path )
+ {
+ $log( $error, {"Failure to open file stream, path is NULL"} );
+ return 0;
+ }
+
stream->posix_stream = fopen( path, (flags & k_stream_write)? "wb": "rb" );
stream->offset = 0;
stream->buffer_length = 0;
const c8 *alias;
const c8 *shader_name;
const c8 *count;
+ const c8 *expose;
+ u32 property_offset;
};
struct stretchy_allocator uniforms;
stretchy_init( &uniforms, sizeof(struct uniform_info) );
struct glsl_trans
{
+ u32 property_width;
const c8 *type, *args, *call, *end;
}
glsl_list[] =
{
- { "int", "i32 b", "glUniform1i", "b" },
- { "vec2", "f32 v[2]", "glUniform2fv", "1,v" },
- { "vec3", "f32 v[3]", "glUniform3fv", "1,v" },
- { "vec4", "f32 v[4]", "glUniform4fv", "1,v" },
- { "bool", "i32 b", "glUniform1i", "b" },
- { "mat2", "f32 m[2][2]", "glUniformMatrix2fv", "1,GL_FALSE,(f32*)m" },
- { "mat3", "f32 m[3][3]", "glUniformMatrix3fv", "1,GL_FALSE,(f32*)m" },
- { "mat4", "f32 m[4][4]", "glUniformMatrix4fv", "1,GL_FALSE,(f32*)m" },
- { "float", "f32 f", "glUniform1f", "f" },
- { "mat4x3", "f32 (*m)[4][3], u32 c","glUniformMatrix4x3fv", "c,GL_FALSE,(f32*)m" },
- { "sampler2D", "i32 i", "glUniform1i", "i" },
- { "usampler3D", "i32 i", "glUniform1i", "i" },
- { "samplerCube", "i32 i", "glUniform1i", "i" },
- { "samplerBuffer","i32 i", "glUniform1i", "i" },
+ { 1, "int", "i32 b", "glUniform1i", "b" },
+ { 2, "vec2", "f32 v[2]", "glUniform2fv", "1,v" },
+ { 3, "vec3", "f32 v[3]", "glUniform3fv", "1,v" },
+ { 4, "vec4", "f32 v[4]", "glUniform4fv", "1,v" },
+ { 1, "bool", "i32 b", "glUniform1i", "b" },
+ { 0, "mat2", "f32 m[2][2]", "glUniformMatrix2fv", "1,GL_FALSE,(f32*)m" },
+ { 0, "mat3", "f32 m[3][3]", "glUniformMatrix3fv", "1,GL_FALSE,(f32*)m" },
+ { 0, "mat4", "f32 m[4][4]", "glUniformMatrix4fv", "1,GL_FALSE,(f32*)m" },
+ { 1, "float", "f32 f", "glUniform1f", "f" },
+ { 0, "mat4x3", "f32 (*m)[4][3], u32 c","glUniformMatrix4x3fv", "c,GL_FALSE,(f32*)m" },
+ { 1, "sampler2D", "i32 i", "glUniform1i", "i" },
+ { 0, "usampler3D", "i32 i", "glUniform1i", "i" },
+ { 0, "samplerCube", "i32 i", "glUniform1i", "i" },
+ { 0, "samplerBuffer","i32 i", "glUniform1i", "i" },
};
$v_string( &C, {"struct shader _shader_definitions[] = {\n"} );
while( keyvalues_foreach( &_assembly, &it, 0, "shader" ) )
{
u32 uniform_start = stretchy_count( &uniforms ),
- uniform_count = 0;
+ uniform_count = 0,
+ property_slots = 0;
const c8 *shader_name = keyvalues_read_string( &_assembly, it, "name", NULL );
ASSERT_CRITICAL( shader_name );
uniform->type = keyvalues_read_string( &_assembly, uniform_it, "type", NULL );
uniform->alias = keyvalues_read_string( &_assembly, uniform_it, "alias", NULL );
uniform->count = keyvalues_read_string( &_assembly, uniform_it, "count", NULL );
+ uniform->expose= keyvalues_read_string( &_assembly, uniform_it, "expose", NULL );
uniform->shader_name = shader_name;
+ uniform->property_offset = property_slots;
ASSERT_CRITICAL( uniform->type && uniform->alias );
for( i32 i=0; i<ARRAY_COUNT( glsl_list ); i ++ )
if( compare_buffers( glsl_list[i].type, 0, uniform->type, 0 ) )
{
uniform->glsl_index = i;
+ if( uniform->expose )
+ property_slots += glsl_list[i].property_width;
break;
}
}
{"], "},{trans->end},{" ); }\n"});
}
- $v_string( &C, {"const c8 *_uniform_aliases[] = \n{\n"} );
+ $v_string( &C, {"const struct shader_uniform _uniform_infos[] = \n{\n"} );
$v_string( &H, {"enum shader_uniform_id\n{\n"} );
for( u32 i=0; i<stretchy_count(&uniforms); i ++ )
{
struct uniform_info *uniform = stretchy_get( &uniforms, i );
-
$v_string( &H, {" k_shader_"}, {uniform->shader_name}, {"_"}, {uniform->alias}, {",\n"} );
- $v_string( &C, {" [k_shader_"}, {uniform->shader_name}, {"_"}, {uniform->alias}, {"] = \""}, {uniform->alias},{"\",\n"} );
+ $v_string( &C, {" [k_shader_"}, {uniform->shader_name}, {"_"}, {uniform->alias}, {"] = {\n"} );
+ $v_string( &C, {" .alias = \""}, {uniform->alias},{"\",\n"} );
+ if( uniform->expose )
+ {
+ $v_string( &C, {" .expose = k_uniform_expose_"}, {uniform->expose}, {",\n"} );
+ $v_string( &C, {" .property_offset = "}, $unsigned(uniform->property_offset), {",\n"} );
+ }
+ $v_string( &C, {" .type = k_uniform_type_"}, {uniform->type},{",\n"} );
+ $v_string( &C, {" },\n"} );
}
$v_string( &H, {" k_shader_uniform_count\n};\n\n"} );
$v_string( &C, {"};\n"} );