Mirror of https://github.com/Karaka-Management/cOMS.git, synced 2026-01-10 19:08:39 +00:00

Commit faf95f3e1b (parent d0076fa2ad): general fixes

Guidelines.md (new file, 23 lines)

@@ -0,0 +1,23 @@
## Guidelines

## Mentality

* Performance is a key concern, not an afterthought
* If something needs to be broken to make it better, don't be afraid to do it.

## GPU API code

* If a function starts with `gpuapi_*`, it is supposed to be available under the same name in at least one other API as well. This makes it easier to understand what the function does across different APIs, even if someone only has experience with one of them. The parameters can still differ due to the inner workings of each API.
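A minimal sketch of this naming rule, reusing the OpenGL `gpuapi_buffer_generate` that appears later in this diff; the Vulkan signature is a hypothetical illustration of the convention, not a declaration taken from the repository:

    // OpenGL backend (gpuapi/opengl): plain size + data
    uint32 gpuapi_buffer_generate(int32 size, const void* data);

    // Vulkan backend (gpuapi/vulkan): same name and purpose, different parameters
    // because the API needs explicit device handles (hypothetical signature)
    VkBuffer gpuapi_buffer_generate(VkDevice device, VkPhysicalDevice physical_device,
                                    int32 size, const void* data);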
## Code Style

The following code style guidelines are the most important ones. They can be ignored if there is a very good reason.

* Function names must be lower snake case: `my_function()`
* Macros must be upper snake case: `MY_MACRO`
* Make functions `static` if they are only used internally in the same file
* Use `struct` instead of `class`
* Functions should take structs as parameters instead of being members of the struct ("avoid object oriented code")
* Avoid forward declarations where possible; simply order the functions and includes correctly
* C/C++ files should end with `.h` unless it is necessary to split structs and/or definitions from the implementation (e.g. architecture dependent implementations). Then use `.h` and `.cpp`.
* Todos etc. can be written in comments.
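The rules above in one short, self-contained sketch; the pool type, macro, and functions are invented purely for illustration and use `<stdint.h>` types in place of the repository's own typedefs:

    #include <stdint.h>

    #define MAX_ENTITY_COUNT 1024              // macro: UPPER_SNAKE_CASE

    struct EntityPool {                        // struct instead of class
        int32_t count;
        int32_t ids[MAX_ENTITY_COUNT];
    };

    // static: only used inside this file
    static int32_t entity_pool_find_slot(const EntityPool* pool) {
        return pool->count < MAX_ENTITY_COUNT ? pool->count : -1;
    }

    // lower snake_case, takes the struct as a parameter instead of being a member
    int32_t entity_pool_add(EntityPool* pool, int32_t id) {
        int32_t slot = entity_pool_find_slot(pool);
        if (slot < 0) {
            return -1; // pool is full
        }

        pool->ids[slot] = id;
        ++pool->count;
        return slot;
    }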
|
@ -131,7 +131,7 @@ AssetArchiveElement* asset_archive_element_find(const AssetArchive* archive, int
|
|||
|
||||
void asset_archive_load(AssetArchive* archive, const char* path, BufferMemory* buf, RingMemory* ring, int32 steps = 8)
|
||||
{
|
||||
PROFILE_VERBOSE(PROFILE_ASSET_ARCHIVE_LOAD, path);
|
||||
PROFILE(PROFILE_ASSET_ARCHIVE_LOAD, path, false, true);
|
||||
|
||||
LOG_FORMAT_1(
|
||||
"Load AssetArchive %s",
|
||||
|
|
@ -195,7 +195,7 @@ Asset* asset_archive_asset_load(const AssetArchive* archive, int32 id, AssetMana
|
|||
char id_str[9];
|
||||
int_to_hex(id, id_str);
|
||||
|
||||
PROFILE_VERBOSE(PROFILE_ASSET_ARCHIVE_ASSET_LOAD, id_str);
|
||||
PROFILE(PROFILE_ASSET_ARCHIVE_ASSET_LOAD, id_str, false, true);
|
||||
// @todo add calculation from element->type to ams index. Probably requires an app specific conversion function
|
||||
|
||||
// We have to mask 0x00FFFFFF since the highest bits define the archive id, not the element id
|
||||
|
|
@ -267,7 +267,7 @@ Asset* asset_archive_asset_load(const AssetArchive* archive, int32 id, AssetMana
|
|||
asset->vram_size = texture->image.pixel_count * image_pixel_size_from_type(texture->image.image_settings);
|
||||
asset->ram_size = asset->vram_size + sizeof(Texture);
|
||||
|
||||
#if OPENGL
|
||||
#if OPENGL || VULKAN
|
||||
// If opengl, we always flip
|
||||
if (!(texture->image.image_settings & IMAGE_SETTING_BOTTOM_TO_TOP)) {
|
||||
image_flip_vertical(ring, &texture->image);
|
||||
|
|
|
|||
|
|
@ -227,7 +227,7 @@ uint32 qoa_encode_frame(const int16* sample_data, uint32 channels, uint32 frame_
|
|||
16 scalefactors, encode all samples for the current slice and
|
||||
measure the total squared error.
|
||||
*/
|
||||
uint64 best_rank = 0;
|
||||
uint64 best_rank = UINT64_MAX;
|
||||
uint64 best_slice = 0;
|
||||
int32 best_scalefactor = 0;
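Starting `best_rank` at UINT64_MAX turns the scalefactor search into a plain minimum search: the first candidate always beats the sentinel and is then only replaced by strictly better ones. A generic sketch of that pattern; `slice_error_rank()` is a placeholder for the squared-error measurement described in the comment above:

    uint64 best_rank = UINT64_MAX;
    int32 best_scalefactor = 0;
    for (int32 sf = 0; sf < 16; sf++) {
        uint64 rank = slice_error_rank(sf); // placeholder
        if (rank < best_rank) {
            best_rank = rank;
            best_scalefactor = sf;
        }
    }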
|
||||
|
||||
|
|
|
|||
camera/Camera.h (195)

@@ -13,11 +13,24 @@
|
|||
#include "../math/matrix/MatrixFloat32.h"
|
||||
#include "../compiler/CompilerUtils.h"
|
||||
#include "CameraMovement.h"
|
||||
#include "../gpuapi/GpuApiType.h"
|
||||
|
||||
#define CAMERA_MAX_INPUTS 4
|
||||
|
||||
// @todo Please check out if we can switch to quaternions. We tried but failed.
|
||||
|
||||
/**
|
||||
* Gpu API coordinate information
|
||||
*
|
||||
* Coord-Sys. NDC-X NDC-Y NDC-Z Clip-Space-Z Y-Axis
|
||||
* DirectX left [-1, 1] [-1, 1] [0, 1] [0, 1] Up = positive
|
||||
* Opengl right [-1, 1] [-1, 1] [-1, 1] [-1, 1] Up = positive
|
||||
* Vulkan right [-1, 1] [-1, 1] [0, 1] [0, 1] Down = positive
|
||||
* Metal right [-1, 1] [-1, 1] [0, 1] [0, 1] Up = positive
|
||||
*
|
||||
* The first value in Z always represents the near value and the second value the far value
|
||||
*/
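Read as a recipe, the table above amounts to two adjustments when moving from the OpenGL conventions to the Vulkan ones; the formulas below are a sketch derived only from the table, not code from this file:

    // y_vk = -y_gl               // Vulkan's Y axis points down
    // z_vk = (z_gl + 1) / 2      // clip-space depth [-1, 1] -> [0, 1]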
|
||||
|
||||
enum CameraStateChanges : byte {
|
||||
CAMERA_STATE_CHANGE_NONE = 0,
|
||||
CAMERA_STATE_CHANGE_NORMAL = 1,
|
||||
|
|
@ -26,6 +39,7 @@ enum CameraStateChanges : byte {
|
|||
|
||||
struct Camera {
|
||||
byte state_changes;
|
||||
GpuApiType gpu_api_type;
|
||||
|
||||
v3_f32 location;
|
||||
v4_f32 orientation;
|
||||
|
|
@ -52,6 +66,56 @@ struct Camera {
|
|||
alignas(64) f32 orth[16];
|
||||
};
|
||||
|
||||
inline
|
||||
void camera_init_rh_opengl(Camera* camera) {
|
||||
camera->orientation = {0.0f, -90.0f, 0.0f, 1.0f};
|
||||
camera->front = {0.0f, 0.0f, -1.0f};
|
||||
camera->right = {1.0f, 0.0f, 0.0f};
|
||||
camera->up = {0.0f, 1.0f, 0.0f};
|
||||
camera->world_up = {0.0f, 1.0f, 0.0f};
|
||||
}
|
||||
|
||||
inline
|
||||
void camera_init_rh_vulkan(Camera* camera) {
|
||||
camera->orientation = {0.0f, -90.0f, 0.0f, 1.0f};
|
||||
camera->front = {0.0f, 0.0f, -1.0f};
|
||||
camera->right = {1.0f, 0.0f, 0.0f};
|
||||
camera->up = {0.0f, -1.0f, 0.0f};
|
||||
camera->world_up = {0.0f, -1.0f, 0.0f};
|
||||
}
|
||||
|
||||
inline
|
||||
void camera_init_lh(Camera* camera) {
|
||||
camera->orientation = {0.0f, 90.0f, 0.0f, 1.0f};
|
||||
camera->front = {0.0f, 0.0f, 1.0f};
|
||||
camera->right = {1.0f, 0.0f, 0.0f};
|
||||
camera->up = {0.0f, 1.0f, 0.0f};
|
||||
camera->world_up = {0.0f, 1.0f, 0.0f};
|
||||
}
|
||||
|
||||
inline
|
||||
void camera_init(Camera* camera) {
|
||||
camera->znear = 0.1f;
|
||||
camera->zfar = 10000.0f;
|
||||
|
||||
switch (camera->gpu_api_type) {
|
||||
case GPU_API_TYPE_NONE: {
|
||||
camera_init_rh_opengl(camera);
|
||||
} break;
|
||||
case GPU_API_TYPE_OPENGL: {
|
||||
camera_init_rh_opengl(camera);
|
||||
} break;
|
||||
case GPU_API_TYPE_VULKAN: {
|
||||
camera_init_rh_vulkan(camera);
|
||||
} break;
|
||||
case GPU_API_TYPE_DIRECTX: {
|
||||
camera_init_lh(camera);
|
||||
} break;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
}
|
||||
}
|
||||
|
||||
static inline
|
||||
void camera_update_vectors(Camera* camera) noexcept
|
||||
{
|
||||
|
|
@ -164,6 +228,8 @@ void camera_movement(
|
|||
|
||||
for (int32 i = 0; i < CAMERA_MAX_INPUTS; i++) {
|
||||
switch(movement[i]) {
|
||||
case CAMERA_MOVEMENT_NONE: {
|
||||
} break;
|
||||
case CAMERA_MOVEMENT_FORWARD: {
|
||||
camera->location.x += forward.x * velocity;
|
||||
camera->location.y += forward.y * velocity;
|
||||
|
|
@ -219,6 +285,7 @@ void camera_movement(
|
|||
camera->zoom -= velocity;
|
||||
} break;
|
||||
default: {
|
||||
UNREACHABLE();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -240,11 +307,25 @@ void camera_orth_matrix_lh(Camera* __restrict camera) noexcept
|
|||
}
|
||||
|
||||
inline
|
||||
void camera_orth_matrix_rh(Camera* __restrict camera) noexcept
|
||||
void camera_orth_matrix_rh_opengl(Camera* __restrict camera) noexcept
|
||||
{
|
||||
//mat4_identity(camera->orth);
|
||||
camera->orth[15] = 1.0f;
|
||||
mat4_ortho_sparse_rh(
|
||||
mat4_ortho_sparse_rh_opengl(
|
||||
camera->orth,
|
||||
0.0f, (f32) camera->viewport_width,
|
||||
0.0f, (f32) camera->viewport_height,
|
||||
camera->znear,
|
||||
camera->zfar
|
||||
);
|
||||
}
|
||||
|
||||
inline
|
||||
void camera_orth_matrix_rh_vulkan(Camera* __restrict camera) noexcept
|
||||
{
|
||||
//mat4_identity(camera->orth);
|
||||
camera->orth[15] = 1.0f;
|
||||
mat4_ortho_sparse_rh_vulkan(
|
||||
camera->orth,
|
||||
0.0f, (f32) camera->viewport_width,
|
||||
0.0f, (f32) camera->viewport_height,
|
||||
|
|
@ -268,7 +349,7 @@ void camera_projection_matrix_lh(Camera* __restrict camera) noexcept
|
|||
}
|
||||
|
||||
inline
|
||||
void camera_projection_matrix_rh(Camera* __restrict camera) noexcept
|
||||
void camera_projection_matrix_rh_opengl(Camera* __restrict camera) noexcept
|
||||
{
|
||||
//mat4_identity(camera->projection);
|
||||
camera->projection[15] = 1.0f;
|
||||
|
|
@ -281,6 +362,21 @@ void camera_projection_matrix_rh(Camera* __restrict camera) noexcept
|
|||
);
|
||||
}
|
||||
|
||||
inline
|
||||
void camera_projection_matrix_rh_vulkan(Camera* __restrict camera) noexcept
|
||||
{
|
||||
//mat4_identity(camera->projection);
|
||||
camera->projection[15] = 1.0f;
|
||||
// @bug Fix
|
||||
mat4_perspective_sparse_rh(
|
||||
camera->projection,
|
||||
camera->fov,
|
||||
camera->aspect,
|
||||
camera->znear,
|
||||
camera->zfar
|
||||
);
|
||||
}
|
||||
|
||||
// This is usually not used, since it is included in the view matrix
|
||||
// expects the identity matrix
|
||||
inline
|
||||
|
|
@ -332,7 +428,7 @@ camera_view_matrix_lh(Camera* __restrict camera) noexcept
|
|||
}
|
||||
|
||||
void
|
||||
camera_view_matrix_rh(Camera* __restrict camera) noexcept
|
||||
camera_view_matrix_rh_opengl(Camera* __restrict camera) noexcept
|
||||
{
|
||||
v3_f32 zaxis = { -camera->front.x, -camera->front.y, -camera->front.z };
|
||||
|
||||
|
|
@ -363,4 +459,95 @@ camera_view_matrix_rh(Camera* __restrict camera) noexcept
|
|||
camera->view[15] = 1.0f;
|
||||
}
|
||||
|
||||
void
|
||||
camera_view_matrix_rh_vulkan(Camera* __restrict camera) noexcept
|
||||
{
|
||||
v3_f32 zaxis = { -camera->front.x, -camera->front.y, -camera->front.z };
|
||||
|
||||
v3_f32 xaxis;
|
||||
vec3_cross(&xaxis, &zaxis, &camera->world_up);
|
||||
vec3_normalize(&xaxis);
|
||||
|
||||
v3_f32 yaxis;
|
||||
vec3_cross(&yaxis, &zaxis, &xaxis);
|
||||
|
||||
// We tested if it would make sense to create a vec3_dot_sse version for the 3 dot products
|
||||
// The result was that it is not faster; only with 4 dot products would we see an improvement
|
||||
camera->view[0] = xaxis.x;
|
||||
camera->view[1] = yaxis.x;
|
||||
camera->view[2] = zaxis.x;
|
||||
camera->view[3] = 0.0f;
|
||||
camera->view[4] = xaxis.y;
|
||||
camera->view[5] = yaxis.y;
|
||||
camera->view[6] = zaxis.y;
|
||||
camera->view[7] = 0.0f;
|
||||
camera->view[8] = xaxis.z;
|
||||
camera->view[9] = yaxis.z;
|
||||
camera->view[10] = zaxis.z;
|
||||
camera->view[11] = 0;
|
||||
camera->view[12] = -vec3_dot(&xaxis, &camera->location);
|
||||
camera->view[13] = -vec3_dot(&yaxis, &camera->location);
|
||||
camera->view[14] = -vec3_dot(&zaxis, &camera->location);
|
||||
camera->view[15] = 1.0f;
|
||||
}
|
||||
|
||||
inline
|
||||
f32 camera_step_closer(GpuApiType type, f32 value) noexcept {
|
||||
// WARNING: The value depends on the near and far plane.
|
||||
// The reason for this is they will get smaller and smaller with increasing zfar values
|
||||
// until the difference effectively becomes 0 -> vertices occupy the same depth value -> z-fighting
|
||||
// For safety reasons we calculate a rather generous offset.
|
||||
// @performance Maybe it makes sense in the future to just pick a small CONST epsilon value
|
||||
switch (type) {
|
||||
case GPU_API_TYPE_NONE:
|
||||
return value + (nextafterf(value, -INFINITY) - value) * 1000;
|
||||
case GPU_API_TYPE_OPENGL:
|
||||
return value + (nextafterf(value, -INFINITY) - value) * 1000;
|
||||
case GPU_API_TYPE_VULKAN:
|
||||
return value + (nextafterf(value, -INFINITY) - value) * 1000;
|
||||
case GPU_API_TYPE_DIRECTX:
|
||||
return value + (nextafterf(value, -INFINITY) - value) * 1000;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
}
|
||||
}
|
||||
|
||||
inline
|
||||
f32 camera_step_away(GpuApiType type, f32 value) noexcept {
|
||||
// WARNING: The value depends on the near and far plane.
|
||||
// The reason for this is they will get smaller and smaller with increasing zfar values
|
||||
// until the difference effectively becomes 0 -> vertices occupy the same depth value -> z-fighting
|
||||
// For safety reasons we calculate a rather generous offset.
|
||||
// @performance Maybe it makes sense in the future to just pick a small CONST epsilon value
|
||||
switch (type) {
|
||||
case GPU_API_TYPE_NONE:
|
||||
return value + (nextafterf(value, INFINITY) - value) * 1000;
|
||||
case GPU_API_TYPE_OPENGL:
|
||||
return value + (nextafterf(value, INFINITY) - value) * 1000;
|
||||
case GPU_API_TYPE_VULKAN:
|
||||
return value + (nextafterf(value, INFINITY) - value) * 1000;
|
||||
case GPU_API_TYPE_DIRECTX:
|
||||
return value + (nextafterf(value, INFINITY) - value) * 1000;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
}
|
||||
}
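Both helpers nudge a depth value by roughly 1000 ULPs (units in the last place) toward or away from the viewer, so the offset scales with the magnitude of the value instead of being a fixed epsilon. A minimal usage sketch; the wall and decal depths are invented for illustration:

    // Push a decal slightly in front of the surface it is projected onto
    f32 wall_depth  = 0.5f;
    f32 decal_depth = camera_step_closer(camera->gpu_api_type, wall_depth);
    // decal_depth is now ~1000 ULPs nearer than wall_depth, for any backend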
|
||||
|
||||
#if OPENGL
|
||||
#define camera_projection_matrix(camera) camera_projection_matrix_rh_opengl((camera))
|
||||
#define camera_orth_matrix(camera) camera_orth_matrix_rh_opengl((camera))
|
||||
#define camera_view_matrix(camera) camera_view_matrix_rh_opengl((camera))
|
||||
#define camera_translation_matrix_sparse(camera, translation) camera_translation_matrix_sparse_rh((camera), (translation))
|
||||
#elif VULKAN
|
||||
#define camera_projection_matrix(camera) camera_projection_matrix_rh_vulkan((camera))
|
||||
#define camera_orth_matrix(camera) camera_orth_matrix_rh_vulkan((camera))
|
||||
#define camera_view_matrix(camera) camera_view_matrix_rh_vulkan((camera))
|
||||
#define camera_translation_matrix_sparse(camera, translation) camera_translation_matrix_sparse_rh((camera), (translation))
|
||||
#elif DIRECTX
|
||||
#define camera_projection_matrix(camera) camera_projection_matrix_lh((camera))
|
||||
#define camera_orth_matrix(camera) camera_orth_matrix_lh((camera))
|
||||
#define camera_view_matrix(camera) camera_view_matrix_lh((camera))
|
||||
#define camera_translation_matrix_sparse(camera, translation) camera_translation_matrix_sparse_lh((camera), (translation))
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
|
@ -127,7 +127,7 @@ Asset* cmd_texture_create(AppCmdBuffer* __restrict cb, Command* __restrict cmd)
|
|||
}
|
||||
|
||||
Texture* texture = (Texture *) asset->self;
|
||||
if (cb->gpu_api_type == GPU_API_TYPE_OPENGL
|
||||
if ((cb->gpu_api_type == GPU_API_TYPE_OPENGL || cb->gpu_api_type == GPU_API_TYPE_VULKAN)
|
||||
&& !(texture->image.image_settings & IMAGE_SETTING_BOTTOM_TO_TOP)
|
||||
) {
|
||||
image_flip_vertical(cb->thrd_mem_vol, &texture->image);
|
||||
|
|
@ -158,7 +158,7 @@ Asset* cmd_font_create(AppCmdBuffer* __restrict cb, Command* __restrict cmd)
|
|||
}
|
||||
|
||||
Font* font = (Font *) asset->self;
|
||||
if (cb->gpu_api_type == GPU_API_TYPE_OPENGL) {
|
||||
if (cb->gpu_api_type == GPU_API_TYPE_OPENGL || cb->gpu_api_type == GPU_API_TYPE_VULKAN) {
|
||||
font_invert_coordinates(font);
|
||||
}
|
||||
|
||||
|
|
@ -361,7 +361,7 @@ inline Asset* cmd_texture_load_sync(AppCmdBuffer* cb, int32 asset_id) {
|
|||
// Check if asset already loaded
|
||||
char id_str[9];
|
||||
int_to_hex(asset_id, id_str);
|
||||
PROFILE_VERBOSE(PROFILE_CMD_ASSET_LOAD_SYNC, id_str);
|
||||
PROFILE(PROFILE_CMD_ASSET_LOAD_SYNC, id_str, false, true);
|
||||
|
||||
Asset* asset = thrd_ams_get_asset_wait(cb->ams, id_str);
|
||||
|
||||
|
|
@ -373,7 +373,7 @@ inline Asset* cmd_texture_load_sync(AppCmdBuffer* cb, int32 asset_id) {
|
|||
|
||||
// Setup basic texture
|
||||
Texture* texture = (Texture *) asset->self;
|
||||
if (cb->gpu_api_type == GPU_API_TYPE_OPENGL
|
||||
if ((cb->gpu_api_type == GPU_API_TYPE_OPENGL || cb->gpu_api_type == GPU_API_TYPE_VULKAN)
|
||||
&& !(texture->image.image_settings & IMAGE_SETTING_BOTTOM_TO_TOP)
|
||||
) {
|
||||
image_flip_vertical(cb->mem_vol, &texture->image);
|
||||
|
|
@ -386,7 +386,7 @@ inline Asset* cmd_texture_load_sync(AppCmdBuffer* cb, int32 asset_id) {
|
|||
|
||||
inline Asset* cmd_texture_load_sync(AppCmdBuffer* cb, const char* name) {
|
||||
LOG_FORMAT_1("Load texture %d", {{LOG_DATA_CHAR_STR, (void *) name}});
|
||||
PROFILE_VERBOSE(PROFILE_CMD_ASSET_LOAD_SYNC, name);
|
||||
PROFILE(PROFILE_CMD_ASSET_LOAD_SYNC, name, false, true);
|
||||
|
||||
// Check if asset already loaded
|
||||
Asset* asset = thrd_ams_get_asset_wait(cb->ams, name);
|
||||
|
|
@ -400,7 +400,7 @@ inline Asset* cmd_texture_load_sync(AppCmdBuffer* cb, const char* name) {
|
|||
|
||||
// Setup basic texture
|
||||
Texture* texture = (Texture *) asset->self;
|
||||
if (cb->gpu_api_type == GPU_API_TYPE_OPENGL
|
||||
if ((cb->gpu_api_type == GPU_API_TYPE_OPENGL || cb->gpu_api_type == GPU_API_TYPE_VULKAN)
|
||||
&& !(texture->image.image_settings & IMAGE_SETTING_BOTTOM_TO_TOP)
|
||||
) {
|
||||
image_flip_vertical(cb->mem_vol, &texture->image);
|
||||
|
|
@ -419,7 +419,7 @@ inline Asset* cmd_font_load_sync(AppCmdBuffer* cb, int32 asset_id)
|
|||
char id_str[9];
|
||||
int_to_hex(asset_id, id_str);
|
||||
|
||||
PROFILE_VERBOSE(PROFILE_CMD_FONT_LOAD_SYNC, id_str);
|
||||
PROFILE(PROFILE_CMD_FONT_LOAD_SYNC, id_str, false, true);
|
||||
|
||||
Asset* asset = thrd_ams_get_asset_wait(cb->ams, id_str);
|
||||
|
||||
|
|
@ -431,7 +431,7 @@ inline Asset* cmd_font_load_sync(AppCmdBuffer* cb, int32 asset_id)
|
|||
|
||||
// Setup font
|
||||
Font* font = (Font *) asset->self;
|
||||
if (cb->gpu_api_type == GPU_API_TYPE_OPENGL) {
|
||||
if (cb->gpu_api_type == GPU_API_TYPE_OPENGL || cb->gpu_api_type == GPU_API_TYPE_VULKAN) {
|
||||
font_invert_coordinates(font);
|
||||
}
|
||||
|
||||
|
|
@ -443,7 +443,7 @@ inline Asset* cmd_font_load_sync(AppCmdBuffer* cb, int32 asset_id)
|
|||
inline Asset* cmd_font_load_sync(AppCmdBuffer* cb, const char* name)
|
||||
{
|
||||
LOG_FORMAT_1("Load font %s", {{LOG_DATA_CHAR_STR, (void *) name}});
|
||||
PROFILE_VERBOSE(PROFILE_CMD_FONT_LOAD_SYNC, name);
|
||||
PROFILE(PROFILE_CMD_FONT_LOAD_SYNC, name, false, true);
|
||||
|
||||
// Check if asset already loaded
|
||||
Asset* asset = thrd_ams_get_asset_wait(cb->ams, name);
|
||||
|
|
@ -457,7 +457,7 @@ inline Asset* cmd_font_load_sync(AppCmdBuffer* cb, const char* name)
|
|||
|
||||
// Setup font
|
||||
Font* font = (Font *) asset->self;
|
||||
if (cb->gpu_api_type == GPU_API_TYPE_OPENGL) {
|
||||
if (cb->gpu_api_type == GPU_API_TYPE_OPENGL || cb->gpu_api_type == GPU_API_TYPE_VULKAN) {
|
||||
font_invert_coordinates(font);
|
||||
}
|
||||
|
||||
|
|
@ -471,7 +471,7 @@ UILayout* cmd_layout_load_sync(
|
|||
AppCmdBuffer* __restrict cb,
|
||||
UILayout* __restrict layout, const char* __restrict layout_path
|
||||
) {
|
||||
PROFILE_VERBOSE(PROFILE_CMD_LAYOUT_LOAD_SYNC, layout_path);
|
||||
PROFILE(PROFILE_CMD_LAYOUT_LOAD_SYNC, layout_path, false, true);
|
||||
LOG_FORMAT_1("Load layout %s", {{LOG_DATA_CHAR_STR, (void *) layout_path}});
|
||||
|
||||
FileBody layout_file = {};
|
||||
|
|
@ -492,7 +492,7 @@ UIThemeStyle* cmd_theme_load_sync(
|
|||
AppCmdBuffer* __restrict cb,
|
||||
UIThemeStyle* __restrict theme, const char* __restrict theme_path
|
||||
) {
|
||||
PROFILE_VERBOSE(PROFILE_CMD_THEME_LOAD_SYNC, theme_path);
|
||||
PROFILE(PROFILE_CMD_THEME_LOAD_SYNC, theme_path, false, true);
|
||||
LOG_FORMAT_1("Load theme %s", {{LOG_DATA_CHAR_STR, (void *) theme_path}});
|
||||
|
||||
FileBody theme_file = {};
|
||||
|
|
@ -518,7 +518,7 @@ UILayout* cmd_ui_load_sync(
|
|||
UIThemeStyle* __restrict theme, const char* __restrict theme_path,
|
||||
const Camera* __restrict camera
|
||||
) {
|
||||
PROFILE_VERBOSE(PROFILE_CMD_UI_LOAD_SYNC, layout_path);
|
||||
PROFILE(PROFILE_CMD_UI_LOAD_SYNC, layout_path, false, true);
|
||||
LOG_FORMAT_1("Load ui with layout %s and theme %s", {{LOG_DATA_CHAR_STR, (void *) layout_path}, {LOG_DATA_CHAR_STR, (void *) theme_path}});
|
||||
|
||||
if (!cmd_layout_load_sync(cb, layout, layout_path)) {
|
||||
|
|
|
|||
|
|
@ -17,7 +17,7 @@
|
|||
#define EXPORT_LIB extern "C" __attribute__((visibility("default")))
|
||||
|
||||
#if DEBUG
|
||||
#define UNREACHABLE() ASSERT_SIMPLE(false)
|
||||
#define UNREACHABLE() ASSERT_SIMPLE(false); __builtin_unreachable()
|
||||
#else
|
||||
#define UNREACHABLE() __builtin_unreachable()
|
||||
#endif
|
||||
|
|
|
|||
|
|
@ -21,7 +21,7 @@
|
|||
typedef SSIZE_T ssize_t;
|
||||
|
||||
#if DEBUG
|
||||
#define UNREACHABLE() ASSERT_SIMPLE(false) __assume(0)
|
||||
#define UNREACHABLE() ASSERT_SIMPLE(false); __assume(0)
|
||||
#else
|
||||
#define UNREACHABLE() __assume(0)
|
||||
#endif
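On both compilers the debug build now asserts before the unreachable hint instead of silently relying on it, while the release build keeps only the optimizer hint. A minimal usage sketch; `use_opengl()` and `use_vulkan()` are placeholders, the enum values exist elsewhere in this diff:

    GpuApiType backend = GPU_API_TYPE_OPENGL;
    switch (backend) {
        case GPU_API_TYPE_OPENGL:
            use_opengl();  // placeholder
            break;
        case GPU_API_TYPE_VULKAN:
            use_vulkan();  // placeholder
            break;
        default:
            // Debug: ASSERT_SIMPLE(false) fires first; Release: pure compiler hint
            UNREACHABLE();
    }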
|
||||
|
|
|
|||
environment/Universe.h (new file, 142 lines)

@@ -0,0 +1,142 @@
|
|||
#ifndef TOS_ENVIRONMENT_UNIVERSE_H
|
||||
#define TOS_ENVIRONMENT_UNIVERSE_H
|
||||
|
||||
#include "../stdlib/Types.h"
|
||||
|
||||
enum StarType {
|
||||
STAR_TYPE_STAR,
|
||||
STAR_TYPE_RED_GIANT,
|
||||
STAR_TYPE_WHITE_DWARF,
|
||||
STAR_TYPE_NEUTRON_STAR,
|
||||
STAR_TYPE_SUPERGIANT,
|
||||
STAR_TYPE_HYPERGIANT,
|
||||
STAR_TYPE_PULSAR,
|
||||
STAR_TYPE_VARIABLE_STAR,
|
||||
STAR_TYPE_QUARK_STAR,
|
||||
STAR_TYPE_BOSON_STAR,
|
||||
STAR_TYPE_STRANGE_STAR,
|
||||
};
|
||||
|
||||
struct StarDistribution {
|
||||
int16 count_min; // per galaxy
|
||||
int16 count_max; // per galaxy
|
||||
int64 age_min;
|
||||
int64 age_max;
|
||||
f32 mass_min; // in relation to sun
|
||||
f32 mass_max; // in relation to sun
|
||||
int32 radius_min; // maybe use age instead, since that basically defines the state of the star
|
||||
int32 radius_max; // maybe use age instead, since that basically defines the state of the star
|
||||
f32 rotation_min;
|
||||
f32 rotation_max;
|
||||
byte multiple_max;
|
||||
};
|
||||
|
||||
static const StarDistribution STAR_TYPE_DISTRIBUTION[] = {
|
||||
{ // STAR_TYPE_STAR
|
||||
.count_min = 800,
|
||||
.count_max = 1000,
|
||||
.age_min = 0,
|
||||
.age_max = 0,
|
||||
.mass_min = 0,
|
||||
.mass_max = 0,
|
||||
.radius_min = 0,
|
||||
.radius_max = 0,
|
||||
.rotation_min = 0,
|
||||
.rotation_max = 0,
|
||||
.multiple_max = 5 // higher = less likely (1 = no multiple allowed)
|
||||
}
|
||||
};
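A minimal sketch of how one of these distribution entries could be sampled during generation; the use of `rand()` and the helper name are assumptions for illustration, not part of this header:

    #include <stdlib.h>

    // Pick a star count for one galaxy from the table above
    static int16 star_count_sample(const StarDistribution* dist) {
        int32 range = (int32) (dist->count_max - dist->count_min) + 1;
        return (int16) (dist->count_min + rand() % range);
    }

    // Usage: int16 stars = star_count_sample(&STAR_TYPE_DISTRIBUTION[STAR_TYPE_STAR]);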
|
||||
|
||||
enum BlackHoleType {
|
||||
BLACK_HOLE_TYPE_STELLAR,
|
||||
BLACK_HOLE_TYPE_SUPERMASSIVE,
|
||||
BLACK_HOLE_TYPE_INTERMEDIATE,
|
||||
BLACK_HOLE_TYPE_PRIMORDIAL,
|
||||
BLACK_HOLE_TYPE_MICRO,
|
||||
};
|
||||
|
||||
enum PlanetType {
|
||||
PLANET_TYPE_TERRESTRIAL_PLANET,
|
||||
PLANET_TYPE_GAS_GIANT,
|
||||
PLANET_TYPE_ICE_GIANTS,
|
||||
PLANET_TYPE_DWARF_PLANET,
|
||||
PLANET_TYPE_ROUGE,
|
||||
};
|
||||
|
||||
enum AsteroidType {
|
||||
ASTEROID_TYPE_BELT_ASTEROID,
|
||||
ASTEROID_TYPE_TROJAN,
|
||||
ASTEROID_TYPE_WANDERING,
|
||||
};
|
||||
|
||||
enum GalaxyType {
|
||||
GALAXY_TYPE_SPIRAL,
|
||||
GALAXY_TYPE_ELLIPTICAL,
|
||||
GALAXY_TYPE_IRREGULAR,
|
||||
};
|
||||
|
||||
enum NebulaeType {
|
||||
NEBULA_TYPE_EMISSION_NEBULAE,
|
||||
NEBULA_TYPE_REFLECTION_NEBULAE,
|
||||
NEBULA_TYPE_DARK_NEBULAE,
|
||||
NEBULA_TYPE_PLANETARY_NEBULAE,
|
||||
NEBULA_TYPE_SUPERNOVA_NEBULAE,
|
||||
};
|
||||
|
||||
struct Star {
|
||||
StarType type;
|
||||
bool is_pulsar;
|
||||
bool is_magnetar;
|
||||
};
|
||||
|
||||
struct WormHole {
|
||||
|
||||
};
|
||||
|
||||
struct MegaStructure {
|
||||
|
||||
};
|
||||
|
||||
struct BlackHole {
|
||||
BlackHoleType type;
|
||||
bool is_quasar;
|
||||
f32 rotation;
|
||||
f32 charge;
|
||||
};
|
||||
|
||||
struct CelestialBody {
|
||||
|
||||
};
|
||||
|
||||
struct Galaxy {
|
||||
CelestialBody* celestial_bodies;
|
||||
int32 celestial_body_count;
|
||||
|
||||
|
||||
};
|
||||
|
||||
struct Universe {
|
||||
Galaxy* galaxies;
|
||||
int32 galaxy_count;
|
||||
};
|
||||
|
||||
#define GALAXY_COUNT 100
|
||||
|
||||
#define GALAXY_STAR_COUNT_MIN 800
|
||||
#define GALAXY_STAR_COUNT_MAX 1000
|
||||
|
||||
#define STAR_PLANET_COUNT_MIN 800
|
||||
#define STAR_PLANET_COUNT_MAX 1000
|
||||
|
||||
void galaxy_generate(Galaxy* galaxy) {
    // @todo Incomplete stub; the loop bound below is an assumption
    for (int32 i = 0; i < galaxy->celestial_body_count; ++i) {
        // @todo generate celestial body i
    }
}
|
||||
|
||||
void universe_generate(Universe* universe) {
|
||||
for (int32 i = 0; i < GALAXY_COUNT; ++i) {
|
||||
galaxy_generate(&universe->galaxies[i]);
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
@ -29,7 +29,7 @@ void* cmd_shader_load_sync(
|
|||
AppCmdBuffer* __restrict cb, Shader* __restrict shader, const int32* __restrict shader_ids,
|
||||
ID3D12Device* __restrict device, ID3D12PipelineState** __restrict pipeline, ID3D12RootSignature* __restrict pipeline_layout
|
||||
) {
|
||||
PROFILE_VERBOSE(PROFILE_CMD_SHADER_LOAD_SYNC, "");
|
||||
PROFILE(PROFILE_CMD_SHADER_LOAD_SYNC, NULL, false, true);
|
||||
char asset_id[9];
|
||||
|
||||
ID3DBlob* shader_assets[SHADER_TYPE_SIZE];
|
||||
|
|
|
|||
|
|
@ -66,7 +66,7 @@ ID3D12PipelineState* pipeline_make(
|
|||
ID3DBlob* fragment_shader,
|
||||
ID3DBlob*
|
||||
) {
|
||||
PROFILE_VERBOSE(PROFILE_PIPELINE_MAKE, "");
|
||||
PROFILE(PROFILE_PIPELINE_MAKE, NULL, false, true);
|
||||
LOG_1("Create pipeline");
|
||||
// @todo We need to find a way to do this somewhere else:
|
||||
D3D12_INPUT_ELEMENT_DESC input_element_info[] = {
|
||||
|
|
|
|||
|
|
@ -23,7 +23,7 @@ void* cmd_shader_load(AppCmdBuffer*, Command*) {
|
|||
}
|
||||
|
||||
void* cmd_shader_load_sync(AppCmdBuffer* __restrict cb, Shader* __restrict shader, const int32* __restrict shader_ids) {
|
||||
PROFILE_VERBOSE(PROFILE_CMD_SHADER_LOAD_SYNC, "");
|
||||
PROFILE(PROFILE_CMD_SHADER_LOAD_SYNC, NULL, false, true);
|
||||
char asset_id[9];
|
||||
|
||||
int32 shader_assets[SHADER_TYPE_SIZE];
|
||||
|
|
|
|||
|
|
@ -17,6 +17,7 @@
|
|||
#include "../../utils/StringUtils.h"
|
||||
#include "../../log/Log.h"
|
||||
#include "../../log/Stats.h"
|
||||
#include "../../log/PerformanceProfiler.h"
|
||||
#include "../../system/FileUtils.cpp"
|
||||
#include "../RenderUtils.h"
|
||||
#include "Opengl.h"
|
||||
|
|
@ -169,7 +170,7 @@ void load_texture_to_gpu(const Texture* texture, int32 mipmap_level = 0)
|
|||
glGenerateMipmap(GL_TEXTURE_2D);
|
||||
}
|
||||
|
||||
LOG_INCREMENT_BY(DEBUG_COUNTER_GPU_UPLOAD, texture->image.pixel_count * image_pixel_size_from_type(texture->image.image_settings));
|
||||
LOG_INCREMENT_BY(DEBUG_COUNTER_GPU_VERTEX_UPLOAD, texture->image.pixel_count * image_pixel_size_from_type(texture->image.image_settings));
|
||||
}
|
||||
|
||||
inline
|
||||
|
|
@ -341,7 +342,7 @@ uint32 gpuapi_buffer_generate(int32 size, const void* data)
|
|||
glBindBuffer(GL_ARRAY_BUFFER, vbo);
|
||||
glBufferData(GL_ARRAY_BUFFER, size, data, GL_STATIC_DRAW);
|
||||
|
||||
LOG_INCREMENT_BY(DEBUG_COUNTER_GPU_UPLOAD, size);
|
||||
LOG_INCREMENT_BY(DEBUG_COUNTER_GPU_VERTEX_UPLOAD, size);
|
||||
|
||||
return vbo;
|
||||
}
|
||||
|
|
@ -355,7 +356,7 @@ uint32 gpuapi_buffer_generate_dynamic(int32 size, const void* data)
|
|||
glBindBuffer(GL_ARRAY_BUFFER, vbo);
|
||||
glBufferData(GL_ARRAY_BUFFER, size, data, GL_DYNAMIC_DRAW);
|
||||
|
||||
LOG_INCREMENT_BY(DEBUG_COUNTER_GPU_UPLOAD, size);
|
||||
LOG_INCREMENT_BY(DEBUG_COUNTER_GPU_VERTEX_UPLOAD, size);
|
||||
|
||||
return vbo;
|
||||
}
|
||||
|
|
@ -388,7 +389,7 @@ void gpuapi_buffer_update_dynamic(uint32 vbo, int32 size, const void* data)
|
|||
glBindBuffer(GL_ARRAY_BUFFER, vbo);
|
||||
glBufferData(GL_ARRAY_BUFFER, size, data, GL_DYNAMIC_DRAW);
|
||||
|
||||
LOG_INCREMENT_BY(DEBUG_COUNTER_GPU_UPLOAD, size);
|
||||
LOG_INCREMENT_BY(DEBUG_COUNTER_GPU_VERTEX_UPLOAD, size);
|
||||
}
|
||||
|
||||
inline
|
||||
|
|
@ -398,7 +399,7 @@ void gpuapi_buffer_update_sub(uint32 vbo, int32 offset, int32 size, const void*
|
|||
glBufferSubData(GL_ARRAY_BUFFER, offset, size, data);
|
||||
ASSERT_GPU_API();
|
||||
|
||||
LOG_INCREMENT_BY(DEBUG_COUNTER_GPU_UPLOAD, size);
|
||||
LOG_INCREMENT_BY(DEBUG_COUNTER_GPU_VERTEX_UPLOAD, size);
|
||||
}
|
||||
|
||||
inline
|
||||
|
|
|
|||
|
|
@ -12,6 +12,8 @@
|
|||
#include "../../stdlib/Types.h"
|
||||
#include "../../memory/RingMemory.h"
|
||||
#include "../../log/Log.h"
|
||||
#include "../../log/Stats.h"
|
||||
#include "../../log/PerformanceProfiler.h"
|
||||
#include "../../object/Vertex.h"
|
||||
#include "Shader.h"
|
||||
#include "Opengl.h"
|
||||
|
|
@ -39,61 +41,72 @@ int32 shader_type_index(ShaderType type)
|
|||
}
|
||||
|
||||
// Set value based on uniform location
|
||||
inline
|
||||
// @todo change naming to gpuapi_uniform_buffer_update (same as vulkan)
|
||||
// @todo change from upload to uniform upload since it is a special form of upload
|
||||
FORCE_INLINE
|
||||
void shader_set_value(uint32 location, bool value)
|
||||
{
|
||||
glUniform1i(location, (int32) value);
|
||||
LOG_INCREMENT_BY(DEBUG_COUNTER_GPU_UNIFORM_UPLOAD, sizeof(value));
|
||||
}
|
||||
|
||||
inline
|
||||
FORCE_INLINE
|
||||
void shader_set_value(uint32 location, int32 value)
|
||||
{
|
||||
glUniform1i(location, value);
|
||||
LOG_INCREMENT_BY(DEBUG_COUNTER_GPU_UNIFORM_UPLOAD, sizeof(value));
|
||||
}
|
||||
|
||||
inline
|
||||
FORCE_INLINE
|
||||
void shader_set_value(uint32 location, f32 value)
|
||||
{
|
||||
glUniform1f(location, value);
|
||||
LOG_INCREMENT_BY(DEBUG_COUNTER_GPU_UNIFORM_UPLOAD, sizeof(value));
|
||||
}
|
||||
|
||||
inline
|
||||
FORCE_INLINE
|
||||
void shader_set_v2(uint32 location, const f32* value)
|
||||
{
|
||||
glUniform2fv(location, 1, value);
|
||||
LOG_INCREMENT_BY(DEBUG_COUNTER_GPU_UNIFORM_UPLOAD, sizeof(*value) * 2);
|
||||
}
|
||||
|
||||
inline
|
||||
FORCE_INLINE
|
||||
void shader_set_v3(uint32 location, const f32* value)
|
||||
{
|
||||
glUniform3fv(location, 1, value);
|
||||
LOG_INCREMENT_BY(DEBUG_COUNTER_GPU_UNIFORM_UPLOAD, sizeof(*value) * 3);
|
||||
}
|
||||
|
||||
inline
|
||||
FORCE_INLINE
|
||||
void shader_set_v4(uint32 location, const f32* value)
|
||||
{
|
||||
glUniform4fv(location, 1, value);
|
||||
LOG_INCREMENT_BY(DEBUG_COUNTER_GPU_UNIFORM_UPLOAD, sizeof(*value) * 4);
|
||||
}
|
||||
|
||||
inline
|
||||
FORCE_INLINE
|
||||
void shader_set_m2(uint32 location, const f32* value)
|
||||
{
|
||||
glUniformMatrix2fv(location, 1, GL_FALSE, value);
|
||||
LOG_INCREMENT_BY(DEBUG_COUNTER_GPU_UNIFORM_UPLOAD, sizeof(*value) * 4);
|
||||
}
|
||||
|
||||
inline
|
||||
FORCE_INLINE
|
||||
void shader_set_m3(uint32 location, const f32* value)
|
||||
{
|
||||
glUniformMatrix3fv(location, 1, GL_FALSE, value);
|
||||
LOG_INCREMENT_BY(DEBUG_COUNTER_GPU_UNIFORM_UPLOAD, sizeof(*value) * 9);
|
||||
}
|
||||
|
||||
inline
|
||||
FORCE_INLINE
|
||||
void shader_set_m4(uint32 location, const f32* value)
|
||||
{
|
||||
glUniformMatrix4fv(location, 1, GL_FALSE, value);
|
||||
LOG_INCREMENT_BY(DEBUG_COUNTER_GPU_UNIFORM_UPLOAD, sizeof(*value) * 16);
|
||||
}
|
||||
|
||||
inline
|
||||
FORCE_INLINE
|
||||
uint32 shader_get_attrib_location(uint32 id, const char* name)
|
||||
{
|
||||
// By using this you can retrieve the attribute location at the point where and when you know the variable name
|
||||
|
|
@ -244,12 +257,14 @@ int32 program_get_size(uint32 program)
|
|||
return size;
|
||||
}
|
||||
|
||||
// @todo Instead of passing the shaders one by one, pass one array called ShaderStage* shader_stages
|
||||
// This way we can handle this more dynamic
|
||||
GLuint pipeline_make(
|
||||
GLuint vertex_shader,
|
||||
GLuint fragment_shader,
|
||||
GLint geometry_shader
|
||||
) {
|
||||
PROFILE_VERBOSE(PROFILE_PIPELINE_MAKE, "");
|
||||
PROFILE(PROFILE_PIPELINE_MAKE, NULL, false, true);
|
||||
LOG_1("Create pipeline");
|
||||
GLuint program = glCreateProgram();
|
||||
|
||||
|
|
@ -302,7 +317,7 @@ GLuint pipeline_make(
|
|||
}
|
||||
|
||||
// @question Depending on how the different gpu apis work we may want to pass Shader* to have a uniform structure
|
||||
inline
|
||||
FORCE_INLINE
|
||||
void pipeline_use(uint32 id)
|
||||
{
|
||||
glUseProgram(id);
|
||||
|
|
|
|||
|
|
@ -26,7 +26,7 @@ void* cmd_shader_load_sync(
|
|||
VkDevice device, VkRenderPass render_pass, VkPipelineLayout* __restrict pipeline_layout, VkPipeline* __restrict pipeline,
|
||||
VkDescriptorSetLayout* __restrict descriptor_set_layouts
|
||||
) {
|
||||
PROFILE_VERBOSE(PROFILE_CMD_SHADER_LOAD_SYNC, "");
|
||||
PROFILE(PROFILE_CMD_SHADER_LOAD_SYNC, NULL, false, true);
|
||||
char asset_id[9];
|
||||
|
||||
VkShaderModule shader_assets[SHADER_TYPE_SIZE];
|
||||
|
|
|
|||
|
|
@ -16,6 +16,9 @@
|
|||
#include "../GpuAttributeType.h"
|
||||
#include "../../object/Vertex.h"
|
||||
#include "../../log/Log.h"
|
||||
#include "../../log/Stats.h"
|
||||
#include "../../log/PerformanceProfiler.h"
|
||||
#include "../../log/PerformanceProfiler.h"
|
||||
|
||||
inline
|
||||
uint32_t shader_get_uniform_location(
|
||||
|
|
@ -90,28 +93,28 @@ void gpuapi_attribute_info_create(GpuAttributeType type, VkVertexInputAttributeD
|
|||
attr[0] = {
|
||||
.location = 0,
|
||||
.binding = 0,
|
||||
.format = VK_FORMAT_R32G32_SFLOAT,
|
||||
.format = VK_FORMAT_R32G32B32_SFLOAT,
|
||||
.offset = offsetof(Vertex3D, position)
|
||||
};
|
||||
|
||||
attr[1] = {
|
||||
.location = 1,
|
||||
.binding = 0,
|
||||
.format = VK_FORMAT_R32G32_SFLOAT,
|
||||
.format = VK_FORMAT_R32G32B32_SFLOAT,
|
||||
.offset = offsetof(Vertex3D, normal)
|
||||
};
|
||||
|
||||
attr[2] = {
|
||||
.location = 2,
|
||||
.binding = 0,
|
||||
.format = VK_FORMAT_R32G32_SFLOAT,
|
||||
.format = VK_FORMAT_R32G32B32_SFLOAT,
|
||||
.offset = offsetof(Vertex3D, tex_coord)
|
||||
};
|
||||
|
||||
attr[3] = {
|
||||
.location = 3,
|
||||
.binding = 0,
|
||||
.format = VK_FORMAT_R32G32B32_SFLOAT,
|
||||
.format = VK_FORMAT_R32_UINT,
|
||||
.offset = offsetof(Vertex3D, color)
|
||||
};
|
||||
} return;
|
||||
|
|
@ -119,14 +122,14 @@ void gpuapi_attribute_info_create(GpuAttributeType type, VkVertexInputAttributeD
|
|||
attr[0] = {
|
||||
.location = 0,
|
||||
.binding = 0,
|
||||
.format = VK_FORMAT_R32G32_SFLOAT,
|
||||
.format = VK_FORMAT_R32G32B32_SFLOAT,
|
||||
.offset = offsetof(Vertex3DNormal, position)
|
||||
};
|
||||
|
||||
attr[1] = {
|
||||
.location = 1,
|
||||
.binding = 0,
|
||||
.format = VK_FORMAT_R32G32_SFLOAT,
|
||||
.format = VK_FORMAT_R32G32B32_SFLOAT,
|
||||
.offset = offsetof(Vertex3DNormal, normal)
|
||||
};
|
||||
} return;
|
||||
|
|
@ -134,14 +137,14 @@ void gpuapi_attribute_info_create(GpuAttributeType type, VkVertexInputAttributeD
|
|||
attr[0] = {
|
||||
.location = 0,
|
||||
.binding = 0,
|
||||
.format = VK_FORMAT_R32G32_SFLOAT,
|
||||
.format = VK_FORMAT_R32G32B32_SFLOAT,
|
||||
.offset = offsetof(Vertex3DColor, position)
|
||||
};
|
||||
|
||||
attr[1] = {
|
||||
.location = 1,
|
||||
.binding = 0,
|
||||
.format = VK_FORMAT_R32G32B32_SFLOAT,
|
||||
.format = VK_FORMAT_R32_UINT,
|
||||
.offset = offsetof(Vertex3DColor, color)
|
||||
};
|
||||
} return;
|
||||
|
|
@ -149,7 +152,7 @@ void gpuapi_attribute_info_create(GpuAttributeType type, VkVertexInputAttributeD
|
|||
attr[0] = {
|
||||
.location = 0,
|
||||
.binding = 0,
|
||||
.format = VK_FORMAT_R32G32_SFLOAT,
|
||||
.format = VK_FORMAT_R32G32B32_SFLOAT,
|
||||
.offset = offsetof(Vertex3DTextureColor, position)
|
||||
};
|
||||
|
||||
|
|
@ -164,14 +167,14 @@ void gpuapi_attribute_info_create(GpuAttributeType type, VkVertexInputAttributeD
|
|||
attr[0] = {
|
||||
.location = 0,
|
||||
.binding = 0,
|
||||
.format = VK_FORMAT_R32G32_SFLOAT,
|
||||
.format = VK_FORMAT_R32G32B32_SFLOAT,
|
||||
.offset = offsetof(Vertex3DSamplerTextureColor, position)
|
||||
};
|
||||
|
||||
attr[1] = {
|
||||
.location = 1,
|
||||
.binding = 0,
|
||||
.format = VK_FORMAT_R32_UINT,
|
||||
.format = VK_FORMAT_R32_SINT,
|
||||
.offset = offsetof(Vertex3DSamplerTextureColor, sampler)
|
||||
};
|
||||
|
||||
|
|
@ -193,13 +196,15 @@ void pipeline_use(VkCommandBuffer command_buffer, VkPipeline pipeline)
|
|||
vkCmdBindPipeline(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
|
||||
}
|
||||
|
||||
// @todo Instead of passing the shaders one by one, pass one array called ShaderStage* shader_stages
|
||||
// This way we can handle this more dynamic
|
||||
VkPipeline pipeline_make(
|
||||
VkDevice device, VkRenderPass render_pass, VkPipelineLayout* __restrict pipeline_layout, VkPipeline* __restrict pipeline,
|
||||
VkDescriptorSetLayout* descriptor_set_layouts,
|
||||
VkShaderModule vertex_shader, VkShaderModule fragment_shader,
|
||||
VkShaderModule
|
||||
) {
|
||||
PROFILE_VERBOSE(PROFILE_PIPELINE_MAKE, "");
|
||||
PROFILE(PROFILE_PIPELINE_MAKE, NULL, false, true);
|
||||
LOG_1("Create pipeline");
|
||||
VkPipelineShaderStageCreateInfo vertex_shader_stage_info = {};
|
||||
vertex_shader_stage_info.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
|
||||
|
|
@ -213,13 +218,16 @@ VkPipeline pipeline_make(
|
|||
fragment_shader_stage_info.module = fragment_shader;
|
||||
fragment_shader_stage_info.pName = "main";
|
||||
|
||||
VkPipelineShaderStageCreateInfo shader_stages[] = {vertex_shader_stage_info, fragment_shader_stage_info};
|
||||
VkPipelineShaderStageCreateInfo shader_stages[] = {
|
||||
vertex_shader_stage_info,
|
||||
fragment_shader_stage_info
|
||||
};
|
||||
|
||||
VkVertexInputBindingDescription binding_description;
|
||||
vulkan_vertex_binding_description(sizeof(Vertex3DTextureColor), &binding_description);
|
||||
vulkan_vertex_binding_description(sizeof(Vertex3DSamplerTextureColor), &binding_description);
|
||||
|
||||
VkVertexInputAttributeDescription input_attribute_description[2];
|
||||
gpuapi_attribute_info_create(GPU_ATTRIBUTE_TYPE_VERTEX_3D_TEXTURE_COLOR, input_attribute_description);
|
||||
VkVertexInputAttributeDescription input_attribute_description[gpuapi_attribute_count(GPU_ATTRIBUTE_TYPE_VERTEX_3D_SAMPLER_TEXTURE_COLOR)];
|
||||
gpuapi_attribute_info_create(GPU_ATTRIBUTE_TYPE_VERTEX_3D_SAMPLER_TEXTURE_COLOR, input_attribute_description);
|
||||
|
||||
VkPipelineVertexInputStateCreateInfo vertex_input_info = {};
|
||||
vertex_input_info.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
|
||||
|
|
@ -253,9 +261,16 @@ VkPipeline pipeline_make(
|
|||
multisampling.sampleShadingEnable = VK_FALSE;
|
||||
multisampling.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
|
||||
|
||||
// @todo This depends on the texture -> shouldn't be here
|
||||
VkPipelineColorBlendAttachmentState color_blend_attachment = {};
|
||||
color_blend_attachment.colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
|
||||
color_blend_attachment.blendEnable = VK_FALSE;
|
||||
color_blend_attachment.blendEnable = VK_TRUE;
|
||||
color_blend_attachment.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA;
|
||||
color_blend_attachment.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
|
||||
color_blend_attachment.colorBlendOp = VK_BLEND_OP_ADD;
|
||||
color_blend_attachment.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
|
||||
color_blend_attachment.dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
|
||||
color_blend_attachment.alphaBlendOp = VK_BLEND_OP_ADD;
|
||||
|
||||
VkPipelineColorBlendStateCreateInfo color_blending = {};
|
||||
color_blending.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
|
||||
|
|
@ -389,7 +404,7 @@ void vulkan_descriptor_sets_create(
|
|||
uint32 frames_in_flight, RingMemory* ring
|
||||
)
|
||||
{
|
||||
VkDescriptorSetLayout* layouts = (VkDescriptorSetLayout *) ring_get_memory(ring, sizeof(VkDescriptorSetLayout), 64);
|
||||
VkDescriptorSetLayout* layouts = (VkDescriptorSetLayout *) ring_get_memory(ring, sizeof(VkDescriptorSetLayout) * frames_in_flight, 64);
|
||||
for (uint32 i = 0; i < frames_in_flight; ++i) {
|
||||
layouts[i] = descriptor_set_layout;
|
||||
}
|
||||
|
|
@ -415,12 +430,20 @@ void vulkan_descriptor_sets_create(
|
|||
buffer_info.offset = 0;
|
||||
buffer_info.range = uniform_buffer_object_size;
|
||||
|
||||
VkDescriptorImageInfo image_info = {};
|
||||
image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
|
||||
image_info.imageView = texture_image_view;
|
||||
image_info.sampler = texture_sampler;
|
||||
VkDescriptorImageInfo image_info[] = {
|
||||
{
|
||||
.sampler = texture_sampler,
|
||||
.imageView = texture_image_view,
|
||||
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
|
||||
},
|
||||
{ // @bug this needs to be the ui sampler
|
||||
.sampler = texture_sampler,
|
||||
.imageView = texture_image_view,
|
||||
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
|
||||
}
|
||||
};
|
||||
|
||||
VkWriteDescriptorSet descriptor_writes[2] = {
|
||||
VkWriteDescriptorSet descriptor_writes[] = {
|
||||
{
|
||||
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
|
||||
.dstSet = descriptor_sets[i],
|
||||
|
|
@ -437,7 +460,16 @@ void vulkan_descriptor_sets_create(
|
|||
.dstArrayElement = 0,
|
||||
.descriptorCount = 1,
|
||||
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
|
||||
.pImageInfo = &image_info,
|
||||
.pImageInfo = &image_info[0],
|
||||
},
|
||||
{ // @bug this needs to be the ui sampler
|
||||
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
|
||||
.dstSet = descriptor_sets[i],
|
||||
.dstBinding = 2,
|
||||
.dstArrayElement = 0,
|
||||
.descriptorCount = 1,
|
||||
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
|
||||
.pImageInfo = &image_info[1],
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
|||
|
|
@ -28,6 +28,8 @@
|
|||
#include "../../object/Texture.h"
|
||||
#include "../../image/Image.cpp"
|
||||
#include "../../log/Log.h"
|
||||
#include "../../log/Stats.h"
|
||||
#include "../../log/PerformanceProfiler.h"
|
||||
#include "../../memory/RingMemory.h"
|
||||
#include "ShaderUtils.h"
|
||||
#include "FramesInFlightContainer.h"
|
||||
|
|
@ -959,9 +961,10 @@ void load_texture_to_gpu(
|
|||
ASSERT_GPU_API(vkCreateSampler(device, &sampler_info, NULL, &texture_sampler));
|
||||
}
|
||||
|
||||
// @todo Rename to same name as opengl (or rename opengl obviously)
|
||||
void vulkan_vertex_buffer_update(
|
||||
VkDevice device, VkPhysicalDevice physical_device, VkCommandPool command_pool, VkQueue queue,
|
||||
VkBuffer vertexBuffer,
|
||||
VkBuffer* vertexBuffer,
|
||||
const void* __restrict vertices, int32 vertex_size, int32 vertex_count
|
||||
)
|
||||
{
|
||||
|
|
@ -988,18 +991,20 @@ void vulkan_vertex_buffer_update(
|
|||
|
||||
VkBufferCopy copyRegion = {};
|
||||
copyRegion.size = bufferSize;
|
||||
vkCmdCopyBuffer(commandBuffer, stagingBuffer, vertexBuffer, 1, ©Region);
|
||||
vkCmdCopyBuffer(commandBuffer, stagingBuffer, *vertexBuffer, 1, ©Region);
|
||||
vulkan_single_commands_end(queue, commandBuffer);
|
||||
|
||||
vulkan_single_commands_free(device, command_pool, commandBuffer);
|
||||
|
||||
vkDestroyBuffer(device, stagingBuffer, NULL);
|
||||
vkFreeMemory(device, stagingBufferMemory, NULL);
|
||||
|
||||
LOG_INCREMENT_BY(DEBUG_COUNTER_GPU_VERTEX_UPLOAD, vertex_size * vertex_count);
|
||||
}
|
||||
|
||||
void vulkan_vertex_buffer_create(
|
||||
VkDevice device, VkPhysicalDevice physical_device, VkCommandPool command_pool, VkQueue queue,
|
||||
VkBuffer vertexBuffer, VkDeviceMemory vertexBufferMemory,
|
||||
VkBuffer* vertexBuffer, VkDeviceMemory vertexBufferMemory,
|
||||
const void* __restrict vertices, int32 vertex_size, int32 vertex_count
|
||||
)
|
||||
{
|
||||
|
|
@ -1026,7 +1031,7 @@ void vulkan_vertex_buffer_create(
|
|||
bufferSize,
|
||||
VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
|
||||
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
|
||||
vertexBuffer, vertexBufferMemory
|
||||
*vertexBuffer, vertexBufferMemory
|
||||
);
|
||||
|
||||
// Copy buffer
|
||||
|
|
@ -1037,7 +1042,7 @@ void vulkan_vertex_buffer_create(
|
|||
|
||||
VkBufferCopy copyRegion = {};
|
||||
copyRegion.size = bufferSize;
|
||||
vkCmdCopyBuffer(commandBuffer, stagingBuffer, vertexBuffer, 1, ©Region);
|
||||
vkCmdCopyBuffer(commandBuffer, stagingBuffer, *vertexBuffer, 1, ©Region);
|
||||
vulkan_single_commands_end(queue, commandBuffer);
|
||||
|
||||
// @todo if we change behaviour according to the comment above we don't need this
|
||||
|
|
@ -1094,6 +1099,8 @@ void vulkan_index_buffer_create(
|
|||
vkFreeMemory(device, stagingBufferMemory, NULL);
|
||||
}
|
||||
|
||||
|
||||
// @todo We also need a free function (unmap buffer)
|
||||
void vulkan_uniform_buffers_create(
|
||||
VkDevice device, VkPhysicalDevice physical_device,
|
||||
VkBuffer* __restrict uniform_buffers, VkDeviceMemory* __restrict uniform_buffers_memory, void** __restrict uniform_buffers_mapped,
|
||||
|
|
@ -1116,4 +1123,13 @@ void vulkan_uniform_buffers_create(
|
|||
}
|
||||
}
|
||||
|
||||
// @question Do we want one generalized function like this or multiple type specific like in opengl?
|
||||
void gpuapi_uniform_buffer_update(
|
||||
f32* data, uint32 data_size,
|
||||
uint32 current_image, void** __restrict uniform_buffers_mapped
|
||||
) {
|
||||
memcpy(uniform_buffers_mapped[current_image], data, data_size);
|
||||
LOG_INCREMENT_BY(DEBUG_COUNTER_GPU_UNIFORM_UPLOAD, data_size);
|
||||
}
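A minimal usage sketch for the helper above; `current_frame` and `uniform_buffers_mapped` stand for the caller's frame-in-flight index and mapped buffer array, and streaming the camera view matrix is just an illustrative payload:

    // Upload 16 floats of per-frame uniform data for the current frame in flight
    gpuapi_uniform_buffer_update(
        camera->view, sizeof(f32) * 16,
        current_frame, uniform_buffers_mapped
    );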
|
||||
|
||||
#endif
|
||||
|
|
@ -200,14 +200,14 @@ void debug_memory_reset() noexcept
|
|||
}
|
||||
}
|
||||
|
||||
#if DEBUG || INTERNAL
|
||||
#if DEBUG
|
||||
#define DEBUG_MEMORY_INIT(start, size) debug_memory_init((start), (size))
|
||||
#define DEBUG_MEMORY_READ(start, size) debug_memory_log((start), (size), MEMORY_DEBUG_TYPE_READ, __func__)
|
||||
#define DEBUG_MEMORY_WRITE(start, size) debug_memory_log((start), (size), MEMORY_DEBUG_TYPE_WRITE, __func__)
|
||||
#define DEBUG_MEMORY_DELETE(start, size) debug_memory_log((start), (size), MEMORY_DEBUG_TYPE_DELETE, __func__)
|
||||
#define DEBUG_MEMORY_RESERVE(start, size) debug_memory_reserve((start), (size), MEMORY_DEBUG_TYPE_RESERVE, __func__)
|
||||
#define DEBUG_MEMORY_SUBREGION(start, size) debug_memory_reserve((start), (size), MEMORY_DEBUG_TYPE_SUBREGION, __func__)
|
||||
#define DEBUG_MEMORY_FREE(start, size) debug_memory_free((start))
|
||||
#define DEBUG_MEMORY_FREE(start) debug_memory_free((start))
|
||||
#define DEBUG_MEMORY_RESET() debug_memory_reset()
|
||||
#else
|
||||
#define DEBUG_MEMORY_INIT(start, size) ((void) 0)
|
||||
|
|
@ -216,7 +216,7 @@ void debug_memory_reset() noexcept
|
|||
#define DEBUG_MEMORY_DELETE(start, size) ((void) 0)
|
||||
#define DEBUG_MEMORY_RESERVE(start, size) ((void) 0)
|
||||
#define DEBUG_MEMORY_SUBREGION(start, size) ((void) 0)
|
||||
#define DEBUG_MEMORY_FREE(start, size) ((void) 0)
|
||||
#define DEBUG_MEMORY_FREE(start) ((void) 0)
|
||||
#define DEBUG_MEMORY_RESET() ((void) 0)
|
||||
#endif
|
||||
|
||||
|
|
|
|||
|
|
@ -198,55 +198,19 @@ void performance_profiler_end(int32 id) noexcept
|
|||
perf->self_cycle = perf->total_cycle;
|
||||
}
|
||||
|
||||
// @question Do we want different levels of PROFILE_VERBOSE and PROFILE_STATELESS same as in Log.h
|
||||
// This would allow us to go ham in a lot of functions (e.g. file reading)
|
||||
|
||||
#if LOG_LEVEL == 4
|
||||
#if LOG_LEVEL > 1
|
||||
// Only these function can properly handle self-time calculation
|
||||
// Use these whenever you want to profile an entire function
|
||||
#define PROFILE(id) PerformanceProfiler __profile_scope_##__func__##_##__LINE__((id), __func__)
|
||||
#define PROFILE_VERBOSE(id, info) PerformanceProfiler __profile_scope_##__func__##_##__LINE__((id), __func__, (info), false, true)
|
||||
#define PROFILE_STATELESS(id, info) PerformanceProfiler __profile_scope_##__func__##_##__LINE__((id), __func__, (info), true, true)
|
||||
#define PROFILE(id, ...) PerformanceProfiler __profile_scope_##__func__##_##__LINE__((id), __func__, ##__VA_ARGS__)
|
||||
|
||||
#define PROFILE_START(id, name) if(_perf_active && *_perf_active) performance_profiler_start((id), (name))
|
||||
#define PROFILE_END(id) if(_perf_active && *_perf_active) performance_profiler_end((id))
|
||||
#define PROFILE_SCOPE(id, name) PerformanceProfiler __profile_scope_##__func__##_##__LINE__((id), (name))
|
||||
#define PROFILE_RESET(id) if(_perf_active && *_perf_active) performance_profiler_reset((id))
|
||||
#elif LOG_LEVEL == 3
|
||||
// Only these function can properly handle self-time calculation
|
||||
// Use these whenever you want to profile an entire function
|
||||
#define PROFILE(id) PerformanceProfiler __profile_scope_##__func__##_##__LINE__((id), __func__)
|
||||
#define PROFILE_VERBOSE(id, info) PerformanceProfiler __profile_scope_##__func__##_##__LINE__((id), __func__, (info), false, true)
|
||||
#define PROFILE_STATELESS(id, info) PerformanceProfiler __profile_scope_##__func__##_##__LINE__((id), __func__, (info), true, true)
|
||||
|
||||
#define PROFILE_START(id, name) if(_perf_active && *_perf_active) performance_profiler_start((id), (name))
|
||||
#define PROFILE_END(id) if(_perf_active && *_perf_active) performance_profiler_end((id))
|
||||
#define PROFILE_SCOPE(id, name) PerformanceProfiler __profile_scope_##__func__##_##__LINE__((id), (name))
|
||||
#define PROFILE_RESET(id) if(_perf_active && *_perf_active) performance_profiler_reset((id))
|
||||
#elif LOG_LEVEL == 2
|
||||
// Only these function can properly handle self-time calculation
|
||||
// Use these whenever you want to profile an entire function
|
||||
#define PROFILE(id) PerformanceProfiler __profile_scope_##__func__##_##__LINE__((id), __func__)
|
||||
#define PROFILE_VERBOSE(id, info) PerformanceProfiler __profile_scope_##__func__##_##__LINE__((id), __func__, (info), false, true)
|
||||
#define PROFILE_STATELESS(id, info) PerformanceProfiler __profile_scope_##__func__##_##__LINE__((id), __func__, (info), true, true)
|
||||
|
||||
#define PROFILE_START(id, name) if(_perf_active && *_perf_active) performance_profiler_start((id), (name))
|
||||
#define PROFILE_END(id) if(_perf_active && *_perf_active) performance_profiler_end((id))
|
||||
#define PROFILE_SCOPE(id, name) PerformanceProfiler __profile_scope_##__func__##_##__LINE__((id), (name))
|
||||
#define PROFILE_RESET(id) if(_perf_active && *_perf_active) performance_profiler_reset((id))
|
||||
#elif LOG_LEVEL == 1
|
||||
#else
|
||||
#define PROFILE(id) ((void) 0)
|
||||
#define PROFILE_VERBOSE(name, info) ((void) 0)
|
||||
#define PROFILE_STATELESS(id, info) ((void) 0)
|
||||
|
||||
#define PROFILE_START(id, name) ((void) 0)
|
||||
#define PROFILE_END(id) ((void) 0)
|
||||
#define PROFILE_SCOPE(id, name) ((void) 0)
|
||||
#define PROFILE_RESET(id) ((void) 0)
|
||||
#elif LOG_LEVEL == 0
|
||||
#define PROFILE(id) ((void) 0)
|
||||
#define PROFILE_VERBOSE(name, info) ((void) 0)
|
||||
#define PROFILE_STATELESS() ((void) 0)
|
||||
|
||||
#define PROFILE_START(id, name) ((void) 0)
|
||||
#define PROFILE_END(id) ((void) 0)
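After this consolidation a single variadic PROFILE macro covers all profiling call shapes for every LOG_LEVEL above 1. A sketch of the forms that appear elsewhere in this diff; `id_str` stands for whatever info string the call site already has:

    PROFILE(PROFILE_BUFFER_ALLOC, NULL, false, true);            // no info string (was PROFILE_VERBOSE(id, ""))
    PROFILE(PROFILE_CMD_ASSET_LOAD_SYNC, id_str, false, true);   // with an info string
    PROFILE(PROFILE_ASSET_ARCHIVE_LOAD);                         // id-only form, enabled by ##__VA_ARGS__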
|
||||
|
|
|
|||
|
|
@ -16,7 +16,13 @@
|
|||
DEBUG_COUNTER_DRIVE_READ,
|
||||
DEBUG_COUNTER_DRIVE_WRITE,
|
||||
|
||||
DEBUG_COUNTER_GPU_UPLOAD,
|
||||
DEBUG_COUNTER_GPU_VERTEX_UPLOAD,
|
||||
DEBUG_COUNTER_GPU_UNIFORM_UPLOAD,
|
||||
DEBUG_COUNTER_GPU_DRAW_CALLS,
|
||||
DEBUG_COUNTER_GPU_DOWNLOAD,
|
||||
|
||||
DEBUG_COUNTER_NETWORK_OUT_RAW,
|
||||
DEBUG_COUNTER_NETWORK_IN_RAW,
|
||||
|
||||
DEBUG_COUNTER_SIZE
|
||||
};
|
||||
|
|
|
|||
|
|
@ -707,7 +707,7 @@ void mat4_ortho_sparse_lh(
|
|||
matrix[15] = 1.0f;
|
||||
}
|
||||
|
||||
void mat4_ortho_sparse_rh(
|
||||
void mat4_ortho_sparse_rh_opengl(
|
||||
f32 *matrix,
|
||||
f32 left, f32 right, f32 bottom, f32 top,
|
||||
f32 znear, f32 zfar
|
||||
|
|
@ -728,7 +728,7 @@ void mat4_ortho_sparse_rh(
|
|||
|
||||
//matrix[8] = 0.0f;
|
||||
//matrix[9] = 0.0f;
|
||||
matrix[10] = -2.0f / fn_delta;
|
||||
matrix[10] = 2.0f / fn_delta;
|
||||
//matrix[11] = 0.0f;
|
||||
|
||||
matrix[12] = -(right + left) / rl_delta;
|
||||
|
|
@ -737,6 +737,36 @@ void mat4_ortho_sparse_rh(
|
|||
matrix[15] = 1.0f;
|
||||
}
|
||||
|
||||
void mat4_ortho_sparse_rh_vulkan(
|
||||
f32 *matrix,
|
||||
f32 left, f32 right, f32 bottom, f32 top,
|
||||
f32 znear, f32 zfar
|
||||
) noexcept {
|
||||
f32 rl_delta = right - left;
|
||||
f32 tb_delta = top - bottom;
|
||||
f32 fn_delta = zfar - znear;
|
||||
|
||||
matrix[0] = 2.0f / rl_delta;
|
||||
//matrix[1] = 0.0f;
|
||||
//matrix[2] = 0.0f;
|
||||
//matrix[3] = 0.0f;
|
||||
|
||||
//matrix[4] = 0.0f;
|
||||
matrix[5] = -2.0f / tb_delta;
|
||||
//matrix[6] = 0.0f;
|
||||
//matrix[7] = 0.0f;
|
||||
|
||||
//matrix[8] = 0.0f;
|
||||
//matrix[9] = 0.0f;
|
||||
matrix[10] = 1.0f / fn_delta;
|
||||
//matrix[11] = 0.0f;
|
||||
|
||||
matrix[12] = -(right + left) / rl_delta;
|
||||
matrix[13] = (top + bottom) / tb_delta;
|
||||
matrix[14] = -znear / fn_delta;
|
||||
matrix[15] = 1.0f;
|
||||
}
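Assuming the same column-major layout the view-matrix code in this diff uses (indices 12 to 14 hold the last column), the function above builds the following matrix; note the flipped Y row and the depth mapping from the Camera.h table, which takes z = znear to 0 and z = zfar to 1:

    M = \begin{pmatrix}
        2/(r-l) & 0        & 0       & -(r+l)/(r-l) \\
        0       & -2/(t-b) & 0       &  (t+b)/(t-b) \\
        0       & 0        & 1/(f-n) & -n/(f-n)     \\
        0       & 0        & 0       &  1
        \end{pmatrix},
    \qquad z' = \frac{z - n}{f - n}

with r = right, l = left, t = top, b = bottom, n = znear, f = zfar.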
|
||||
|
||||
void mat4_translate(f32* matrix, f32 dx, f32 dy, f32 dz) noexcept
|
||||
{
|
||||
f32 temp[16];
|
||||
|
|
|
|||
|
|
@ -34,7 +34,7 @@ inline
|
|||
void buffer_alloc(BufferMemory* buf, uint64 size, int32 alignment = 64)
|
||||
{
|
||||
ASSERT_SIMPLE(size);
|
||||
PROFILE_VERBOSE(PROFILE_BUFFER_ALLOC, "");
|
||||
PROFILE(PROFILE_BUFFER_ALLOC, NULL, false, true);
|
||||
LOG_FORMAT_1("Allocating BufferMemory: %n B", {{LOG_DATA_UINT64, &size}});
|
||||
|
||||
buf->memory = alignment < 2
|
||||
|
|
@ -49,14 +49,12 @@ void buffer_alloc(BufferMemory* buf, uint64 size, int32 alignment = 64)
|
|||
|
||||
memset(buf->memory, 0, buf->size);
|
||||
|
||||
DEBUG_MEMORY_INIT((uintptr_t) buf->memory, buf->size);
|
||||
LOG_INCREMENT_BY(DEBUG_COUNTER_MEM_ALLOC, buf->size);
|
||||
}
|
||||
|
||||
inline
|
||||
void buffer_free(BufferMemory* buf)
|
||||
{
|
||||
DEBUG_MEMORY_DELETE((uintptr_t) buf->memory, buf->size);
|
||||
if (buf->alignment < 2) {
|
||||
platform_free((void **) &buf->memory);
|
||||
} else {
|
||||
|
|
@ -78,7 +76,6 @@ void buffer_init(BufferMemory* buf, byte* data, uint64 size, int32 alignment = 6
|
|||
buf->alignment = alignment;
|
||||
buf->element_alignment = 0;
|
||||
|
||||
DEBUG_MEMORY_INIT((uintptr_t) buf->memory, buf->size);
|
||||
DEBUG_MEMORY_SUBREGION((uintptr_t) buf->memory, buf->size);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -43,7 +43,7 @@ void chunk_alloc(ChunkMemory* buf, uint32 count, uint32 chunk_size, int32 alignm
|
|||
{
|
||||
ASSERT_SIMPLE(chunk_size);
|
||||
ASSERT_SIMPLE(count);
|
||||
PROFILE_VERBOSE(PROFILE_CHUNK_ALLOC, "");
|
||||
PROFILE(PROFILE_CHUNK_ALLOC, NULL, false, true);
|
||||
LOG_1("Allocating ChunkMemory");
|
||||
|
||||
chunk_size = ROUND_TO_NEAREST(chunk_size, alignment);
|
||||
|
|
@ -63,8 +63,6 @@ void chunk_alloc(ChunkMemory* buf, uint32 count, uint32 chunk_size, int32 alignm
|
|||
|
||||
memset(buf->memory, 0, buf->size);
|
||||
|
||||
DEBUG_MEMORY_INIT((uintptr_t) buf->memory, buf->size);
|
||||
LOG_INCREMENT_BY(DEBUG_COUNTER_MEM_ALLOC, buf->size);
|
||||
LOG_FORMAT_1("Allocated ChunkMemory: %n B", {{LOG_DATA_UINT64, &buf->size}});
|
||||
}
|
||||
|
||||
|
|
@ -89,7 +87,6 @@ void chunk_init(ChunkMemory* buf, BufferMemory* data, uint32 count, uint32 chunk
|
|||
// On the other hand we could accidentally overwrite the values in free if we are not careful
|
||||
buf->free = (uint64 *) (buf->memory + count * chunk_size);
|
||||
|
||||
DEBUG_MEMORY_INIT((uintptr_t) buf->memory, buf->size);
|
||||
DEBUG_MEMORY_SUBREGION((uintptr_t) buf->memory, buf->size);
|
||||
}
|
||||
|
||||
|
|
@ -115,7 +112,6 @@ void chunk_init(ChunkMemory* buf, byte* data, uint32 count, uint32 chunk_size, i
|
|||
// On the other hand we could accidentally overwrite the values in free if we are not careful
|
||||
buf->free = (uint64 *) (buf->memory + count * chunk_size);
|
||||
|
||||
DEBUG_MEMORY_INIT((uintptr_t) buf->memory, buf->size);
|
||||
DEBUG_MEMORY_SUBREGION((uintptr_t) buf->memory, buf->size);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -38,8 +38,6 @@ void heap_alloc(Heap* heap, uint32 element_size, uint64 capacity, int32 (*compar
|
|||
heap->size = 0;
|
||||
heap->compare = compare;
|
||||
heap->helper_mem = heap->elements + element_size;
|
||||
|
||||
DEBUG_MEMORY_INIT((uintptr_t) heap->elements, element_size * capacity);
|
||||
}
|
||||
|
||||
void heap_free(Heap* heap)
|
||||
|
|
@ -60,8 +58,6 @@ void heap_init(Heap* heap, BufferMemory* buf, uint32 element_size, uint64 capaci
|
|||
heap->capacity = capacity;
|
||||
heap->size = 0;
|
||||
heap->compare = compare;
|
||||
|
||||
DEBUG_MEMORY_INIT((uintptr_t) heap->elements, element_size * capacity);
|
||||
}
|
||||
|
||||
void heapify_down(Heap* heap, uint64 index) {
|
||||
|
|
|
|||
|
|
@@ -47,7 +47,7 @@ inline
void ring_alloc(RingMemory* ring, uint64 size, uint32 alignment = 64)
{
ASSERT_SIMPLE(size);
PROFILE_VERBOSE(PROFILE_RING_ALLOC, "");
PROFILE(PROFILE_RING_ALLOC, NULL, false, true);
LOG_FORMAT_1("Allocating RingMemory: %n B", {{LOG_DATA_UINT64, &size}});

ring->memory = alignment < 2

@@ -62,8 +62,6 @@ void ring_alloc(RingMemory* ring, uint64 size, uint32 alignment = 64)
memset(ring->memory, 0, ring->size);

DEBUG_MEMORY_INIT((uintptr_t) ring->memory, ring->size);
LOG_INCREMENT_BY(DEBUG_COUNTER_MEM_ALLOC, ring->size);
LOG_FORMAT_1("Allocated RingMemory: %n B", {{LOG_DATA_UINT64, &ring->size}});
}

@@ -80,7 +78,6 @@ void ring_init(RingMemory* ring, BufferMemory* buf, uint64 size, uint32 alignmen
ring->size = size;
ring->alignment = alignment;

DEBUG_MEMORY_INIT((uintptr_t) ring->memory, ring->size);
DEBUG_MEMORY_SUBREGION((uintptr_t) ring->memory, ring->size);
}

@@ -99,15 +96,12 @@ void ring_init(RingMemory* ring, byte* buf, uint64 size, uint32 alignment = 64)
memset(ring->memory, 0, ring->size);

DEBUG_MEMORY_INIT((uintptr_t) ring->memory, ring->size);
DEBUG_MEMORY_SUBREGION((uintptr_t) ring->memory, ring->size);
}

inline
void ring_free(RingMemory* ring)
{
DEBUG_MEMORY_DELETE((uintptr_t) ring->memory, ring->size);

if (ring->alignment < 2) {
platform_free((void **) &ring->memory);
} else {

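ring_alloc/ring_free above pick the allocation path with the alignment < 2 test (plain platform_free vs. the aligned variant). Going by its name and by how the thread pool below treats it, RingMemory is a ring allocator: a bump allocator over one fixed block whose head wraps back to the start when a request no longer fits, so old allocations are recycled implicitly. The sketch below is not the repo's implementation, only the general technique:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct RingSketch {
    uint8_t* memory;
    size_t size;
    size_t head;
};

static void ring_sketch_init(RingSketch* ring, size_t size)
{
    ring->memory = (uint8_t*) calloc(1, size);
    ring->size = size;
    ring->head = 0;
}

// Returns a pointer that stays valid only until the head wraps around again.
static void* ring_sketch_get(RingSketch* ring, size_t bytes, size_t alignment)
{
    size_t aligned_head = (ring->head + alignment - 1) & ~(alignment - 1);
    if (aligned_head + bytes > ring->size) {
        aligned_head = 0; // wrap: older allocations are implicitly recycled
    }

    ring->head = aligned_head + bytes;
    return ring->memory + aligned_head;
}

static void ring_sketch_free(RingSketch* ring)
{
    free(ring->memory);
    memset(ring, 0, sizeof(*ring));
}
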
@@ -2,14 +2,14 @@
#define TOS_MODELS_SETTING_TYPES_H

enum SettingGpuDetail {
SETTING_TYPE_GPU_CUSTOM,
SETTING_TYPE_GPU_VLOW,
SETTING_TYPE_GPU_LOW,
SETTING_TYPE_GPU_MEDIUM,
SETTING_TYPE_GPU_HIGH,
SETTING_TYPE_GPU_VHIGH,
SETTING_TYPE_GPU_ULTRA,
SETTING_TYPE_GPU_NEXTGEN
SETTING_TYPE_GPU_DETAIL_CUSTOM,
SETTING_TYPE_GPU_DETAIL_VLOW,
SETTING_TYPE_GPU_DETAIL_LOW,
SETTING_TYPE_GPU_DETAIL_MEDIUM,
SETTING_TYPE_GPU_DETAIL_HIGH,
SETTING_TYPE_GPU_DETAIL_VHIGH,
SETTING_TYPE_GPU_DETAIL_ULTRA,
SETTING_TYPE_GPU_DETAIL_NEXTGEN
};

enum SettingPlayerPerspective {

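The enumerators gain the full DETAIL segment, presumably so they read like the enum they belong to and cannot collide with other SETTING_TYPE_GPU_* names. A hypothetical call site using the new identifiers (the enum is re-declared locally and the scale values are made up purely for illustration):

enum SettingGpuDetailSketch {
    SETTING_TYPE_GPU_DETAIL_CUSTOM,
    SETTING_TYPE_GPU_DETAIL_VLOW,
    SETTING_TYPE_GPU_DETAIL_LOW,
    SETTING_TYPE_GPU_DETAIL_MEDIUM,
    SETTING_TYPE_GPU_DETAIL_HIGH,
    SETTING_TYPE_GPU_DETAIL_VHIGH,
    SETTING_TYPE_GPU_DETAIL_ULTRA,
    SETTING_TYPE_GPU_DETAIL_NEXTGEN
};

// Made-up mapping from detail level to an internal render scale.
static float detail_to_render_scale(SettingGpuDetailSketch detail)
{
    switch (detail) {
        case SETTING_TYPE_GPU_DETAIL_VLOW:    return 0.50f;
        case SETTING_TYPE_GPU_DETAIL_LOW:     return 0.75f;
        case SETTING_TYPE_GPU_DETAIL_MEDIUM:  return 1.00f;
        case SETTING_TYPE_GPU_DETAIL_HIGH:    return 1.25f;
        case SETTING_TYPE_GPU_DETAIL_VHIGH:   return 1.50f;
        case SETTING_TYPE_GPU_DETAIL_ULTRA:   return 1.75f;
        case SETTING_TYPE_GPU_DETAIL_NEXTGEN: return 2.00f;
        default:                              return 1.00f; // CUSTOM is configured elsewhere
    }
}
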
@@ -33,6 +33,9 @@ void* platform_alloc(size_t size)
*((size_t *) ptr) = size;

DEBUG_MEMORY_INIT((uintptr_t) ptr, size);
LOG_INCREMENT_BY(DEBUG_COUNTER_MEM_ALLOC, size);

return (void *) ((uintptr_t) ptr + sizeof(size_t));
}

@@ -60,12 +63,17 @@ void* platform_alloc_aligned(size_t size, int32 alignment)
*((void **) ((uintptr_t) aligned_ptr - sizeof(void *) - sizeof(size_t))) = ptr;
*((size_t *) ((uintptr_t) aligned_ptr - sizeof(size_t))) = size;

DEBUG_MEMORY_INIT((uintptr_t) aligned_ptr, size);
LOG_INCREMENT_BY(DEBUG_COUNTER_MEM_ALLOC, size);

return aligned_ptr;
}

inline
void platform_free(void** ptr) {
void* actual_ptr = (void *) ((uintptr_t) *ptr - sizeof(size_t));
DEBUG_MEMORY_FREE((uintptr_t) actual_ptr);

munmap(actual_ptr, *((size_t *) actual_ptr));
*ptr = NULL;
}

@@ -73,6 +81,8 @@ void platform_free(void** ptr) {
inline
void platform_aligned_free(void** aligned_ptr) {
void* ptr = (void *) ((uintptr_t) *aligned_ptr - sizeof(void *) - sizeof(size_t));
DEBUG_MEMORY_FREE((uintptr_t) ptr);

munmap(ptr, *((size_t *) ((uintptr_t) ptr + sizeof(void *))));
*aligned_ptr = NULL;
}

@@ -93,6 +103,9 @@ void* platform_shared_alloc(int32* fd, const char* name, size_t size)
*((size_t *) shm_ptr) = size;

DEBUG_MEMORY_INIT((uintptr_t) shm_ptr, size);
LOG_INCREMENT_BY(DEBUG_COUNTER_MEM_ALLOC, size);

return (void *) ((uintptr_t) shm_ptr + sizeof(size_t));
}

@@ -116,6 +129,7 @@ void* platform_shared_open(int32* fd, const char* name, size_t size)
inline
void platform_shared_free(int32 fd, const char* name, void** ptr)
{
DEBUG_MEMORY_FREE((uintptr_t) *ptr - sizeof(size_t));
munmap((void *) ((uintptr_t) *ptr - sizeof(size_t)), *((size_t *) ((uintptr_t) *ptr - sizeof(size_t))));
*ptr = NULL;

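These hunks move the DEBUG_MEMORY_INIT / LOG_INCREMENT_BY bookkeeping into the platform allocators themselves and show the header layout they rely on: platform_alloc_aligned stores the original mmap pointer and the requested size directly in front of the aligned address, and the free path then works from that header rather than from the user pointer (munmap, unlike VirtualFree, needs a length). A self-contained sketch of the same layout, using malloc/free instead of mmap/munmap so it runs anywhere; names are illustrative, not the repo's API:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

// Layout in front of the returned pointer: [padding][original pointer][size][aligned user data]
static void* sketch_alloc_aligned(size_t size, size_t alignment)
{
    void* raw = malloc(size + alignment + sizeof(void*) + sizeof(size_t));
    if (!raw) {
        return NULL;
    }

    uintptr_t base = (uintptr_t) raw + sizeof(void*) + sizeof(size_t);
    uintptr_t aligned = (base + alignment - 1) & ~(alignment - 1);

    // Same idea as the diff: stash the original pointer and the size right before
    // the aligned address so the free path can recover both.
    *((void**) (aligned - sizeof(void*) - sizeof(size_t))) = raw;
    *((size_t*) (aligned - sizeof(size_t))) = size;

    return (void*) aligned;
}

static void sketch_free_aligned(void** aligned_ptr)
{
    void*  raw  = *((void**) ((uintptr_t) *aligned_ptr - sizeof(void*) - sizeof(size_t)));
    size_t size = *((size_t*) ((uintptr_t) *aligned_ptr - sizeof(size_t)));

    printf("releasing %zu bytes\n", size); // the repo's free path calls munmap here instead
    free(raw);
    *aligned_ptr = NULL;
}

int main(void)
{
    void* p = sketch_alloc_aligned(100, 64);
    printf("64-byte aligned: %d\n", (int) (((uintptr_t) p % 64) == 0));
    sketch_free_aligned(&p);
    return 0;
}
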
@@ -143,7 +143,7 @@ FileHandle file_append_handle(const char* path) {
inline
bool file_exists(const char* path) {
PROFILE_VERBOSE(PROFILE_FILE_UTILS, path);
PROFILE(PROFILE_FILE_UTILS, path, false, true);

struct stat buffer;
const char* full_path = path;

@@ -159,7 +159,7 @@ bool file_exists(const char* path) {
inline
bool file_copy(const char* __restrict src, const char* __restrict dst) {
PROFILE_VERBOSE(PROFILE_FILE_UTILS, src);
PROFILE(PROFILE_FILE_UTILS, src, false, true);

char src_full_path[MAX_PATH];
char dst_full_path[MAX_PATH];

@@ -213,7 +213,7 @@ bool file_copy(const char* __restrict src, const char* __restrict dst) {
inline
void file_read(const char* __restrict path, FileBody* __restrict file, RingMemory* __restrict ring = NULL) {
PROFILE_VERBOSE(PROFILE_FILE_UTILS, path);
PROFILE(PROFILE_FILE_UTILS, path, false, true);

char full_path[MAX_PATH];
const char* abs_path = path;

@@ -328,7 +328,7 @@ bool file_read_line(
inline
bool file_write(const char* __restrict path, const FileBody* __restrict file) {
PROFILE_VERBOSE(PROFILE_FILE_UTILS, path);
PROFILE(PROFILE_FILE_UTILS, path, false, true);

int32 fd;
char full_path[PATH_MAX];

@@ -13,13 +13,19 @@
#include <windows.h>
#include "../../stdlib/Types.h"
#include "../../utils/TestUtils.h"
#include "../../log/DebugMemory.h"
#include "../../log/Stats.h"

// @todo Currently alignment only effects the starting position, but it should also effect the ending/size

inline
void* platform_alloc(size_t size)
{
return VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
void* ptr = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
DEBUG_MEMORY_INIT((uintptr_t) ptr, size);
LOG_INCREMENT_BY(DEBUG_COUNTER_MEM_ALLOC, size);

return ptr;
}

inline

@@ -36,11 +42,15 @@ void* platform_alloc_aligned(size_t size, int32 alignment)
void* aligned_ptr = (void *) (((uintptr_t) ptr + alignment + sizeof(void*) - 1) & ~(alignment - 1));
((void**) aligned_ptr)[-1] = ptr;

DEBUG_MEMORY_INIT((uintptr_t) aligned_ptr, size);
LOG_INCREMENT_BY(DEBUG_COUNTER_MEM_ALLOC, size);

return aligned_ptr;
}

inline
void platform_free(void** ptr) {
DEBUG_MEMORY_FREE((uintptr_t) *ptr);
VirtualFree(*ptr, 0, MEM_RELEASE);
*ptr = NULL;
}

@@ -48,6 +58,8 @@ void platform_free(void** ptr) {
inline
void platform_aligned_free(void** aligned_ptr) {
void* ptr = ((void**) *aligned_ptr)[-1];
DEBUG_MEMORY_FREE((uintptr_t) ptr);

VirtualFree(ptr, 0, MEM_RELEASE);
*aligned_ptr = NULL;
}

@@ -61,6 +73,9 @@ void* platform_shared_alloc(HANDLE* fd, const char* name, size_t size)
void* shm_ptr = MapViewOfFile(*fd, FILE_MAP_ALL_ACCESS, 0, 0, size);
ASSERT_SIMPLE(shm_ptr);

DEBUG_MEMORY_INIT((uintptr_t) shm_ptr, size);
LOG_INCREMENT_BY(DEBUG_COUNTER_MEM_ALLOC, size);

return shm_ptr;
}

@@ -79,6 +94,7 @@ void* platform_shared_open(HANDLE* fd, const char* name, size_t size)
inline
void platform_shared_free(HANDLE fd, const char*, void** ptr)
{
DEBUG_MEMORY_FREE((uintptr_t) *ptr);
UnmapViewOfFile(*ptr);
CloseHandle(fd);
*ptr = NULL;

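The Windows counterpart stores only the original base pointer, at ((void**) aligned_ptr)[-1]: VirtualFree(ptr, 0, MEM_RELEASE) releases the whole reservation without being told its length, so no size slot is needed (the size is still passed to the debug counters). A minimal sketch of that single-slot variant, again with malloc/free standing in for VirtualAlloc/VirtualFree:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void* sketch_alloc_aligned_single_slot(size_t size, size_t alignment)
{
    void* raw = malloc(size + alignment + sizeof(void*));
    if (!raw) {
        return NULL;
    }

    // Round up past one pointer slot, then keep the base pointer in that slot.
    uintptr_t aligned = ((uintptr_t) raw + sizeof(void*) + alignment - 1) & ~(alignment - 1);
    ((void**) aligned)[-1] = raw;

    return (void*) aligned;
}

static void sketch_free_aligned_single_slot(void** aligned_ptr)
{
    void* raw = ((void**) *aligned_ptr)[-1];
    free(raw); // VirtualFree(raw, 0, MEM_RELEASE) on the Windows path
    *aligned_ptr = NULL;
}

int main(void)
{
    void* p = sketch_alloc_aligned_single_slot(256, 64);
    printf("64-byte aligned: %d\n", (int) (((uintptr_t) p % 64) == 0));
    sketch_free_aligned_single_slot(&p);
    return 0;
}
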
@@ -89,7 +89,7 @@ void relative_to_absolute(const char* __restrict rel, char* __restrict path)
inline uint64
file_size(const char* path)
{
PROFILE_VERBOSE(PROFILE_FILE_UTILS, path);
PROFILE(PROFILE_FILE_UTILS, path, false, true);

// @performance Profile against fseek strategy
FileHandle fp;

@@ -133,7 +133,7 @@ file_size(const char* path)
inline
bool file_exists(const char* path)
{
PROFILE_VERBOSE(PROFILE_FILE_UTILS, path);
PROFILE(PROFILE_FILE_UTILS, path, false, true);

DWORD file_attr;

@@ -152,7 +152,7 @@ bool file_exists(const char* path)
inline void
file_read(const char* __restrict path, FileBody* __restrict file, RingMemory* __restrict ring = NULL)
{
PROFILE_VERBOSE(PROFILE_FILE_UTILS, path);
PROFILE(PROFILE_FILE_UTILS, path, false, true);

FileHandle fp;
if (*path == '.') {

@@ -219,7 +219,7 @@ file_read(const char* __restrict path, FileBody* __restrict file, RingMemory* __
inline
void file_read(const char* __restrict path, FileBody* __restrict file, uint64 offset, uint64 length = MAX_UINT64, RingMemory* __restrict ring = NULL)
{
PROFILE_VERBOSE(PROFILE_FILE_UTILS, path);
PROFILE(PROFILE_FILE_UTILS, path, false, true);

FileHandle fp;
if (*path == '.') {

@@ -409,7 +409,7 @@ bool file_read_line(
inline bool
file_write(const char* __restrict path, const FileBody* __restrict file)
{
PROFILE_VERBOSE(PROFILE_FILE_UTILS, path);
PROFILE(PROFILE_FILE_UTILS, path, false, true);

FileHandle fp;
if (*path == '.') {

@@ -456,7 +456,7 @@ file_write(const char* __restrict path, const FileBody* __restrict file)
inline void
file_copy(const char* __restrict src, const char* __restrict dst)
{
PROFILE_VERBOSE(PROFILE_FILE_UTILS, src);
PROFILE(PROFILE_FILE_UTILS, src, false, true);

if (*src == '.') {
char src_full_path[MAX_PATH];

@@ -662,7 +662,7 @@ FileHandle file_read_async_handle(const char* path)

bool file_append(const char* __restrict path, const char* __restrict file)
{
PROFILE_VERBOSE(PROFILE_FILE_UTILS, path);
PROFILE(PROFILE_FILE_UTILS, path, false, true);

FileHandle fp;
if (*path == '.') {

@@ -709,7 +709,7 @@ bool file_append(const char* __restrict path, const char* __restrict file)
inline bool
file_append(FileHandle fp, const char* file)
{
PROFILE_VERBOSE(PROFILE_FILE_UTILS, file);
PROFILE(PROFILE_FILE_UTILS, file, false, true);

if (fp == INVALID_HANDLE_VALUE) {
ASSERT_SIMPLE(false);

@@ -731,7 +731,7 @@ file_append(FileHandle fp, const char* file)
inline bool
file_append(FileHandle fp, const char* file, size_t length)
{
PROFILE_VERBOSE(PROFILE_FILE_UTILS, file);
PROFILE(PROFILE_FILE_UTILS, file, false, true);

if (fp == INVALID_HANDLE_VALUE) {
ASSERT_SIMPLE(false);

@@ -752,7 +752,7 @@ file_append(FileHandle fp, const char* file, size_t length)
inline bool
file_append(const char* __restrict path, const FileBody* __restrict file)
{
PROFILE_VERBOSE(PROFILE_FILE_UTILS, path);
PROFILE(PROFILE_FILE_UTILS, path, false, true);

FileHandle fp;
if (*path == '.') {

@@ -32,7 +32,7 @@
#define NOKERNEL 1
//#define NOUSER 0
#define NONLS 1
//#define NOMB 1
#define NOMB 1
#define NOMEMMGR 1
#define NOMETAFILE 1
#define NOMINMAX 1

@@ -132,16 +132,11 @@ void hashmap_alloc(HashMap* hm, int32 count, int32 element_size)
hm->table = (uint16 *) data;
chunk_init(&hm->buf, data + sizeof(uint16) * count, count, element_size, 8);

DEBUG_MEMORY_INIT((uintptr_t) hm->buf.memory, hm->buf.size);
LOG_INCREMENT_BY(DEBUG_COUNTER_MEM_ALLOC, hm->buf.size);
}

inline
void hashmap_free(HashMap* hm)
{
DEBUG_MEMORY_DELETE((uintptr_t) hm->buf.memory, hm->buf.size);

platform_free((void **) &hm->table);

hm->table = NULL;

@@ -12,6 +12,7 @@
#include <stdint.h>

#if _WIN32
// @question Do I really need <windows.h> here or could I go lower?
#include <windows.h>
typedef SSIZE_T ssize_t;
#elif __linux__

@@ -64,7 +64,7 @@ static THREAD_RETURN thread_pool_worker(void* arg)
work->func(work);
LOG_2("ThreadPool worker ended");
// At the end of a thread the ring memory automatically is considered freed
DEBUG_MEMORY_FREE((uintptr_t) work->ring.memory, work->ring.size);
DEBUG_MEMORY_FREE((uintptr_t) work->ring.memory);
LOG_FORMAT_2("Freed thread RingMemory: %n B", {{LOG_DATA_UINT64, &work->ring.size}});
atomic_set_release(&work->state, 1);

@@ -218,6 +218,7 @@ int32 ui_input_element_update(UILayout* layout, UIElement* element)
dimension.width -= input->border.thickness;
dimension.height -= input->border.thickness;

// @bug change to camera_step_closer()
zindex = nextafterf(zindex, INFINITY);
}

@@ -229,6 +230,7 @@ int32 ui_input_element_update(UILayout* layout, UIElement* element)
input->background.background_color
);

// @bug change to camera_step_closer()
zindex = nextafterf(zindex, INFINITY);
}

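Both hunks attach a @bug note to zindex = nextafterf(zindex, INFINITY), asking for camera_step_closer() instead. nextafterf(x, INFINITY) is the standard C way to get the next representable float above x, so each drawn layer is nudged by exactly one ULP; a standalone check of the step size:

#include <math.h>
#include <stdio.h>

int main(void)
{
    float z = 1.0f;
    float z_next = nextafterf(z, INFINITY);

    // Enough digits to make the single-ULP difference visible.
    printf("%.9g -> %.9g\n", z, z_next);
    printf("step: %.9g\n", z_next - z);
    return 0;
}
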
@@ -570,7 +570,7 @@ int32 layout_from_data(
const byte* __restrict data,
UILayout* __restrict layout
) {
PROFILE_VERBOSE(PROFILE_LAYOUT_FROM_DATA, "");
PROFILE(PROFILE_LAYOUT_FROM_DATA, NULL, false, true);
LOG_1("Load layout");

const byte* in = data;

@@ -606,7 +606,7 @@ void layout_from_theme(
UILayout* __restrict layout,
const UIThemeStyle* __restrict theme
) {
PROFILE_VERBOSE(PROFILE_LAYOUT_FROM_THEME, "");
PROFILE(PROFILE_LAYOUT_FROM_THEME, NULL, false, true);
LOG_1("Load theme for layout");

// @todo Handle animations

@@ -267,7 +267,7 @@ int32 theme_from_data(
const byte* __restrict data,
UIThemeStyle* __restrict theme
) {
PROFILE_VERBOSE(PROFILE_THEME_FROM_THEME, "");
PROFILE(PROFILE_THEME_FROM_THEME, NULL, false, true);
LOG_1("Load theme");

const byte* in = data;