update
Some checks failed
CodeQL / Analyze (${{ matrix.language }}) (autobuild, c-cpp) (push) Has been cancelled
Microsoft C++ Code Analysis / Analyze (push) Has been cancelled

This commit is contained in:
Dennis Eichhorn 2025-06-14 19:10:16 +00:00
parent eb9a135ca7
commit 2059cc6e77
45 changed files with 2124 additions and 610 deletions

View File

@ -29,7 +29,7 @@ struct AssetComponent {
uint64 asset_count;
// @question Do we want to add a mutex to assets? This way we don't have to lock the entire ams.
mutex mutex;
mutex mtx;
};
struct AssetManagementSystem {
@ -55,7 +55,7 @@ void ams_component_create(AssetComponent* ac, BufferMemory* buf, int32 chunk_siz
LOG_1("Create AMS Component for %n assets and %n B", {{LOG_DATA_INT32, &count}, {LOG_DATA_UINT32, &chunk_size}});
chunk_init(&ac->asset_memory, buf, count, chunk_size, 64);
mutex_init(&ac->mutex, NULL);
mutex_init(&ac->mtx, NULL);
}
inline
@ -71,13 +71,13 @@ void ams_component_create(AssetComponent* ac, byte* buf, int32 chunk_size, int32
ac->asset_memory.memory = buf;
ac->asset_memory.free = (uint64 *) (ac->asset_memory.memory + ac->asset_memory.chunk_size * count);
mutex_init(&ac->mutex, NULL);
mutex_init(&ac->mtx, NULL);
}
inline
void ams_component_free(AssetComponent* ac)
{
mutex_destroy(&ac->mutex);
mutex_destroy(&ac->mtx);
}
inline
@ -400,15 +400,15 @@ Asset* thrd_ams_reserve_asset(AssetManagementSystem* ams, byte type, const char*
AssetComponent* ac = &ams->asset_components[type];
uint16 elements = ams_calculate_chunks(ac, size, overhead);
mutex_lock(&ams->asset_components[type].mutex);
mutex_lock(&ams->asset_components[type].mtx);
int32 free_data = chunk_reserve(&ac->asset_memory, elements);
if (free_data < 0) {
mutex_unlock(&ams->asset_components[type].mutex);
mutex_unlock(&ams->asset_components[type].mtx);
ASSERT_SIMPLE(free_data >= 0);
return NULL;
}
mutex_unlock(&ams->asset_components[type].mutex);
mutex_unlock(&ams->asset_components[type].mtx);
byte* asset_data = chunk_get_element(&ac->asset_memory, free_data, true);
@ -514,15 +514,15 @@ Asset* thrd_ams_insert_asset(AssetManagementSystem* ams, Asset* asset_temp, cons
{
AssetComponent* ac = &ams->asset_components[asset_temp->component_id];
mutex_lock(&ams->asset_components[asset_temp->component_id].mutex);
mutex_lock(&ams->asset_components[asset_temp->component_id].mtx);
int32 free_data = chunk_reserve(&ac->asset_memory, asset_temp->size);
if (free_data < 0) {
mutex_unlock(&ams->asset_components[asset_temp->component_id].mutex);
mutex_unlock(&ams->asset_components[asset_temp->component_id].mtx);
ASSERT_SIMPLE(free_data >= 0);
return NULL;
}
mutex_unlock(&ams->asset_components[asset_temp->component_id].mutex);
mutex_unlock(&ams->asset_components[asset_temp->component_id].mtx);
byte* asset_data = chunk_get_element(&ac->asset_memory, free_data);
memcpy(asset_data, asset_temp->self, sizeof(Asset));

View File

@ -38,7 +38,7 @@ inline
void cmd_buffer_create(AppCmdBuffer* cb, BufferMemory* buf, int32 commands_count)
{
chunk_init(&cb->commands, buf, commands_count, sizeof(Command), 64);
mutex_init(&cb->mutex, NULL);
mutex_init(&cb->mtx, NULL);
LOG_1("Created AppCmdBuffer: %n B", {{LOG_DATA_UINT64, &cb->commands.size}});
}
@ -181,10 +181,10 @@ Asset* cmd_font_load_async(AppCmdBuffer* __restrict cb, Command* __restrict cmd)
inline
void thrd_cmd_insert(AppCmdBuffer* __restrict cb, Command* __restrict cmd_temp)
{
mutex_lock(&cb->mutex);
mutex_lock(&cb->mtx);
int32 index = chunk_reserve(&cb->commands, 1);
if (index < 0) {
mutex_unlock(&cb->mutex);
mutex_unlock(&cb->mtx);
ASSERT_SIMPLE(false);
return;
@ -196,7 +196,7 @@ void thrd_cmd_insert(AppCmdBuffer* __restrict cb, Command* __restrict cmd_temp)
Command* cmd = (Command *) chunk_get_element(&cb->commands, index);
memcpy(cmd, cmd_temp, sizeof(Command));
mutex_unlock(&cb->mutex);
mutex_unlock(&cb->mtx);
}
inline
@ -697,9 +697,9 @@ void cmd_iterate(AppCmdBuffer* cb)
// This shouldn't happen since the command buffer shouldn't fill up in just 1-3 frames
void thrd_cmd_iterate(AppCmdBuffer* cb)
{
mutex_lock(&cb->mutex);
mutex_lock(&cb->mtx);
cmd_iterate(cb);
mutex_unlock(&cb->mutex);
mutex_unlock(&cb->mtx);
}
#endif

View File

@ -29,7 +29,7 @@ struct AppCmdBuffer {
ChunkMemory commands;
int32 last_element;
mutex mutex;
mutex mtx;
// Application data for cmd access
// The list below depends on what kind of systems our command buffer needs access to

View File

@ -34,7 +34,7 @@ struct DatabasePool {
void db_pool_alloc(DatabasePool* pool, uint8 count) {
ASSERT_SIMPLE(count);
PROFILE(PROFILE_DB_POOL_ALLOC, NULL, false, true);
LOG_1("Allocating DatabasePool for %d connections", {{LOG_DATA_BYTE, &count}});
LOG_1("[INFO] Allocating DatabasePool for %d connections", {{LOG_DATA_BYTE, &count}});
uint64 size = count * sizeof(DatabaseConnection)
+ sizeof(uint64) * CEIL_DIV(count, 64) // free
@ -51,7 +51,7 @@ void db_pool_add(DatabasePool* __restrict pool, DatabaseConnection* __restrict d
}
void db_pool_free(DatabasePool* pool) {
LOG_1("Freeing DatabasePool");
LOG_1("[INFO] Freeing DatabasePool");
for (int32 i = 0; i < pool->count; ++i) {
db_close(&pool->connections[i]);

View File

@ -54,7 +54,7 @@ void ecs_entity_type_create(ChunkMemory* ec, BufferMemory* buf, int32 chunk_size
ASSERT_SIMPLE(chunk_size);
chunk_init(ec, buf, count, chunk_size, 64);
//mutex_init(&ec->mutex, NULL);
//mutex_init(&ec->mtx, NULL);
}
inline
@ -63,7 +63,7 @@ void ecs_component_type_create(ChunkMemory* ec, BufferMemory* buf, int32 chunk_s
ASSERT_SIMPLE(chunk_size);
chunk_init(ec, buf, count, chunk_size, 64);
//mutex_init(&ec->mutex, NULL);
//mutex_init(&ec->mtx, NULL);
}
Entity* ecs_get_entity(EntityComponentSystem* ecs, int32 entity_id)

View File

@ -26,12 +26,10 @@ bool html_template_in_control_structure(const char* str, const char** controls,
return false;
}
// @performance This combines load and build, that should be two separate functions
// Data layout:
// 1. minified text file
// 2. AST
void html_template_cache_load(PerfectHashMapRef* cache, const char* key, const char* str, int32 alignment = 64) {
char* minified = (char *) ROUND_TO_NEAREST((uintptr_t) cache->data + (uintptr_t) cache->data_pos, alignment);
void html_template_build(const FileBody* in, FileBody* out) {
// @todo We need to save the size of the template in the out file so we can correctly load the AST which starts afterwards
char* minified = (char *) out->content;
char* minified_start = minified;
static const char* CONTROL_STRUCTURE_START[] = {
@ -42,6 +40,8 @@ void html_template_cache_load(PerfectHashMapRef* cache, const char* key, const c
"'", "\"", "</code>", "</pre>", "/>", "</textarea>",
};
const char* str = (const char*) in->content;
// Remove empty content to reduce template size
// We could handle this inside the lexer but the lexer itself often uses pointers into the template
// The lexer would also have to create additional tokens in that case whenever it "splits" the template due to empty text
@ -78,7 +78,7 @@ void html_template_cache_load(PerfectHashMapRef* cache, const char* key, const c
*minified++ = *str++;
}
cache->data_pos += ((uintptr_t) minified - (uintptr_t) minified_start);
out->size += ((uintptr_t) minified - (uintptr_t) minified_start);
// Now add AST to cache
HtmlTemplateToken current_token = html_template_token_next((const char**) &minified_start, HTML_TEMPLATE_CONTEXT_FLAG_HTML);
@ -89,7 +89,7 @@ void html_template_cache_load(PerfectHashMapRef* cache, const char* key, const c
// @todo Instead of doing this, we want to use the cache.memory
// For this to work we need to pass the current memory position however into this function
byte* memory_start = cache->data + cache->data_pos;
byte* memory_start = out->content + out->size;
byte* memory = memory_start;
HtmlTemplateASTNode* ast = html_template_statement_parse(
(const char**) &minified_start,
@ -101,12 +101,39 @@ void html_template_cache_load(PerfectHashMapRef* cache, const char* key, const c
&memory
);
cache->data_pos += ((uintptr_t) memory - (uintptr_t) memory_start);
ASSERT_SIMPLE(ast);
ASSERT_SIMPLE(((uintptr_t) ast) % alignment == 0);
}
// @performance This combines load and build, that should be two separate functions
// Data layout:
// 1. minified text file
// 2. AST
void html_template_cache_load(PerfectHashMapRef* cache, const char* key, const char* str) {
// Add cache to data
// We only store the AST index in the hash map
perfect_hashmap_insert(&cache->hm, key, (int32) ((uintptr_t) ast - (uintptr_t) cache->data));
// The AST internally references the appropriate sections in the template
// @question Why though? Shouldn't we store the template in AST form as well for better size usage?
perfect_hashmap_insert(&cache->hm, key, str);
}
void html_template_build_iter(const char* path, va_list args) {
RingMemory* ring = va_arg(args, RingMemory*);
char full_path[MAX_PATH];
relative_to_absolute(path, full_path);
FileBody in = {};
file_read(full_path, &in, ring);
FileBody out = {
.size = 0,
.content = ring_get_memory(ring, in.size * 2, 64)
};
html_template_build(&in, &out);
// @todo save out
}
void html_template_cache_iter(const char* path, va_list args) {
@ -118,7 +145,6 @@ void html_template_cache_iter(const char* path, va_list args) {
FileBody file = {};
file_read(full_path, &file, ring);
html_template_cache_load(cache, path, (const char *) file.content);
}

View File

@ -270,10 +270,10 @@ bool html_template_condition_eval(HtmlTemplateASTNode *node, HtmlTemplateContext
// @todo should take in a buffer for template output
int32 html_template_interpret(
HtmlTemplateASTNode *node,
HtmlTemplateASTNode* node,
char* buffer,
int32 buffer_size,
HtmlTemplateContextStack *context_stack
HtmlTemplateContextStack* context_stack
) {
int32 out_length = 0;

View File

@ -10,13 +10,13 @@
#define COMS_JINGGA_HTTP_HEADER_H
#include "../stdlib/Types.h"
#include "HttpHeaderKey.h"
#include "header/HttpHeaderKey.h"
struct HttpHeader {
struct HttpHeaderElement {
HttpHeaderKey key;
byte value_length;
uint16 value_offset;
uint16 value_length;
};
#endif
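The new element only stores an offset and a length; the value itself lives in the header content area of the owning request/response. A minimal resolution sketch, mirroring the http_header_value_get helpers added later in this commit (req and element are assumed to exist):
// Sketch: resolve a header value from its element
const char* value = ((const char *) (req + 1)) + element->value_offset;
uint16 length = element->value_length; // the value is not necessarily null-terminated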

View File

@ -1,153 +0,0 @@
/**
* Jingga
*
* @copyright Jingga
* @license OMS License 2.0
* @version 1.0.0
* @link https://jingga.app
*/
#ifndef COMS_JINGGA_HTTP_HEADER_KEY_H
#define COMS_JINGGA_HTTP_HEADER_KEY_H
#include "../stdlib/Types.h"
enum HttpHeaderKey : byte {
// Standard HTTP/1.1 & HTTP/2 Headers (RFC 9110, 9113, etc.)
HTTP_HEADER_KEY_HOST,
HTTP_HEADER_KEY_USER_AGENT,
HTTP_HEADER_KEY_ACCEPT,
HTTP_HEADER_KEY_ACCEPT_CHARSET,
HTTP_HEADER_KEY_ACCEPT_ENCODING,
HTTP_HEADER_KEY_ACCEPT_LANGUAGE,
HTTP_HEADER_KEY_ACCEPT_DATETIME,
HTTP_HEADER_KEY_ACCEPT_PATCH,
HTTP_HEADER_KEY_ACCEPT_RANGES,
HTTP_HEADER_KEY_AGE,
HTTP_HEADER_KEY_ALLOW,
HTTP_HEADER_KEY_AUTHORIZATION,
HTTP_HEADER_KEY_CACHE_CONTROL,
HTTP_HEADER_KEY_CONNECTION,
HTTP_HEADER_KEY_CONTENT_DISPOSITION,
HTTP_HEADER_KEY_CONTENT_ENCODING,
HTTP_HEADER_KEY_CONTENT_LANGUAGE,
HTTP_HEADER_KEY_CONTENT_LENGTH,
HTTP_HEADER_KEY_CONTENT_LOCATION,
HTTP_HEADER_KEY_CONTENT_MD5,
HTTP_HEADER_KEY_CONTENT_RANGE,
HTTP_HEADER_KEY_CONTENT_TYPE,
HTTP_HEADER_KEY_COOKIE,
HTTP_HEADER_KEY_DATE,
HTTP_HEADER_KEY_ETAG,
HTTP_HEADER_KEY_EXPECT,
HTTP_HEADER_KEY_EXPIRES,
HTTP_HEADER_KEY_FROM,
HTTP_HEADER_KEY_IF_MATCH,
HTTP_HEADER_KEY_IF_MODIFIED_SINCE,
HTTP_HEADER_KEY_IF_NONE_MATCH,
HTTP_HEADER_KEY_IF_RANGE,
HTTP_HEADER_KEY_IF_UNMODIFIED_SINCE,
HTTP_HEADER_KEY_LAST_MODIFIED,
HTTP_HEADER_KEY_LINK,
HTTP_HEADER_KEY_LOCATION,
HTTP_HEADER_KEY_MAX_FORWARDS,
HTTP_HEADER_KEY_ORIGIN,
HTTP_HEADER_KEY_PRAGMA,
HTTP_HEADER_KEY_PROXY_AUTHENTICATE,
HTTP_HEADER_KEY_PROXY_AUTHORIZATION,
HTTP_HEADER_KEY_RANGE,
HTTP_HEADER_KEY_REFERER,
HTTP_HEADER_KEY_RETRY_AFTER,
HTTP_HEADER_KEY_SERVER,
HTTP_HEADER_KEY_SET_COOKIE,
HTTP_HEADER_KEY_STRICT_TRANSPORT_SECURITY,
HTTP_HEADER_KEY_TE,
HTTP_HEADER_KEY_TRAILER,
HTTP_HEADER_KEY_TRANSFER_ENCODING,
HTTP_HEADER_KEY_UPGRADE,
HTTP_HEADER_KEY_VARY,
HTTP_HEADER_KEY_VIA,
HTTP_HEADER_KEY_WARNING,
HTTP_HEADER_KEY_WWW_AUTHENTICATE,
// Common Non-Standard (X-*) and Extension Headers
HTTP_HEADER_KEY_X_FORWARDED_FOR,
HTTP_HEADER_KEY_X_FORWARDED_HOST,
HTTP_HEADER_KEY_X_FORWARDED_PROTO,
HTTP_HEADER_KEY_X_REQUESTED_WITH,
HTTP_HEADER_KEY_X_CSRF_TOKEN,
HTTP_HEADER_KEY_X_XSS_PROTECTION,
HTTP_HEADER_KEY_X_CONTENT_TYPE_OPTIONS,
HTTP_HEADER_KEY_X_FRAME_OPTIONS,
HTTP_HEADER_KEY_X_POWERED_BY,
HTTP_HEADER_KEY_X_UPLOAD_ID,
HTTP_HEADER_KEY_X_RATE_LIMIT_LIMIT,
HTTP_HEADER_KEY_X_RATE_LIMIT_REMAINING,
HTTP_HEADER_KEY_X_RATE_LIMIT_RESET,
HTTP_HEADER_KEY_X_UA_COMPATIBLE,
HTTP_HEADER_KEY_X_DNS_PREFETCH_CONTROL,
HTTP_HEADER_KEY_X_DOWNLOAD_OPTIONS,
HTTP_HEADER_KEY_X_PERMITTED_CROSS_DOMAIN_POLICIES,
// CORS Headers
HTTP_HEADER_KEY_ACCESS_CONTROL_ALLOW_ORIGIN,
HTTP_HEADER_KEY_ACCESS_CONTROL_ALLOW_CREDENTIALS,
HTTP_HEADER_KEY_ACCESS_CONTROL_ALLOW_HEADERS,
HTTP_HEADER_KEY_ACCESS_CONTROL_ALLOW_METHODS,
HTTP_HEADER_KEY_ACCESS_CONTROL_EXPOSE_HEADERS,
HTTP_HEADER_KEY_ACCESS_CONTROL_MAX_AGE,
HTTP_HEADER_KEY_ACCESS_CONTROL_REQUEST_HEADERS,
HTTP_HEADER_KEY_ACCESS_CONTROL_REQUEST_METHOD,
// Security Headers
HTTP_HEADER_KEY_CONTENT_SECURITY_POLICY,
HTTP_HEADER_KEY_PERMISSIONS_POLICY,
HTTP_HEADER_KEY_REFERRER_POLICY,
HTTP_HEADER_KEY_EXPECT_CT,
HTTP_HEADER_KEY_FEATURE_POLICY,
HTTP_HEADER_KEY_CROSS_ORIGIN_EMBEDDER_POLICY,
HTTP_HEADER_KEY_CROSS_ORIGIN_OPENER_POLICY,
HTTP_HEADER_KEY_CROSS_ORIGIN_RESOURCE_POLICY,
// WebSocket Headers
HTTP_HEADER_KEY_SEC_WEBSOCKET_KEY,
HTTP_HEADER_KEY_SEC_WEBSOCKET_ACCEPT,
HTTP_HEADER_KEY_SEC_WEBSOCKET_VERSION,
HTTP_HEADER_KEY_SEC_WEBSOCKET_PROTOCOL,
HTTP_HEADER_KEY_SEC_WEBSOCKET_EXTENSIONS,
// HTTP/3 and QUIC Headers
HTTP_HEADER_KEY_ALT_SVC,
HTTP_HEADER_KEY_EARLY_DATA,
// Cloud & CDN Headers
HTTP_HEADER_KEY_CF_CONNECTING_IP,
HTTP_HEADER_KEY_CF_IPCOUNTRY,
HTTP_HEADER_KEY_CF_RAY,
HTTP_HEADER_KEY_TRUE_CLIENT_IP,
HTTP_HEADER_KEY_X_AMZ_CF_ID,
HTTP_HEADER_KEY_X_AMZN_TRACE_ID,
// Custom/Experimental Headers
HTTP_HEADER_KEY_DNT, // Do Not Track
HTTP_HEADER_KEY_SAVE_DATA,
HTTP_HEADER_KEY_DOWNLINK,
HTTP_HEADER_KEY_ECT, // Effective Connection Type
HTTP_HEADER_KEY_RTT,
HTTP_HEADER_KEY_PURPOSE,
HTTP_HEADER_KEY_SEC_FETCH_SITE,
HTTP_HEADER_KEY_SEC_FETCH_MODE,
HTTP_HEADER_KEY_SEC_FETCH_USER,
HTTP_HEADER_KEY_SEC_FETCH_DEST,
HTTP_HEADER_KEY_SERVICE_WORKER_NAVIGATION_PRELOAD,
HTTP_HEADER_KEY_LAST_EVENT_ID,
HTTP_HEADER_KEY_REPORT_TO,
HTTP_HEADER_KEY_PRIORITY,
HTTP_HEADER_KEY_SIGNATURE,
HTTP_HEADER_KEY_SIGNATURE_KEY,
HTTP_HEADER_KEY_FORWARDED,
HTTP_HEADER_KEY_ORIGINAL_METHOD,
HTTP_HEADER_KEY_ORIGINAL_URL,
HTTP_HEADER_KEY_ORIGINAL_HOST,
};
#endif

View File

@ -22,4 +22,19 @@ enum HttpMethod : byte {
typedef HttpMethod HttpVerb;
const char* http_method_text(HttpMethod method) {
switch (method) {
case HTTP_METHOD_GET:
return "GET";
case HTTP_METHOD_POST:
return "POST";
case HTTP_METHOD_PUT:
return "PUT";
case HTTP_METHOD_DELETE:
return "DELETE";
default:
UNREACHABLE();
}
}
#endif

View File

@ -11,6 +11,10 @@
#include "../stdlib/Types.h"
#define HTTP_PROTOCOL_1_1_STR "1.1"
#define HTTP_PROTOCOL_2_STR "2"
#define HTTP_PROTOCOL_3_STR "3"
enum HttpProtocol : byte {
HTTP_PROTOCOL_UNKNOWN,
HTTP_PROTOCOL_1_1,
@ -18,4 +22,17 @@ enum HttpProtocol : byte {
HTTP_PROTOCOL_3,
};
const char* http_protocol_text(HttpProtocol protocol) {
switch (protocol) {
case HTTP_PROTOCOL_1_1:
return "HTTP/1.1";
case HTTP_PROTOCOL_2:
return "HTTP/2";
case HTTP_PROTOCOL_3:
return "HTTP/3";
default:
UNREACHABLE();
}
}
#endif

View File

@ -19,68 +19,297 @@
#include "HttpMethod.h"
#include "HttpProtocol.h"
#include "HttpHeaderKey.h"
#include "HttpUri.h"
#include "HttpHeader.h"
#include "header/HttpHeaderKey.h"
#include "../network/SocketConnection.h"
#include "../memory/ThreadedChunkMemory.h"
enum HttpRequestState : byte {
HTTP_REQUEST_STATE_NONE = 1 << 0,
HTTP_REQUEST_STATE_HEADER_SENT = 1 << 1,
HTTP_REQUEST_STATE_HEADER_BODY_SENT = 1 << 2,
HTTP_REQUEST_STATE_HEADER_FINALIZED = 1 << 3
};
#define MIN_HTTP_REQUEST_CONTENT 2048
/**
* Data layout
* HttpRequest
* ...
* char* header
*
* @todo allow overwriting the body for iterative handling of large bodies
* Request body (excl. file data if available)
* HttpHeader[]
* char* body // chunked
*
*/
// @todo for large requests overwrite existing data and just send that instead of allocating too much memory
// The order of the members and their types is carefully chosen to make them fit into 1 cache line
// The request content must come directly after the request
struct HttpRequest {
// Chunk id
int32 id;
// Defines the number of chunks this http request uses (incl. http header)
uint16 size;
// Flag to indicate if the request is already returned
// Uses HttpRequestState
byte state;
HttpUri uri;
HttpMethod method;
HttpProtocol protocol;
uint16 header_count;
// Element information
uint16 header_available_count;
uint16 header_used_count;
// Value information
uint16 header_available_size;
uint16 header_used_size;
// Body information
uint16 body_offset;
uint32 body_length;
uint32 request_length;
uint32 headers_offset;
// uint32 body_available_size; Comes from size * chunk_size - body_offset
uint32 body_used_size;
};
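Since the struct is immediately followed by its content, the three chunked sub-regions can be located from it directly; a rough sketch, assuming the offsets set up in http_request_create below (req assumed valid):
// Sketch of the memory directly behind a HttpRequest
HttpHeaderElement* elements = (HttpHeaderElement *) (req + 1); // header_available_count entries
char* header_values = ((char *) (req + 1)) + req->header_available_count * sizeof(HttpHeaderElement); // header_available_size bytes
char* body = ((char *) (req + 1)) + req->body_offset; // remainder of the reserved chunks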
inline
bool http_request_header_is_complete(const char* request, size_t length) {
void http_request_grow(HttpRequest* __restrict* request, int32 count, ThreadedChunkMemory* mem)
{
HttpRequest* req = *request;
int32 id = thrd_chunk_resize(mem, req->id, req->size, count);
req = (HttpRequest*) thrd_chunk_get_element(mem, id);
req->id = id;
req->size = count;
*request = req;
}
void http_header_value_set(
HttpRequest* __restrict* request,
HttpHeaderKey key,
const char* __restrict value,
ThreadedChunkMemory* mem,
size_t value_length = 0
) {
HttpRequest* req = *request;
char* body_ptr = ((char *) (req + 1)) + req->body_offset;
HttpHeaderElement* elements = (HttpHeaderElement *) (req + 1);
HttpHeaderElement* element = NULL;
for (int32 i = 0; i < req->header_used_count; ++i) {
if (elements[i].key == key) {
element = &elements[i];
break;
}
}
value_length = value_length == 0 ? str_length(value) : value_length;
int32 header_content_offset = req->header_available_count * sizeof(HttpHeaderElement);
if (element) {
// Replace existing value
if (value_length <= element->value_length) {
// New value can use same memory since it is smaller
// We don't size down since it is wasted performance for the hypothetical edge case where this is beneficial
// The edge case is when a size reduction would avoid a memory expansion later on
// @bug What if we first reduce the size and then increase it even though the original size would still be able to contain it?
memcpy(((char *) (req + 1)) + element->value_offset, value, value_length);
} else {
// New value is larger than old value and requires memory moves
uint32 grow_header_content = req->header_used_size + value_length >= req->header_available_size;
uint32 header_value_growth = (uint32) OMS_MAX(grow_header_content * (1 * 256 * sizeof(char)), value_length);
if (header_value_growth) {
// The header content cannot hold the value
// We calculate the body size and then subtract the used space to find the free body size
if (header_value_growth > (req->size * mem->chunk_size - req->body_offset - sizeof(HttpRequest)) - req->body_used_size) {
// We need to grow the request object since we don't have enough free space in the body to grow into
http_request_grow(request, req->size + 1, mem);
req = *request;
body_ptr = ((char *) (req + 1)) + req->body_offset;
}
if (req->body_used_size) {
// Move body if we have body
memmove(
body_ptr + header_value_growth, // new body start position
body_ptr, // old body start position
req->body_used_size // data to move
);
}
// We now move the body start position
req->body_offset += header_value_growth;
// New element is positioned at the end of the existing header content
// @bug We are wasting the original value memory e.g.
// old data: ... other element_value ... old_value ... other element_value
// new data: ... other element_value ... old_value ... other element_value .. new_value
// As you can see we still use memory for the old_value which is not even tracked any more
// Solution: shift header content completely and re-reference the value_offset of every other element
// Ideal: ..other element_value ... new_value ... other element_value
// This is ideal since we only need to memmove the data after new_value
element->value_offset = header_content_offset + req->header_used_size;
// The header content grows in size
req->header_available_size += header_value_growth;
// The used header size grows
req->header_used_size += value_length;
} else {
// The header content can hold the value
// Add the value at the end of content (careful same bug as above)
element->value_offset = req->body_offset - (req->header_available_size - req->header_used_size);
}
memcpy(((char *) (req + 1)) + element->value_offset, value, value_length);
}
element->value_length = value_length;
} else {
// Add new value
uint32 grow_header_elements = req->header_used_count >= req->header_available_count;
uint32 grow_header_content = req->header_used_size + value_length > req->header_available_size;
uint32 header_element_addition = grow_header_elements * 4;
uint32 header_element_growth = header_element_addition * sizeof(HttpHeaderElement);
uint32 header_value_growth = (uint32) OMS_MAX((grow_header_content) * (1 * 256 * sizeof(char)), value_length);
if (header_element_growth || header_value_growth) {
if (header_element_growth + header_value_growth > (req->size * mem->chunk_size - req->body_offset - sizeof(HttpRequest)) - req->body_used_size) {
// We need to grow the request object since we don't have enough free space in the body to grow into
http_request_grow(request, req->size + 1, mem);
req = *request;
body_ptr = ((char *) (req + 1)) + req->body_offset;
elements = (HttpHeaderElement *) (req + 1);
}
if (req->body_used_size) {
// Move body if we have body
memmove(
body_ptr + header_element_growth + header_value_growth, // New body start position
body_ptr, // Old body start
req->body_used_size // Data to move
);
}
if (header_element_growth && req->header_used_size) {
// If we are growing the element array, we need to move the content
memmove(
((char *) (req + 1)) + sizeof(HttpHeaderElement) * (req->header_available_count + header_element_addition), // New element value start position
((char *) (req + 1)) + sizeof(HttpHeaderElement) * req->header_available_count, // Old element value start position
req->header_used_size
);
// We need to adjust the offset position because of the move
for (int32 i = 0; i < req->header_used_count; ++i) {
elements[i].value_offset += header_element_growth;
}
}
// We now move the body start position
req->body_offset += header_element_growth + header_value_growth;
req->header_available_count += header_element_growth / sizeof(HttpHeaderElement);
req->header_available_size += header_value_growth;
}
// Set element
element = &elements[req->header_used_count];
element->key = key;
// The value is added to the end of the values
element->value_offset = req->header_available_count * sizeof(HttpHeaderElement) + req->header_used_size;
element->value_length = (uint16) value_length;
// Set value
memcpy(((char *) (req + 1)) + element->value_offset, value, value_length);
req->body_used_size += value_length;
req->header_used_size += (uint16) value_length;
++req->header_used_count;
}
}
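A quick worked example of the growth arithmetic above, using the 4 * 256 B header content area that http_request_create sets up (numbers are only illustrative):
// header_available_size = 1024, header_used_size = 1000, new value_length = 100
// grow_header_content = (1000 + 100 >= 1024) -> 1
// header_value_growth = OMS_MAX(1 * 256, 100) -> 256 B
// The body (if any) is memmove'd back by 256 B; if the free body space cannot
// absorb that, http_request_grow() first reserves one additional chunk.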
HttpRequest* http_request_create(ThreadedChunkMemory* mem)
{
int32 request_buffer_count = CEIL_DIV(sizeof(HttpRequest) + MIN_HTTP_REQUEST_CONTENT, mem->chunk_size);
int32 request_buffer_id = thrd_chunk_reserve(mem, request_buffer_count);
HttpRequest* request = (HttpRequest *) thrd_chunk_get_element(mem, request_buffer_id);
request->id = request_buffer_id;
request->size = request_buffer_count;
request->protocol = HTTP_PROTOCOL_1_1;
// Create content length placeholder; this header element is always required
http_header_value_set(&request, HTTP_HEADER_KEY_CONTENT_LENGTH, " ", mem);
// Prepare the chunked sub-regions
request->header_available_count = 16;
request->header_available_size = 4 * 256 * sizeof(char);
request->body_offset = request->header_available_count * sizeof(HttpHeaderElement) + request->header_available_size;
/*
request->body_available_size = request_buffer_count * mem->chunk_size
- request->header_available_count * sizeof(HttpHeaderElement)
- request->header_available_size;
*/
return request;
}
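A hypothetical usage sketch for the request API above (the ThreadedChunkMemory setup is assumed and not part of this commit):
// Sketch: create a request, set a header and read it back (mem is assumed to exist)
HttpRequest* req = http_request_create(mem);
http_header_value_set(&req, HTTP_HEADER_KEY_HOST, "jingga.app", mem);
const HttpHeaderElement* host = http_header_element_get(req, HTTP_HEADER_KEY_HOST);
if (host) {
    const char* value = http_header_value_get(req, host); // host->value_length bytes, not null-terminated
}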
inline
const char* http_request_body(const HttpRequest* request) {
return ((const char *) (request + 1)) + request->body_offset;
}
FORCE_INLINE
bool http_header_is_complete(const char* request, size_t length) {
return str_contains(request, "\r\n\r\n", length);
}
// Binary search for the key
inline
const HttpHeader* http_request_header_get(const HttpRequest* request, HttpHeaderKey key) {
const HttpHeader* base = (HttpHeader *) ((uintptr_t) request + sizeof(HttpRequest) + request->request_length);
int32 header_count = OMS_MIN(request->header_count, (uint16) key);
while (header_count > 1) {
int32 half = header_count / 2;
header_count -= half;
base += (base[half - 1].key < key) * half;
}
return base->key == key ? base : NULL;
bool http_header_is_complete(const HttpRequest* request) {
const char* body = (const char *) (request + 1) + request->body_offset;
return str_contains(body, "\r\n\r\n", request->body_used_size);
}
inline
const char* http_request_header_value_get(const HttpRequest* request, const HttpHeader* header) {
const char* request_data = (const char *) ((uintptr_t) request + sizeof(HttpRequest));
const HttpHeaderElement* http_header_element_get(const HttpRequest* request, HttpHeaderKey key)
{
const HttpHeaderElement* elements = (HttpHeaderElement *) (request + 1);
for (int32 i = 0; i < request->header_used_count; ++i) {
if (elements[i].key == key) {
return &elements[i];
}
}
return request_data + header->value_offset;
return NULL;
}
inline
const char* http_header_value_get(const HttpRequest* request, const HttpHeaderElement* header_element)
{
return ((const char *) (request + 1)) + header_element->value_offset;
}
bool http_request_has_file_upload(const HttpRequest* request) {
const HttpHeader* header = http_request_header_get(request, HTTP_HEADER_KEY_CONTENT_TYPE);
const HttpHeaderElement* header = http_header_element_get(request, HTTP_HEADER_KEY_CONTENT_TYPE);
if (!header) {
return false;
}
const char* header_value = http_request_header_value_get(request, header);
const char* header_value = http_header_value_get(request, header);
if ((str_compare_caseless(header_value, "application/", OMS_MIN(header->value_length, sizeof("application/") - 1)) == 0
&& str_compare_caseless(header_value, "application/json", OMS_MIN(header->value_length, sizeof("application/json") - 1)) != 0)
|| str_compare_caseless(header_value, "image/", OMS_MIN(header->value_length, sizeof("image/") - 1)) == 0
@ -100,8 +329,11 @@ bool http_request_has_file_upload(const HttpRequest* request) {
return false;
}
void http_request_header_parse(HttpRequest* http_request, const char* request) {
// @performance We could probably improve this significantly by handling it directly instead of calling the helper functions
// In the case below we know exactly whether additional header elements will follow or not
void http_header_parse(HttpRequest** http_request, const char* request, ThreadedChunkMemory* mem) {
const char* request_start = request;
HttpRequest* http_req = *http_request;
//////////////////////////////////////////////////
// Parsing HTTP request line
@ -111,49 +343,49 @@ void http_request_header_parse(HttpRequest* http_request, const char* request) {
// Parse request type
if (str_compare(request, "GET") == 0) {
http_request->method = HTTP_METHOD_GET;
http_req->method = HTTP_METHOD_GET;
} else if (str_compare(request, "POST") == 0) {
http_request->method = HTTP_METHOD_POST;
http_req->method = HTTP_METHOD_POST;
} else if (str_compare(request, "PUT") == 0) {
http_request->method = HTTP_METHOD_PUT;
http_req->method = HTTP_METHOD_PUT;
} else if (str_compare(request, "DELETE") == 0) {
http_request->method = HTTP_METHOD_DELETE;
http_req->method = HTTP_METHOD_DELETE;
} else {
// Additional request types are possible BUT we don't support them in our internal framework
// If this were a public framework we would have to support additional request types
http_request->method = HTTP_METHOD_UNKNOWN;
http_req->method = HTTP_METHOD_UNKNOWN;
}
// Parse request path
str_move_past(&request, ' ');
http_request->uri.path_offset = request - request_start;
http_req->uri.path_offset = request - request_start;
str_skip_until_list(&request, ":?# ");
http_request->uri.path_length = (request - request_start) - http_request->uri.path_offset;
http_req->uri.path_length = (request - request_start) - http_req->uri.path_offset;
// Parse port
if (*request == ':') {
http_request->uri.port = (uint16) str_to_int(request, &request);
http_req->uri.port = (uint16) str_to_int(request, &request);
}
// Parse query parameters
if (*request == '?') {
http_request->uri.parameter_offset = request - request_start;
http_req->uri.parameter_offset = request - request_start;
str_skip_until_list(&request, "# ");
http_request->uri.path_length = (request - request_start) - http_request->uri.parameter_offset;
//http_req->uri.parameter_length = (request - request_start) - http_req->uri.parameter_offset;
}
// Parse fragment
if (*request == '#') {
http_request->uri.fragment_offset = request - request_start;
http_req->uri.fragment_offset = request - request_start;
str_move_to(&request, ' ');
http_request->uri.fragment_length = (request - request_start) - http_request->uri.fragment_offset;
http_req->uri.fragment_length = (request - request_start) - http_req->uri.fragment_offset;
}
// Parse protocol
str_move_past(&request, ' ');
if (str_compare(request, "HTTP/", sizeof("HTTP/") - 1) != 0) {
LOG_1("Invalid HTTP header, no protocol defined");
LOG_1("[ERROR] Invalid HTTP header, no protocol defined");
ASSERT_SIMPLE(false);
return;
@ -161,13 +393,13 @@ void http_request_header_parse(HttpRequest* http_request, const char* request) {
request += sizeof("HTTP/") - 1;
if (*request == '1') {
http_request->protocol = HTTP_PROTOCOL_1_1;
http_req->protocol = HTTP_PROTOCOL_1_1;
} else if (*request == '2') {
http_request->protocol = HTTP_PROTOCOL_2;
http_req->protocol = HTTP_PROTOCOL_2;
} else if (*request == '3') {
http_request->protocol = HTTP_PROTOCOL_3;
http_req->protocol = HTTP_PROTOCOL_3;
} else {
http_request->protocol = HTTP_PROTOCOL_UNKNOWN;
http_req->protocol = HTTP_PROTOCOL_UNKNOWN;
}
//////////////////////////////////////////////////
@ -176,16 +408,15 @@ void http_request_header_parse(HttpRequest* http_request, const char* request) {
// The HTTP headers end with \r\n\r\n (= one empty line/element)
while (request[0] != '\r' && request[1] != '\n' && request[2] != '\r' && request[3] != '\n') {
str_move_past(&request, '\n');
const char* key = request;
// @todo parse headers
str_move_past(&request, ':');
str_skip_empty(&request);
const char* value = request;
str_move_to(&request, '\r');
http_header_value_set(http_request, http_header_key_text(key), value, mem, request - value);
}
//////////////////////////////////////////////////
// Parsing HTTP body
//////////////////////////////////////////////////
request += 4;
http_request->body_offset = request - request_start;
http_request->body_length = http_request->request_length - http_request->body_offset;
}
void parse_multipart_data(const char *body, const char *boundary) {

View File

@ -10,18 +10,363 @@
#define COMS_JINGGA_HTTP_RESPONSE_H
#include "../stdlib/Types.h"
#include "HttpMethod.h"
#include "HttpHeader.h"
#include "HttpProtocol.h"
#include "HttpStatusCode.h"
#include "header/HttpHeaderKey.h"
#include "../network/SocketConnection.h"
#include "../memory/ThreadedChunkMemory.h"
struct HttpResponse {
HttpMethod method;
HttpProtocol protocol;
HttpStatusCode status_code;
enum HttpResponseState : byte {
HTTP_RESPONSE_STATE_NONE = 1 << 0,
HTTP_RESPONSE_STATE_HEADER_SENT = 1 << 1,
HTTP_RESPONSE_STATE_HEADER_BODY_SENT = 1 << 2,
HTTP_RESPONSE_STATE_HEADER_FINALIZED = 1 << 3
};
// @performance Create a cached header line for 200 responses
// @performance Create a cached header for most common response (incl. CSP, referrer, x-*, ...)
#define MIN_HTTP_RESPONSE_CONTENT 32768
/**
* Data layout
* HttpResponse
* ...
* HttpHeaderElement elements[...]
* char header_values[...]
* char body[...]
*
* NOTE: the memory area for header elements and header values is chunked
* This means that we usually "allocate" multiple elements so we don't have to perform a growth too often
*/
// @todo for large responses overwrite existing data and just send that instead of allocating too much memory
struct HttpResponse {
// Chunk id
int32 id;
// Defines the number of chunks this http response uses (incl. http header and body)
uint16 size;
// Flag to indicate if the response is already returned
// Uses HttpResponseState
byte state;
HttpProtocol protocol;
HttpStatusCode status_code;
// Element information
uint16 header_available_count;
uint16 header_used_count;
// Value information
uint16 header_available_size;
uint16 header_used_size;
// Body information
uint16 body_offset;
// uint32 body_available_size; Comes from size * chunk_size - body_offset
uint32 body_used_size;
};
inline
void http_response_grow(HttpResponse* __restrict* response, int32 count, ThreadedChunkMemory* mem)
{
HttpResponse* resp = *response;
int32 id = thrd_chunk_resize(mem, resp->id, resp->size, count);
resp = (HttpResponse*) thrd_chunk_get_element(mem, id);
resp->id = id;
resp->size = count;
*response = resp;
}
void http_header_value_set(
HttpResponse* __restrict* response,
HttpHeaderKey key,
const char* __restrict value,
ThreadedChunkMemory* mem
) {
HttpResponse* resp = *response;
char* body_ptr = ((char *) (resp + 1)) + resp->body_offset;
HttpHeaderElement* elements = (HttpHeaderElement *) (resp + 1);
HttpHeaderElement* element = NULL;
for (int32 i = 0; i < resp->header_used_count; ++i) {
if (elements[i].key == key) {
element = &elements[i];
break;
}
}
size_t value_length = str_length(value);
int32 header_content_offset = resp->header_available_count * sizeof(HttpHeaderElement);
if (element) {
// Replace existing value
if (value_length <= element->value_length) {
// New value can use same memory since it is smaller
// We don't size down since it is wasted performance for the hypothetical edge case where this is beneficial
// The edge case is when a size reduction would avoid a memory expansion later on
// @bug What if we first reduce the size and then increase it even though the original size would still be able to contain it?
memcpy(((char *) (resp + 1)) + element->value_offset, value, value_length);
} else {
// New value is larger than old value and requires memory moves
uint32 grow_header_content = resp->header_used_size + value_length >= resp->header_available_size;
uint32 header_value_growth = (uint32) OMS_MAX(grow_header_content * (1 * 256 * sizeof(char)), value_length);
if (header_value_growth) {
// The header content cannot hold the value
// We calculate the body size and then subtract the used space to find the free body size
if (header_value_growth > (resp->size * mem->chunk_size - resp->body_offset - sizeof(HttpResponse)) - resp->body_used_size) {
// We need to grow the response object since we don't have enough free space in the body to grow into
http_response_grow(response, resp->size + 1, mem);
resp = *response;
body_ptr = ((char *) (resp + 1)) + resp->body_offset;
}
if (resp->body_used_size) {
// Move body if we have body
memmove(
body_ptr + header_value_growth, // new body start position
body_ptr, // old body start position
resp->body_used_size // data to move
);
}
// We now move the body start position
resp->body_offset += header_value_growth;
// New element is positioned at the end of the existing header content
// @bug We are wasting the original value memory e.g.
// old data: ... other element_value ... old_value ... other element_value
// new data: ... other element_value ... old_value ... other element_value .. new_value
// As you can see we still use memory for the old_value which is not even tracked any more
// Solution: shift header content completely and re-reference the value_offset of every other element
// Ideal: ..other element_value ... new_value ... other element_value
// This is ideal since we only need to memmove the data after new_value
element->value_offset = header_content_offset + resp->header_used_size;
// The header content grows in size
resp->header_available_size += header_value_growth;
// The used header size grows
resp->header_used_size += value_length;
} else {
// The header content can hold the value
// Add the value at the end of content (careful same bug as above)
element->value_offset = resp->body_offset - (resp->header_available_size - resp->header_used_size);
}
memcpy(((char *) (resp + 1)) + element->value_offset, value, value_length);
}
element->value_length = value_length;
} else {
// Add new value
uint32 grow_header_elements = resp->header_used_count >= resp->header_available_count;
uint32 grow_header_content = resp->header_used_size + value_length > resp->header_available_size;
uint32 header_element_addition = grow_header_elements * 4;
uint32 header_element_growth = header_element_addition * sizeof(HttpHeaderElement);
uint32 header_value_growth = (uint32) OMS_MAX((grow_header_content) * (1 * 256 * sizeof(char)), value_length);
if (header_element_growth || header_value_growth) {
if (header_element_growth + header_value_growth > (resp->size * mem->chunk_size - resp->body_offset - sizeof(HttpResponse)) - resp->body_used_size) {
// We need to grow the response object since we don't have enough free space in the body to grow into
http_response_grow(response, resp->size + 1, mem);
resp = *response;
body_ptr = ((char *) (resp + 1)) + resp->body_offset;
elements = (HttpHeaderElement *) (resp + 1);
}
if (resp->body_used_size) {
// Move body if we have body
memmove(
body_ptr + header_element_growth + header_value_growth, // New body start position
body_ptr, // Old body start
resp->body_used_size // Data to move
);
}
if (header_element_growth && resp->header_used_size) {
// If we are growing the element array, we need to move the content
memmove(
((char *) (resp + 1)) + sizeof(HttpHeaderElement) * (resp->header_available_count + header_element_addition), // New element value start position
((char *) (resp + 1)) + sizeof(HttpHeaderElement) * resp->header_available_count, // Old element value start position
resp->header_used_size
);
// We need to adjust the offset position because of the move
for (int32 i = 0; i < resp->header_used_count; ++i) {
elements[i].value_offset += header_element_growth;
}
}
// We now move the body start position
resp->body_offset += header_element_growth + header_value_growth;
resp->header_available_count += header_element_growth / sizeof(HttpHeaderElement);
resp->header_available_size += header_value_growth;
}
// Set element
element = &elements[resp->header_used_count];
element->key = key;
// The value is added to the end of the values
element->value_offset = resp->header_available_count * sizeof(HttpHeaderElement) + resp->header_used_size;
element->value_length = (uint16) value_length;
// Set value
memcpy(((char *) (resp + 1)) + element->value_offset, value, value_length);
resp->body_used_size += value_length;
resp->header_used_size += (uint16) value_length;
++resp->header_used_count;
}
}
HttpResponse* http_response_create(ThreadedChunkMemory* mem)
{
int32 response_buffer_count = CEIL_DIV(sizeof(HttpResponse) + MIN_HTTP_RESPONSE_CONTENT, mem->chunk_size);
int32 response_buffer_id = thrd_chunk_reserve(mem, response_buffer_count);
HttpResponse* response = (HttpResponse *) thrd_chunk_get_element(mem, response_buffer_id);
response->id = response_buffer_id;
response->size = response_buffer_count;
response->protocol = HTTP_PROTOCOL_1_1;
response->status_code = HTTP_STATUS_CODE_200;
// Create content length placeholder; this header element is always required
http_header_value_set(&response, HTTP_HEADER_KEY_CONTENT_LENGTH, " ", mem);
// Prepare the chunked sub-regions
response->header_available_count = 16;
response->header_available_size = 4 * 256 * sizeof(char);
response->body_offset = response->header_available_count * sizeof(HttpHeaderElement) + response->header_available_size;
/*
response->body_available_size = response_buffer_count * mem->chunk_size
- response->header_available_count * sizeof(HttpHeaderElement)
- response->header_available_size;
*/
return response;
}
inline
const char* http_response_body(const HttpResponse* response) {
return ((const char *) (response + 1)) + response->body_offset;
}
inline
const HttpHeaderElement* http_header_element_get(const HttpResponse* response, HttpHeaderKey key)
{
const HttpHeaderElement* elements = (HttpHeaderElement *) (response + 1);
for (int32 i = 0; i < response->header_used_count; ++i) {
if (elements[i].key == key) {
return &elements[i];
}
}
return NULL;
}
inline
const char* http_header_value_get(const HttpResponse* response, const HttpHeaderElement* header_element)
{
return ((const char *) (response + 1)) + header_element->value_offset;
}
// @todo we need a streamed response version http_response_stream()
// WARNING: We expect the response to already contain a content-length header element
void http_response_send(const SocketConnection* __restrict socket, HttpResponse* __restrict response)
{
char header[4096];
char* header_ref;
header_ref = header;
// First line
header_ref += str_copy(header_ref, http_protocol_text(response->protocol));
*header_ref++ = ' ';
header_ref += str_copy(header_ref, http_status_text(response->status_code));
*header_ref++ = '\r';
*header_ref++ = '\n';
char content_length[12];
int_to_str(response->body_used_size, content_length);
http_header_value_set(&response, HTTP_HEADER_KEY_CONTENT_LENGTH, content_length, NULL);
const HttpHeaderElement* elements = (HttpHeaderElement *) (response + 1);
// Headers
for (int32 i = 0; i < response->header_used_count; ++i) {
const HttpHeaderElement* element = &elements[i];
header_ref += str_copy(header_ref, http_header_key_text(element->key));
*header_ref++ = ' ';
memcpy(header_ref, (const char *) elements + element->value_offset, element->value_length);
header_ref += element->value_length;
*header_ref++ = '\r';
*header_ref++ = '\n';
}
*header_ref++ = '\r';
*header_ref++ = '\n';
// Use entire header array for first send
// This also has a direct impact on the time to first byte (TTFB)
// You may also have heard that critical CSS should be in the head of the HTML (this is one of the reasons)
int32 body_size_to_add = OMS_CLAMP((int32) (sizeof(header) - (header_ref - header)), 0, (int32) response->body_used_size);
if (body_size_to_add && response->body_offset) {
memcpy(
header_ref,
((const char *) (response + 1)) + response->body_offset,
body_size_to_add
);
header_ref += body_size_to_add;
}
// Send headers & potentially some content
send(socket->sd, header, header_ref - header, 0);
// Do we have data remaining to be sent?
if (response->body_offset && response->body_used_size - body_size_to_add > 0) {
// @question Do we need chunked sends?
send(
socket->sd,
((const char *) (response + 1)) + response->body_offset + body_size_to_add,
response->body_used_size - body_size_to_add,
0
);
}
}
void http_response_body_add(HttpResponse** response, const char* __restrict body, size_t length, ThreadedChunkMemory* mem)
{
HttpResponse* resp = *response;
char* response_body = (char *) (resp + 1);
length = (length == 0) ? str_length(body) : length;
// Resize if needed
if (resp->body_used_size + length > resp->size * mem->chunk_size - sizeof(HttpResponse)) {
int32 response_buffer_count = CEIL_DIV(sizeof(HttpResponse) + resp->body_used_size + length, mem->chunk_size);
http_response_grow(&resp, response_buffer_count, mem);
*response = resp;
response_body = (char*) (resp + 1);
}
memcpy(response_body + resp->body_used_size, body, length);
resp->body_used_size += length;
}
#endif
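A hypothetical end-to-end sketch for the response side (the socket connection con and the ThreadedChunkMemory mem are assumed, not part of this commit):
// Sketch: build and send a response (con/mem are assumed to exist)
HttpResponse* resp = http_response_create(mem);
http_header_value_set(&resp, HTTP_HEADER_KEY_CONTENT_TYPE, HTTP_HEADER_CONTENT_TYPE_TEXT_HTML, mem);
http_response_body_add(&resp, "<html><body>Hello</body></html>", 0, mem);
http_response_send(con, resp); // sets Content-Length and packs as much body as fits into the first send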

View File

@ -28,4 +28,10 @@ struct HttpUri {
uint16 port;
};
FORCE_INLINE
const char* http_uri_path_get(const HttpUri* uri, const char* base) {
return base + uri->path_offset;
}
#endif

View File

@ -0,0 +1,15 @@
/**
* Jingga
*
* @copyright Jingga
* @license OMS License 2.0
* @version 1.0.0
* @link https://jingga.app
*/
#ifndef COMS_JINGGA_HTTP_HEADER_CONNECTION_H
#define COMS_JINGGA_HTTP_HEADER_CONNECTION_H
#define HTTP_HEADER_CONNECTION_CLOSE "close"
#define HTTP_HEADER_CONNECTION_KEEP_ALIVE "keep-alive"
#endif

View File

@ -0,0 +1,14 @@
/**
* Jingga
*
* @copyright Jingga
* @license OMS License 2.0
* @version 1.0.0
* @link https://jingga.app
*/
#ifndef COMS_JINGGA_HTTP_HEADER_CONTENT_TYPE_H
#define COMS_JINGGA_HTTP_HEADER_CONTENT_TYPE_H
#define HTTP_HEADER_CONTENT_TYPE_TEXT_HTML "text/html"
#endif

652
http/header/HttpHeaderKey.h Normal file
View File

@ -0,0 +1,652 @@
/**
* Jingga
*
* @copyright Jingga
* @license OMS License 2.0
* @version 1.0.0
* @link https://jingga.app
*/
#ifndef COMS_JINGGA_HTTP_HEADER_KEY_H
#define COMS_JINGGA_HTTP_HEADER_KEY_H
#include "../../stdlib/Types.h"
enum HttpHeaderKey : byte {
// Standard HTTP/1.1 & HTTP/2 Headers (RFC 9110, 9113, etc.)
HTTP_HEADER_KEY_UNKNOWN,
HTTP_HEADER_KEY_HOST,
HTTP_HEADER_KEY_USER_AGENT,
HTTP_HEADER_KEY_ACCEPT,
HTTP_HEADER_KEY_ACCEPT_CHARSET,
HTTP_HEADER_KEY_ACCEPT_ENCODING,
HTTP_HEADER_KEY_ACCEPT_LANGUAGE,
HTTP_HEADER_KEY_ACCEPT_DATETIME,
HTTP_HEADER_KEY_ACCEPT_PATCH,
HTTP_HEADER_KEY_ACCEPT_RANGES,
HTTP_HEADER_KEY_AGE,
HTTP_HEADER_KEY_ALLOW,
HTTP_HEADER_KEY_AUTHORIZATION,
HTTP_HEADER_KEY_CACHE_CONTROL,
HTTP_HEADER_KEY_CONNECTION,
HTTP_HEADER_KEY_CONTENT_DISPOSITION,
HTTP_HEADER_KEY_CONTENT_ENCODING,
HTTP_HEADER_KEY_CONTENT_LANGUAGE,
HTTP_HEADER_KEY_CONTENT_LENGTH,
HTTP_HEADER_KEY_CONTENT_LOCATION,
HTTP_HEADER_KEY_CONTENT_MD5,
HTTP_HEADER_KEY_CONTENT_RANGE,
HTTP_HEADER_KEY_CONTENT_TYPE,
HTTP_HEADER_KEY_COOKIE,
HTTP_HEADER_KEY_DATE,
HTTP_HEADER_KEY_ETAG,
HTTP_HEADER_KEY_EXPECT,
HTTP_HEADER_KEY_EXPIRES,
HTTP_HEADER_KEY_FROM,
HTTP_HEADER_KEY_IF_MATCH,
HTTP_HEADER_KEY_IF_MODIFIED_SINCE,
HTTP_HEADER_KEY_IF_NONE_MATCH,
HTTP_HEADER_KEY_IF_RANGE,
HTTP_HEADER_KEY_IF_UNMODIFIED_SINCE,
HTTP_HEADER_KEY_LAST_MODIFIED,
HTTP_HEADER_KEY_LINK,
HTTP_HEADER_KEY_LOCATION,
HTTP_HEADER_KEY_MAX_FORWARDS,
HTTP_HEADER_KEY_ORIGIN,
HTTP_HEADER_KEY_PRAGMA,
HTTP_HEADER_KEY_PROXY_AUTHENTICATE,
HTTP_HEADER_KEY_PROXY_AUTHORIZATION,
HTTP_HEADER_KEY_RANGE,
HTTP_HEADER_KEY_REFERER,
HTTP_HEADER_KEY_RETRY_AFTER,
HTTP_HEADER_KEY_SERVER,
HTTP_HEADER_KEY_SET_COOKIE,
HTTP_HEADER_KEY_STRICT_TRANSPORT_SECURITY,
HTTP_HEADER_KEY_TE,
HTTP_HEADER_KEY_TRAILER,
HTTP_HEADER_KEY_TRANSFER_ENCODING,
HTTP_HEADER_KEY_UPGRADE,
HTTP_HEADER_KEY_VARY,
HTTP_HEADER_KEY_VIA,
HTTP_HEADER_KEY_WARNING,
HTTP_HEADER_KEY_WWW_AUTHENTICATE,
// Common Non-Standard (X-*) and Extension Headers
HTTP_HEADER_KEY_X_FORWARDED_FOR,
HTTP_HEADER_KEY_X_FORWARDED_HOST,
HTTP_HEADER_KEY_X_FORWARDED_PROTO,
HTTP_HEADER_KEY_X_REQUESTED_WITH,
HTTP_HEADER_KEY_X_CSRF_TOKEN,
HTTP_HEADER_KEY_X_XSS_PROTECTION,
HTTP_HEADER_KEY_X_CONTENT_TYPE_OPTIONS,
HTTP_HEADER_KEY_X_FRAME_OPTIONS,
HTTP_HEADER_KEY_X_POWERED_BY,
HTTP_HEADER_KEY_X_UPLOAD_ID,
HTTP_HEADER_KEY_X_RATE_LIMIT_LIMIT,
HTTP_HEADER_KEY_X_RATE_LIMIT_REMAINING,
HTTP_HEADER_KEY_X_RATE_LIMIT_RESET,
HTTP_HEADER_KEY_X_UA_COMPATIBLE,
HTTP_HEADER_KEY_X_DNS_PREFETCH_CONTROL,
HTTP_HEADER_KEY_X_DOWNLOAD_OPTIONS,
HTTP_HEADER_KEY_X_PERMITTED_CROSS_DOMAIN_POLICIES,
// CORS Headers
HTTP_HEADER_KEY_ACCESS_CONTROL_ALLOW_ORIGIN,
HTTP_HEADER_KEY_ACCESS_CONTROL_ALLOW_CREDENTIALS,
HTTP_HEADER_KEY_ACCESS_CONTROL_ALLOW_HEADERS,
HTTP_HEADER_KEY_ACCESS_CONTROL_ALLOW_METHODS,
HTTP_HEADER_KEY_ACCESS_CONTROL_EXPOSE_HEADERS,
HTTP_HEADER_KEY_ACCESS_CONTROL_MAX_AGE,
HTTP_HEADER_KEY_ACCESS_CONTROL_REQUEST_HEADERS,
HTTP_HEADER_KEY_ACCESS_CONTROL_REQUEST_METHOD,
// Security Headers
HTTP_HEADER_KEY_CONTENT_SECURITY_POLICY,
HTTP_HEADER_KEY_PERMISSIONS_POLICY,
HTTP_HEADER_KEY_REFERRER_POLICY,
HTTP_HEADER_KEY_EXPECT_CT,
HTTP_HEADER_KEY_FEATURE_POLICY,
HTTP_HEADER_KEY_CROSS_ORIGIN_EMBEDDER_POLICY,
HTTP_HEADER_KEY_CROSS_ORIGIN_OPENER_POLICY,
HTTP_HEADER_KEY_CROSS_ORIGIN_RESOURCE_POLICY,
// WebSocket Headers
HTTP_HEADER_KEY_SEC_WEBSOCKET_KEY,
HTTP_HEADER_KEY_SEC_WEBSOCKET_ACCEPT,
HTTP_HEADER_KEY_SEC_WEBSOCKET_VERSION,
HTTP_HEADER_KEY_SEC_WEBSOCKET_PROTOCOL,
HTTP_HEADER_KEY_SEC_WEBSOCKET_EXTENSIONS,
// HTTP/3 and QUIC Headers
HTTP_HEADER_KEY_ALT_SVC,
HTTP_HEADER_KEY_EARLY_DATA,
// Cloud & CDN Headers
HTTP_HEADER_KEY_CF_CONNECTING_IP,
HTTP_HEADER_KEY_CF_IPCOUNTRY,
HTTP_HEADER_KEY_CF_RAY,
HTTP_HEADER_KEY_TRUE_CLIENT_IP,
HTTP_HEADER_KEY_X_AMZ_CF_ID,
HTTP_HEADER_KEY_X_AMZN_TRACE_ID,
// Custom/Experimental Headers
HTTP_HEADER_KEY_DNT, // Do Not Track
HTTP_HEADER_KEY_SAVE_DATA,
HTTP_HEADER_KEY_DOWNLINK,
HTTP_HEADER_KEY_ECT, // Effective Connection Type
HTTP_HEADER_KEY_RTT,
HTTP_HEADER_KEY_PURPOSE,
HTTP_HEADER_KEY_SEC_FETCH_SITE,
HTTP_HEADER_KEY_SEC_FETCH_MODE,
HTTP_HEADER_KEY_SEC_FETCH_USER,
HTTP_HEADER_KEY_SEC_FETCH_DEST,
HTTP_HEADER_KEY_SERVICE_WORKER_NAVIGATION_PRELOAD,
HTTP_HEADER_KEY_LAST_EVENT_ID,
HTTP_HEADER_KEY_REPORT_TO,
HTTP_HEADER_KEY_PRIORITY,
HTTP_HEADER_KEY_SIGNATURE,
HTTP_HEADER_KEY_SIGNATURE_KEY,
HTTP_HEADER_KEY_FORWARDED,
HTTP_HEADER_KEY_ORIGINAL_METHOD,
HTTP_HEADER_KEY_ORIGINAL_URL,
HTTP_HEADER_KEY_ORIGINAL_HOST,
};
const char* http_header_key_text(HttpHeaderKey header_key) {
switch (header_key) {
case HTTP_HEADER_KEY_HOST:
return "Host";
case HTTP_HEADER_KEY_USER_AGENT:
return "User-Agent";
case HTTP_HEADER_KEY_ACCEPT:
return "Accept";
case HTTP_HEADER_KEY_ACCEPT_CHARSET:
return "Accept-Charset";
case HTTP_HEADER_KEY_ACCEPT_ENCODING:
return "Accept-Encoding";
case HTTP_HEADER_KEY_ACCEPT_LANGUAGE:
return "Accept-Language";
case HTTP_HEADER_KEY_ACCEPT_DATETIME:
return "Accept-Datetime";
case HTTP_HEADER_KEY_ACCEPT_PATCH:
return "Accept-Patch";
case HTTP_HEADER_KEY_ACCEPT_RANGES:
return "Accept-Ranges";
case HTTP_HEADER_KEY_AGE:
return "Age";
case HTTP_HEADER_KEY_ALLOW:
return "Allow";
case HTTP_HEADER_KEY_AUTHORIZATION:
return "Authorization";
case HTTP_HEADER_KEY_CACHE_CONTROL:
return "Cache-Control";
case HTTP_HEADER_KEY_CONNECTION:
return "Connection";
case HTTP_HEADER_KEY_CONTENT_DISPOSITION:
return "Content-Disposition";
case HTTP_HEADER_KEY_CONTENT_ENCODING:
return "Content-Encoding";
case HTTP_HEADER_KEY_CONTENT_LANGUAGE:
return "Content-Language";
case HTTP_HEADER_KEY_CONTENT_LENGTH:
return "Content-Length";
case HTTP_HEADER_KEY_CONTENT_LOCATION:
return "Content-Location";
case HTTP_HEADER_KEY_CONTENT_MD5:
return "Content-Md5";
case HTTP_HEADER_KEY_CONTENT_RANGE:
return "Content-Range";
case HTTP_HEADER_KEY_CONTENT_TYPE:
return "Content-Type";
case HTTP_HEADER_KEY_COOKIE:
return "Cookie";
case HTTP_HEADER_KEY_DATE:
return "Date";
case HTTP_HEADER_KEY_ETAG:
return "Etag";
case HTTP_HEADER_KEY_EXPECT:
return "Expect";
case HTTP_HEADER_KEY_EXPIRES:
return "Expires";
case HTTP_HEADER_KEY_FROM:
return "From";
case HTTP_HEADER_KEY_IF_MATCH:
return "If-Match";
case HTTP_HEADER_KEY_IF_MODIFIED_SINCE:
return "If-Modified-Since";
case HTTP_HEADER_KEY_IF_NONE_MATCH:
return "If-None-Match";
case HTTP_HEADER_KEY_IF_RANGE:
return "If-Range";
case HTTP_HEADER_KEY_IF_UNMODIFIED_SINCE:
return "If-Unmodified-Since";
case HTTP_HEADER_KEY_LAST_MODIFIED:
return "Last-Modified";
case HTTP_HEADER_KEY_LINK:
return "Link";
case HTTP_HEADER_KEY_LOCATION:
return "Location";
case HTTP_HEADER_KEY_MAX_FORWARDS:
return "Max-Forwards";
case HTTP_HEADER_KEY_ORIGIN:
return "Origin";
case HTTP_HEADER_KEY_PRAGMA:
return "Pragma";
case HTTP_HEADER_KEY_PROXY_AUTHENTICATE:
return "Proxy-Authenticate";
case HTTP_HEADER_KEY_PROXY_AUTHORIZATION:
return "Proxy-Authorization";
case HTTP_HEADER_KEY_RANGE:
return "Range";
case HTTP_HEADER_KEY_REFERER:
return "Referer";
case HTTP_HEADER_KEY_RETRY_AFTER:
return "Retry-After";
case HTTP_HEADER_KEY_SERVER:
return "Server";
case HTTP_HEADER_KEY_SET_COOKIE:
return "Set-Cookie";
case HTTP_HEADER_KEY_STRICT_TRANSPORT_SECURITY:
return "Strict-Transport-Security";
case HTTP_HEADER_KEY_TE:
return "Te";
case HTTP_HEADER_KEY_TRAILER:
return "Trailer";
case HTTP_HEADER_KEY_TRANSFER_ENCODING:
return "Transfer-Encoding";
case HTTP_HEADER_KEY_UPGRADE:
return "Upgrade";
case HTTP_HEADER_KEY_VARY:
return "Vary";
case HTTP_HEADER_KEY_VIA:
return "Via";
case HTTP_HEADER_KEY_WARNING:
return "Warning";
case HTTP_HEADER_KEY_WWW_AUTHENTICATE:
return "Www-Authenticate";
case HTTP_HEADER_KEY_X_FORWARDED_FOR:
return "X-Forwarded-For";
case HTTP_HEADER_KEY_X_FORWARDED_HOST:
return "X-Forwarded-Host";
case HTTP_HEADER_KEY_X_FORWARDED_PROTO:
return "X-Forwarded-Proto";
case HTTP_HEADER_KEY_X_REQUESTED_WITH:
return "X-Requested-With";
case HTTP_HEADER_KEY_X_CSRF_TOKEN:
return "X-Csrf-Token";
case HTTP_HEADER_KEY_X_XSS_PROTECTION:
return "X-Xss-Protection";
case HTTP_HEADER_KEY_X_CONTENT_TYPE_OPTIONS:
return "X-Content-Type-Options";
case HTTP_HEADER_KEY_X_FRAME_OPTIONS:
return "X-Frame-Options";
case HTTP_HEADER_KEY_X_POWERED_BY:
return "X-Powered-By";
case HTTP_HEADER_KEY_X_UPLOAD_ID:
return "X-Upload-Id";
case HTTP_HEADER_KEY_X_RATE_LIMIT_LIMIT:
return "X-Rate-Limit-Limit";
case HTTP_HEADER_KEY_X_RATE_LIMIT_REMAINING:
return "X-Rate-Limit-Remaining";
case HTTP_HEADER_KEY_X_RATE_LIMIT_RESET:
return "X-Rate-Limit-Reset";
case HTTP_HEADER_KEY_X_UA_COMPATIBLE:
return "X-Ua-Compatible";
case HTTP_HEADER_KEY_X_DNS_PREFETCH_CONTROL:
return "X-Dns-Prefetch-Control";
case HTTP_HEADER_KEY_X_DOWNLOAD_OPTIONS:
return "X-Download-Options";
case HTTP_HEADER_KEY_X_PERMITTED_CROSS_DOMAIN_POLICIES:
return "X-Permitted-Cross-Domain-Policies";
case HTTP_HEADER_KEY_ACCESS_CONTROL_ALLOW_ORIGIN:
return "Access-Control-Allow-Origin";
case HTTP_HEADER_KEY_ACCESS_CONTROL_ALLOW_CREDENTIALS:
return "Access-Control-Allow-Credentials";
case HTTP_HEADER_KEY_ACCESS_CONTROL_ALLOW_HEADERS:
return "Access-Control-Allow-Headers";
case HTTP_HEADER_KEY_ACCESS_CONTROL_ALLOW_METHODS:
return "Access-Control-Allow-Methods";
case HTTP_HEADER_KEY_ACCESS_CONTROL_EXPOSE_HEADERS:
return "Access-Control-Expose-Headers";
case HTTP_HEADER_KEY_ACCESS_CONTROL_MAX_AGE:
return "Access-Control-Max-Age";
case HTTP_HEADER_KEY_ACCESS_CONTROL_REQUEST_HEADERS:
return "Access-Control-Request-Headers";
case HTTP_HEADER_KEY_ACCESS_CONTROL_REQUEST_METHOD:
return "Access-Control-Request-Method";
case HTTP_HEADER_KEY_CONTENT_SECURITY_POLICY:
return "Content-Security-Policy";
case HTTP_HEADER_KEY_PERMISSIONS_POLICY:
return "Permissions-Policy";
case HTTP_HEADER_KEY_REFERRER_POLICY:
return "Referrer-Policy";
case HTTP_HEADER_KEY_EXPECT_CT:
return "Expect-Ct";
case HTTP_HEADER_KEY_FEATURE_POLICY:
return "Feature-Policy";
case HTTP_HEADER_KEY_CROSS_ORIGIN_EMBEDDER_POLICY:
return "Cross-Origin-Embedder-Policy";
case HTTP_HEADER_KEY_CROSS_ORIGIN_OPENER_POLICY:
return "Cross-Origin-Opener-Policy";
case HTTP_HEADER_KEY_CROSS_ORIGIN_RESOURCE_POLICY:
return "Cross-Origin-Resource-Policy";
case HTTP_HEADER_KEY_SEC_WEBSOCKET_KEY:
return "Sec-Websocket-Key";
case HTTP_HEADER_KEY_SEC_WEBSOCKET_ACCEPT:
return "Sec-Websocket-Accept";
case HTTP_HEADER_KEY_SEC_WEBSOCKET_VERSION:
return "Sec-Websocket-Version";
case HTTP_HEADER_KEY_SEC_WEBSOCKET_PROTOCOL:
return "Sec-Websocket-Protocol";
case HTTP_HEADER_KEY_SEC_WEBSOCKET_EXTENSIONS:
return "Sec-Websocket-Extensions";
case HTTP_HEADER_KEY_ALT_SVC:
return "Alt-Svc";
case HTTP_HEADER_KEY_EARLY_DATA:
return "Early-Data";
case HTTP_HEADER_KEY_CF_CONNECTING_IP:
return "Cf-Connecting-Ip";
case HTTP_HEADER_KEY_CF_IPCOUNTRY:
return "Cf-Ipcountry";
case HTTP_HEADER_KEY_CF_RAY:
return "Cf-Ray";
case HTTP_HEADER_KEY_TRUE_CLIENT_IP:
return "True-Client-Ip";
case HTTP_HEADER_KEY_X_AMZ_CF_ID:
return "X-Amz-Cf-Id";
case HTTP_HEADER_KEY_X_AMZN_TRACE_ID:
return "X-Amzn-Trace-Id";
case HTTP_HEADER_KEY_DNT:
return "Dnt";
case HTTP_HEADER_KEY_SAVE_DATA:
return "Save-Data";
case HTTP_HEADER_KEY_DOWNLINK:
return "Downlink";
case HTTP_HEADER_KEY_ECT:
return "Ect";
case HTTP_HEADER_KEY_RTT:
return "Rtt";
case HTTP_HEADER_KEY_PURPOSE:
return "Purpose";
case HTTP_HEADER_KEY_SEC_FETCH_SITE:
return "Sec-Fetch-Site";
case HTTP_HEADER_KEY_SEC_FETCH_MODE:
return "Sec-Fetch-Mode";
case HTTP_HEADER_KEY_SEC_FETCH_USER:
return "Sec-Fetch-User";
case HTTP_HEADER_KEY_SEC_FETCH_DEST:
return "Sec-Fetch-Dest";
case HTTP_HEADER_KEY_SERVICE_WORKER_NAVIGATION_PRELOAD:
return "Service-Worker-Navigation-Preload";
case HTTP_HEADER_KEY_LAST_EVENT_ID:
return "Last-Event-Id";
case HTTP_HEADER_KEY_REPORT_TO:
return "Report-To";
case HTTP_HEADER_KEY_PRIORITY:
return "Priority";
case HTTP_HEADER_KEY_SIGNATURE:
return "Signature";
case HTTP_HEADER_KEY_SIGNATURE_KEY:
return "Signature-Key";
case HTTP_HEADER_KEY_FORWARDED:
return "Forwarded";
case HTTP_HEADER_KEY_ORIGINAL_METHOD:
return "Original-Method";
case HTTP_HEADER_KEY_ORIGINAL_URL:
return "Original-Url";
case HTTP_HEADER_KEY_ORIGINAL_HOST:
return "Original-Host";
default:
UNREACHABLE();
}
}
HttpHeaderKey http_header_key_text(const char* header) {
if (str_compare_caseless(header, "Host:") == 0) {
return HTTP_HEADER_KEY_HOST;
} else if (str_compare_caseless(header, "User-Agent:") == 0) {
return HTTP_HEADER_KEY_USER_AGENT;
} else if (str_compare_caseless(header, "Accept:") == 0) {
return HTTP_HEADER_KEY_ACCEPT;
} else if (str_compare_caseless(header, "Accept-Charset:") == 0) {
return HTTP_HEADER_KEY_ACCEPT_CHARSET;
} else if (str_compare_caseless(header, "Accept-Encoding:") == 0) {
return HTTP_HEADER_KEY_ACCEPT_ENCODING;
} else if (str_compare_caseless(header, "Accept-Language:") == 0) {
return HTTP_HEADER_KEY_ACCEPT_LANGUAGE;
} else if (str_compare_caseless(header, "Accept-Datetime:") == 0) {
return HTTP_HEADER_KEY_ACCEPT_DATETIME;
} else if (str_compare_caseless(header, "Accept-Patch:") == 0) {
return HTTP_HEADER_KEY_ACCEPT_PATCH;
} else if (str_compare_caseless(header, "Accept-Ranges:") == 0) {
return HTTP_HEADER_KEY_ACCEPT_RANGES;
} else if (str_compare_caseless(header, "Age:") == 0) {
return HTTP_HEADER_KEY_AGE;
} else if (str_compare_caseless(header, "Allow:") == 0) {
return HTTP_HEADER_KEY_ALLOW;
} else if (str_compare_caseless(header, "Authorization:") == 0) {
return HTTP_HEADER_KEY_AUTHORIZATION;
} else if (str_compare_caseless(header, "Cache-Control:") == 0) {
return HTTP_HEADER_KEY_CACHE_CONTROL;
} else if (str_compare_caseless(header, "Connection:") == 0) {
return HTTP_HEADER_KEY_CONNECTION;
} else if (str_compare_caseless(header, "Content-Disposition:") == 0) {
return HTTP_HEADER_KEY_CONTENT_DISPOSITION;
} else if (str_compare_caseless(header, "Content-Encoding:") == 0) {
return HTTP_HEADER_KEY_CONTENT_ENCODING;
} else if (str_compare_caseless(header, "Content-Language:") == 0) {
return HTTP_HEADER_KEY_CONTENT_LANGUAGE;
} else if (str_compare_caseless(header, "Content-Length:") == 0) {
return HTTP_HEADER_KEY_CONTENT_LENGTH;
} else if (str_compare_caseless(header, "Content-Location:") == 0) {
return HTTP_HEADER_KEY_CONTENT_LOCATION;
} else if (str_compare_caseless(header, "Content-Md5:") == 0) {
return HTTP_HEADER_KEY_CONTENT_MD5;
} else if (str_compare_caseless(header, "Content-Range:") == 0) {
return HTTP_HEADER_KEY_CONTENT_RANGE;
} else if (str_compare_caseless(header, "Content-Type:") == 0) {
return HTTP_HEADER_KEY_CONTENT_TYPE;
} else if (str_compare_caseless(header, "Cookie:") == 0) {
return HTTP_HEADER_KEY_COOKIE;
} else if (str_compare_caseless(header, "Date:") == 0) {
return HTTP_HEADER_KEY_DATE;
} else if (str_compare_caseless(header, "Etag:") == 0) {
return HTTP_HEADER_KEY_ETAG;
} else if (str_compare_caseless(header, "Expect:") == 0) {
return HTTP_HEADER_KEY_EXPECT;
} else if (str_compare_caseless(header, "Expires:") == 0) {
return HTTP_HEADER_KEY_EXPIRES;
} else if (str_compare_caseless(header, "From:") == 0) {
return HTTP_HEADER_KEY_FROM;
} else if (str_compare_caseless(header, "If-Match:") == 0) {
return HTTP_HEADER_KEY_IF_MATCH;
} else if (str_compare_caseless(header, "If-Modified-Since:") == 0) {
return HTTP_HEADER_KEY_IF_MODIFIED_SINCE;
} else if (str_compare_caseless(header, "If-None-Match:") == 0) {
return HTTP_HEADER_KEY_IF_NONE_MATCH;
} else if (str_compare_caseless(header, "If-Range:") == 0) {
return HTTP_HEADER_KEY_IF_RANGE;
} else if (str_compare_caseless(header, "If-Unmodified-Since:") == 0) {
return HTTP_HEADER_KEY_IF_UNMODIFIED_SINCE;
} else if (str_compare_caseless(header, "Last-Modified:") == 0) {
return HTTP_HEADER_KEY_LAST_MODIFIED;
} else if (str_compare_caseless(header, "Link:") == 0) {
return HTTP_HEADER_KEY_LINK;
} else if (str_compare_caseless(header, "Location:") == 0) {
return HTTP_HEADER_KEY_LOCATION;
} else if (str_compare_caseless(header, "Max-Forwards:") == 0) {
return HTTP_HEADER_KEY_MAX_FORWARDS;
} else if (str_compare_caseless(header, "Origin:") == 0) {
return HTTP_HEADER_KEY_ORIGIN;
} else if (str_compare_caseless(header, "Pragma:") == 0) {
return HTTP_HEADER_KEY_PRAGMA;
} else if (str_compare_caseless(header, "Proxy-Authenticate:") == 0) {
return HTTP_HEADER_KEY_PROXY_AUTHENTICATE;
} else if (str_compare_caseless(header, "Proxy-Authorization:") == 0) {
return HTTP_HEADER_KEY_PROXY_AUTHORIZATION;
} else if (str_compare_caseless(header, "Range:") == 0) {
return HTTP_HEADER_KEY_RANGE;
} else if (str_compare_caseless(header, "Referer:") == 0) {
return HTTP_HEADER_KEY_REFERER;
} else if (str_compare_caseless(header, "Retry-After:") == 0) {
return HTTP_HEADER_KEY_RETRY_AFTER;
} else if (str_compare_caseless(header, "Server:") == 0) {
return HTTP_HEADER_KEY_SERVER;
} else if (str_compare_caseless(header, "Set-Cookie:") == 0) {
return HTTP_HEADER_KEY_SET_COOKIE;
} else if (str_compare_caseless(header, "Strict-Transport-Security:") == 0) {
return HTTP_HEADER_KEY_STRICT_TRANSPORT_SECURITY;
} else if (str_compare_caseless(header, "Te:") == 0) {
return HTTP_HEADER_KEY_TE;
} else if (str_compare_caseless(header, "Trailer:") == 0) {
return HTTP_HEADER_KEY_TRAILER;
} else if (str_compare_caseless(header, "Transfer-Encoding:") == 0) {
return HTTP_HEADER_KEY_TRANSFER_ENCODING;
} else if (str_compare_caseless(header, "Upgrade:") == 0) {
return HTTP_HEADER_KEY_UPGRADE;
} else if (str_compare_caseless(header, "Vary:") == 0) {
return HTTP_HEADER_KEY_VARY;
} else if (str_compare_caseless(header, "Via:") == 0) {
return HTTP_HEADER_KEY_VIA;
} else if (str_compare_caseless(header, "Warning:") == 0) {
return HTTP_HEADER_KEY_WARNING;
} else if (str_compare_caseless(header, "Www-Authenticate:") == 0) {
return HTTP_HEADER_KEY_WWW_AUTHENTICATE;
} else if (str_compare_caseless(header, "X-Forwarded-For:") == 0) {
return HTTP_HEADER_KEY_X_FORWARDED_FOR;
} else if (str_compare_caseless(header, "X-Forwarded-Host:") == 0) {
return HTTP_HEADER_KEY_X_FORWARDED_HOST;
} else if (str_compare_caseless(header, "X-Forwarded-Proto:") == 0) {
return HTTP_HEADER_KEY_X_FORWARDED_PROTO;
} else if (str_compare_caseless(header, "X-Requested-With:") == 0) {
return HTTP_HEADER_KEY_X_REQUESTED_WITH;
} else if (str_compare_caseless(header, "X-Csrf-Token:") == 0) {
return HTTP_HEADER_KEY_X_CSRF_TOKEN;
} else if (str_compare_caseless(header, "X-Xss-Protection:") == 0) {
return HTTP_HEADER_KEY_X_XSS_PROTECTION;
} else if (str_compare_caseless(header, "X-Content-Type-Options:") == 0) {
return HTTP_HEADER_KEY_X_CONTENT_TYPE_OPTIONS;
} else if (str_compare_caseless(header, "X-Frame-Options:") == 0) {
return HTTP_HEADER_KEY_X_FRAME_OPTIONS;
} else if (str_compare_caseless(header, "X-Powered-By:") == 0) {
return HTTP_HEADER_KEY_X_POWERED_BY;
} else if (str_compare_caseless(header, "X-Upload-Id:") == 0) {
return HTTP_HEADER_KEY_X_UPLOAD_ID;
} else if (str_compare_caseless(header, "X-Rate-Limit-Limit:") == 0) {
return HTTP_HEADER_KEY_X_RATE_LIMIT_LIMIT;
} else if (str_compare_caseless(header, "X-Rate-Limit-Remaining:") == 0) {
return HTTP_HEADER_KEY_X_RATE_LIMIT_REMAINING;
} else if (str_compare_caseless(header, "X-Rate-Limit-Reset:") == 0) {
return HTTP_HEADER_KEY_X_RATE_LIMIT_RESET;
} else if (str_compare_caseless(header, "X-Ua-Compatible:") == 0) {
return HTTP_HEADER_KEY_X_UA_COMPATIBLE;
} else if (str_compare_caseless(header, "X-Dns-Prefetch-Control:") == 0) {
return HTTP_HEADER_KEY_X_DNS_PREFETCH_CONTROL;
} else if (str_compare_caseless(header, "X-Download-Options:") == 0) {
return HTTP_HEADER_KEY_X_DOWNLOAD_OPTIONS;
} else if (str_compare_caseless(header, "X-Permitted-Cross-Domain-Policies:") == 0) {
return HTTP_HEADER_KEY_X_PERMITTED_CROSS_DOMAIN_POLICIES;
} else if (str_compare_caseless(header, "Access-Control-Allow-Origin:") == 0) {
return HTTP_HEADER_KEY_ACCESS_CONTROL_ALLOW_ORIGIN;
} else if (str_compare_caseless(header, "Access-Control-Allow-Credentials:") == 0) {
return HTTP_HEADER_KEY_ACCESS_CONTROL_ALLOW_CREDENTIALS;
} else if (str_compare_caseless(header, "Access-Control-Allow-Headers:") == 0) {
return HTTP_HEADER_KEY_ACCESS_CONTROL_ALLOW_HEADERS;
} else if (str_compare_caseless(header, "Access-Control-Allow-Methods:") == 0) {
return HTTP_HEADER_KEY_ACCESS_CONTROL_ALLOW_METHODS;
} else if (str_compare_caseless(header, "Access-Control-Expose-Headers:") == 0) {
return HTTP_HEADER_KEY_ACCESS_CONTROL_EXPOSE_HEADERS;
} else if (str_compare_caseless(header, "Access-Control-Max-Age:") == 0) {
return HTTP_HEADER_KEY_ACCESS_CONTROL_MAX_AGE;
} else if (str_compare_caseless(header, "Access-Control-Request-Headers:") == 0) {
return HTTP_HEADER_KEY_ACCESS_CONTROL_REQUEST_HEADERS;
} else if (str_compare_caseless(header, "Access-Control-Request-Method:") == 0) {
return HTTP_HEADER_KEY_ACCESS_CONTROL_REQUEST_METHOD;
} else if (str_compare_caseless(header, "Content-Security-Policy:") == 0) {
return HTTP_HEADER_KEY_CONTENT_SECURITY_POLICY;
} else if (str_compare_caseless(header, "Permissions-Policy:") == 0) {
return HTTP_HEADER_KEY_PERMISSIONS_POLICY;
} else if (str_compare_caseless(header, "Referrer-Policy:") == 0) {
return HTTP_HEADER_KEY_REFERRER_POLICY;
} else if (str_compare_caseless(header, "Expect-Ct:") == 0) {
return HTTP_HEADER_KEY_EXPECT_CT;
} else if (str_compare_caseless(header, "Feature-Policy:") == 0) {
return HTTP_HEADER_KEY_FEATURE_POLICY;
} else if (str_compare_caseless(header, "Cross-Origin-Embedder-Policy:") == 0) {
return HTTP_HEADER_KEY_CROSS_ORIGIN_EMBEDDER_POLICY;
} else if (str_compare_caseless(header, "Cross-Origin-Opener-Policy:") == 0) {
return HTTP_HEADER_KEY_CROSS_ORIGIN_OPENER_POLICY;
} else if (str_compare_caseless(header, "Cross-Origin-Resource-Policy:") == 0) {
return HTTP_HEADER_KEY_CROSS_ORIGIN_RESOURCE_POLICY;
} else if (str_compare_caseless(header, "Sec-Websocket-Key:") == 0) {
return HTTP_HEADER_KEY_SEC_WEBSOCKET_KEY;
} else if (str_compare_caseless(header, "Sec-Websocket-Accept:") == 0) {
return HTTP_HEADER_KEY_SEC_WEBSOCKET_ACCEPT;
} else if (str_compare_caseless(header, "Sec-Websocket-Version:") == 0) {
return HTTP_HEADER_KEY_SEC_WEBSOCKET_VERSION;
} else if (str_compare_caseless(header, "Sec-Websocket-Protocol:") == 0) {
return HTTP_HEADER_KEY_SEC_WEBSOCKET_PROTOCOL;
} else if (str_compare_caseless(header, "Sec-Websocket-Extensions:") == 0) {
return HTTP_HEADER_KEY_SEC_WEBSOCKET_EXTENSIONS;
} else if (str_compare_caseless(header, "Alt-Svc:") == 0) {
return HTTP_HEADER_KEY_ALT_SVC;
} else if (str_compare_caseless(header, "Early-Data:") == 0) {
return HTTP_HEADER_KEY_EARLY_DATA;
} else if (str_compare_caseless(header, "Cf-Connecting-Ip:") == 0) {
return HTTP_HEADER_KEY_CF_CONNECTING_IP;
} else if (str_compare_caseless(header, "Cf-Ipcountry:") == 0) {
return HTTP_HEADER_KEY_CF_IPCOUNTRY;
} else if (str_compare_caseless(header, "Cf-Ray:") == 0) {
return HTTP_HEADER_KEY_CF_RAY;
} else if (str_compare_caseless(header, "True-Client-Ip:") == 0) {
return HTTP_HEADER_KEY_TRUE_CLIENT_IP;
} else if (str_compare_caseless(header, "X-Amz-Cf-Id:") == 0) {
return HTTP_HEADER_KEY_X_AMZ_CF_ID;
} else if (str_compare_caseless(header, "X-Amzn-Trace-Id:") == 0) {
return HTTP_HEADER_KEY_X_AMZN_TRACE_ID;
} else if (str_compare_caseless(header, "Dnt:") == 0) {
return HTTP_HEADER_KEY_DNT;
} else if (str_compare_caseless(header, "Save-Data:") == 0) {
return HTTP_HEADER_KEY_SAVE_DATA;
} else if (str_compare_caseless(header, "Downlink:") == 0) {
return HTTP_HEADER_KEY_DOWNLINK;
} else if (str_compare_caseless(header, "Ect:") == 0) {
return HTTP_HEADER_KEY_ECT;
} else if (str_compare_caseless(header, "Rtt:") == 0) {
return HTTP_HEADER_KEY_RTT;
} else if (str_compare_caseless(header, "Purpose:") == 0) {
return HTTP_HEADER_KEY_PURPOSE;
} else if (str_compare_caseless(header, "Sec-Fetch-Site:") == 0) {
return HTTP_HEADER_KEY_SEC_FETCH_SITE;
} else if (str_compare_caseless(header, "Sec-Fetch-Mode:") == 0) {
return HTTP_HEADER_KEY_SEC_FETCH_MODE;
} else if (str_compare_caseless(header, "Sec-Fetch-User:") == 0) {
return HTTP_HEADER_KEY_SEC_FETCH_USER;
} else if (str_compare_caseless(header, "Sec-Fetch-Dest:") == 0) {
return HTTP_HEADER_KEY_SEC_FETCH_DEST;
} else if (str_compare_caseless(header, "Service-Worker-Navigation-Preload:") == 0) {
return HTTP_HEADER_KEY_SERVICE_WORKER_NAVIGATION_PRELOAD;
} else if (str_compare_caseless(header, "Last-Event-Id:") == 0) {
return HTTP_HEADER_KEY_LAST_EVENT_ID;
} else if (str_compare_caseless(header, "Report-To:") == 0) {
return HTTP_HEADER_KEY_REPORT_TO;
} else if (str_compare_caseless(header, "Priority:") == 0) {
return HTTP_HEADER_KEY_PRIORITY;
} else if (str_compare_caseless(header, "Signature:") == 0) {
return HTTP_HEADER_KEY_SIGNATURE;
} else if (str_compare_caseless(header, "Signature-Key:") == 0) {
return HTTP_HEADER_KEY_SIGNATURE_KEY;
} else if (str_compare_caseless(header, "Forwarded:") == 0) {
return HTTP_HEADER_KEY_FORWARDED;
} else if (str_compare_caseless(header, "Original-Method:") == 0) {
return HTTP_HEADER_KEY_ORIGINAL_METHOD;
} else if (str_compare_caseless(header, "Original-Url:") == 0) {
return HTTP_HEADER_KEY_ORIGINAL_URL;
} else if (str_compare_caseless(header, "Original-Host:") == 0) {
return HTTP_HEADER_KEY_ORIGINAL_HOST;
}
return HTTP_HEADER_KEY_UNKNOWN;
}
#endif
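Usage sketch for the two lookups above (assumptions: str_compare_caseless behaves like strcasecmp, and the parser hands over the header name including its trailing colon, as the comparisons imply):

HttpHeaderKey key = http_header_key_text("Content-Type:"); // case-insensitive match
const char* name = http_header_key_name(key);              // "Content-Type"
ASSERT_SIMPLE(key == HTTP_HEADER_KEY_CONTENT_TYPE);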

View File

@ -12,6 +12,7 @@
#include "../stdlib/Types.h"
#include "../compiler/CompilerUtils.h"
#include "../architecture/Intrinsics.h"
#include "../thread/ThreadDefines.h"
#include "../utils/StringUtils.h"
#include "../utils/TimeUtils.h"
@ -60,6 +61,9 @@ struct LogMemory {
uint64 size;
uint64 pos;
// @performance Should I use spinlock?
mutex mtx;
};
static LogMemory* _log_memory = NULL;
@ -102,7 +106,6 @@ struct LogDataArray{
LogData data[LOG_DATA_ARRAY];
};
// @bug This needs to be thread safe
byte* log_get_memory() noexcept
{
if (_log_memory->pos + MAX_LOG_LENGTH > _log_memory->size) {
@ -118,7 +121,6 @@ byte* log_get_memory() noexcept
}
// @performance This should only be called async to avoid blocking (e.g. render loop)
// @bug This needs to be thread safe
void log_to_file()
{
// we don't log an empty log pool
@ -156,17 +158,20 @@ void log_flush()
return;
}
mutex_lock(&_log_memory->mtx);
log_to_file();
_log_memory->pos = 0;
mutex_unlock(&_log_memory->mtx);
}
// @bug This needs to be thread safe
void log(const char* str, const char* file, const char* function, int32 line)
{
if (!_log_memory) {
return;
}
mutex_lock(&_log_memory->mtx);
int32 len = str_length(str);
while (len > 0) {
LogMessage* msg = (LogMessage *) log_get_memory();
@ -188,8 +193,8 @@ void log(const char* str, const char* file, const char* function, int32 line)
#if DEBUG || VERBOSE
// In debug mode we always output the log message to the debug console
char time_str[9];
format_time_hh_mm_ss(time_str, msg->time / 1000000ULL);
char time_str[13];
format_time_hh_mm_ss_ms(time_str, msg->time / 1000ULL);
compiler_debug_print(time_str);
compiler_debug_print(" ");
compiler_debug_print(msg->message);
@ -201,9 +206,10 @@ void log(const char* str, const char* file, const char* function, int32 line)
_log_memory->pos = 0;
}
}
mutex_unlock(&_log_memory->mtx);
}
// @bug This needs to be thread safe
void log(const char* format, LogDataArray data, const char* file, const char* function, int32 line)
{
if (!_log_memory) {
@ -217,6 +223,8 @@ void log(const char* format, LogDataArray data, const char* file, const char* fu
ASSERT_SIMPLE(str_length(format) + str_length(file) + str_length(function) + 50 < MAX_LOG_LENGTH);
mutex_lock(&_log_memory->mtx);
LogMessage* msg = (LogMessage *) log_get_memory();
msg->file = file;
msg->function = function;
@ -279,8 +287,8 @@ void log(const char* format, LogDataArray data, const char* file, const char* fu
#if DEBUG || VERBOSE
// In debug mode we always output the log message to the debug console
char time_str[9];
format_time_hh_mm_ss(time_str, msg->time / 1000000ULL);
char time_str[13];
format_time_hh_mm_ss_ms(time_str, msg->time / 1000ULL);
compiler_debug_print(time_str);
compiler_debug_print(" ");
compiler_debug_print(msg->message);
@ -291,6 +299,8 @@ void log(const char* format, LogDataArray data, const char* file, const char* fu
log_to_file();
_log_memory->pos = 0;
}
mutex_unlock(&_log_memory->mtx);
}
#define LOG_TO_FILE() log_to_file()

View File

@ -156,7 +156,7 @@ struct PerformanceProfiler {
if (this->auto_log) {
if (this->info_msg && this->info_msg[0]) {
LOG_2(
"-PERF %s (%s): %n cycles",
"[PERF] %s (%s): %n cycles",
{
{LOG_DATA_CHAR_STR, (void *) perf->name},
{LOG_DATA_CHAR_STR, (void *) this->info_msg},
@ -165,7 +165,7 @@ struct PerformanceProfiler {
);
} else {
LOG_2(
"-PERF %s: %n cycles",
"[PERF] %s: %n cycles",
{
{LOG_DATA_CHAR_STR, (void *) perf->name},
{LOG_DATA_INT64, (void *) &perf->total_cycle},

View File

@ -36,7 +36,7 @@ void buffer_alloc(BufferMemory* buf, uint64 size, int32 alignment = 64)
{
ASSERT_SIMPLE(size);
PROFILE(PROFILE_BUFFER_ALLOC, NULL, false, true);
LOG_1("Allocating BufferMemory: %n B", {{LOG_DATA_UINT64, &size}});
LOG_1("[INFO] Allocating BufferMemory: %n B", {{LOG_DATA_UINT64, &size}});
buf->memory = alignment < 2
? (byte *) platform_alloc(size)

View File

@ -44,7 +44,7 @@ void chunk_alloc(ChunkMemory* buf, uint32 count, uint32 chunk_size, int32 alignm
ASSERT_SIMPLE(chunk_size);
ASSERT_SIMPLE(count);
PROFILE(PROFILE_CHUNK_ALLOC, NULL, false, true);
LOG_1("Allocating ChunkMemory");
LOG_1("[INFO] Allocating ChunkMemory");
chunk_size = ROUND_TO_NEAREST(chunk_size, alignment);
@ -353,7 +353,7 @@ void chunk_free_elements(ChunkMemory* buf, uint64 element, uint32 element_count
inline
int64 chunk_dump(const ChunkMemory* buf, byte* data)
{
LOG_1("Dump ChunkMemory");
LOG_1("[INFO] Dump ChunkMemory");
byte* start = data;
// Count
@ -381,7 +381,7 @@ int64 chunk_dump(const ChunkMemory* buf, byte* data)
memcpy(data, buf->memory, buf->size);
data += buf->size;
LOG_1("Dumped ChunkMemory: %n B", {{LOG_DATA_UINT64, (void *) &buf->size}});
LOG_1("[INFO] Dumped ChunkMemory: %n B", {{LOG_DATA_UINT64, (void *) &buf->size}});
return data - start;
}
@ -389,7 +389,7 @@ int64 chunk_dump(const ChunkMemory* buf, byte* data)
inline
int64 chunk_load(ChunkMemory* buf, const byte* data)
{
LOG_1("Loading ChunkMemory");
LOG_1("[INFO] Loading ChunkMemory");
// Count
buf->count = SWAP_ENDIAN_LITTLE(*((uint32 *) data));
@ -416,7 +416,7 @@ int64 chunk_load(ChunkMemory* buf, const byte* data)
buf->free = (uint64 *) (buf->memory + buf->count * buf->chunk_size);
LOG_1("Loaded ChunkMemory: %n B", {{LOG_DATA_UINT64, &buf->size}});
LOG_1("[INFO] Loaded ChunkMemory: %n B", {{LOG_DATA_UINT64, &buf->size}});
return buf->size;
}

View File

@ -47,7 +47,7 @@ void ring_alloc(RingMemory* ring, uint64 size, uint32 alignment = 64)
{
ASSERT_SIMPLE(size);
PROFILE(PROFILE_RING_ALLOC, NULL, false, true);
LOG_1("Allocating RingMemory: %n B", {{LOG_DATA_UINT64, &size}});
LOG_1("[INFO] Allocating RingMemory: %n B", {{LOG_DATA_UINT64, &size}});
ring->memory = alignment < 2
? (byte *) platform_alloc(size)

View File

@ -40,7 +40,7 @@ void thrd_chunk_alloc(ThreadedChunkMemory* buf, uint32 count, uint32 chunk_size,
ASSERT_SIMPLE(chunk_size);
ASSERT_SIMPLE(count);
PROFILE(PROFILE_CHUNK_ALLOC, NULL, false, true);
LOG_1("Allocating ChunkMemory");
LOG_1("[INFO] Allocating ChunkMemory");
chunk_size = ROUND_TO_NEAREST(chunk_size, alignment);
@ -66,7 +66,7 @@ void thrd_chunk_alloc(ThreadedChunkMemory* buf, uint32 count, uint32 chunk_size,
memset(buf->memory, 0, buf->size);
mutex_init(&buf->lock, NULL);
LOG_1("Allocated ChunkMemory: %n B", {{LOG_DATA_UINT64, &buf->size}});
LOG_1("[INFO] Allocated ChunkMemory: %n B", {{LOG_DATA_UINT64, &buf->size}});
}
inline
@ -280,6 +280,7 @@ void thrd_chunk_free_elements(ThreadedChunkMemory* buf, uint64 element, uint32 e
DEBUG_MEMORY_DELETE((uintptr_t) (buf->memory + element * buf->chunk_size), buf->chunk_size);
}
// @performance We can optimize it by checking if we can just append additional chunks if they are free
inline
int32 thrd_chunk_resize(ThreadedChunkMemory* buf, int32 element_id, uint32 elements_old, uint32 elements_new) noexcept
{
@ -290,6 +291,11 @@ int32 thrd_chunk_resize(ThreadedChunkMemory* buf, int32 element_id, uint32 eleme
memcpy(data_new, data, buf->chunk_size * elements_old);
// @see performance remark above
//if (element_id != chunk_id) {
thrd_chunk_free_elements(buf, element_id, elements_old);
//}
return chunk_id;
}
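A rough sketch of the optimization suggested in the @performance note above; thrd_chunk_is_range_free and thrd_chunk_mark_used are hypothetical helpers, and the locking detail is an assumption:

inline
int32 thrd_chunk_resize_in_place(ThreadedChunkMemory* buf, int32 element_id, uint32 elements_old, uint32 elements_new) noexcept
{
    // Fast path: if the chunks directly behind the current range are still free,
    // claim them and keep the same element_id (no copy, no new reservation).
    // NOTE: check + claim must happen atomically (e.g. while holding buf->lock).
    if (thrd_chunk_is_range_free(buf, element_id + elements_old, elements_new - elements_old)) { // hypothetical helper
        thrd_chunk_mark_used(buf, element_id + elements_old, elements_new - elements_old);       // hypothetical helper
        return element_id;
    }

    // Fallback: the existing reserve -> copy -> free path from thrd_chunk_resize above
    return thrd_chunk_resize(buf, element_id, elements_old, elements_new);
}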

View File

@ -36,7 +36,7 @@ struct ThreadedDataPool {
// This is a bit field that specifies which elements in the data pool are currently in use
alignas(8) atomic_64 uint64* used;
mutex mutex;
mutex mtx;
};
// INFO: A chunk count of 2^n is recommended for maximum performance

View File

@ -40,7 +40,7 @@ struct ThreadedQueue {
// We support both conditional locking and semaphore locking
// These values are not initialized and not used unless you use the queue
mutex mutex;
mutex mtx;
mutex_cond cond;
sem empty;
@ -56,7 +56,7 @@ void thrd_queue_alloc(ThreadedQueue* queue, uint32 element_count, uint32 element
queue->element_size = element_size;
mutex_init(&queue->mutex, NULL);
mutex_init(&queue->mtx, NULL);
coms_pthread_cond_init(&queue->cond, NULL);
coms_sem_init(&queue->empty, element_count);
@ -72,7 +72,7 @@ void thrd_queue_init(ThreadedQueue* queue, BufferMemory* buf, uint32 element_cou
queue->element_size = element_size;
mutex_init(&queue->mutex, NULL);
mutex_init(&queue->mtx, NULL);
coms_pthread_cond_init(&queue->cond, NULL);
coms_sem_init(&queue->empty, element_count);
@ -88,7 +88,7 @@ void thrd_queue_init(ThreadedQueue* queue, byte* buf, uint32 element_count, uint
queue->element_size = element_size;
mutex_init(&queue->mutex, NULL);
mutex_init(&queue->mtx, NULL);
coms_pthread_cond_init(&queue->cond, NULL);
coms_sem_init(&queue->empty, element_count);
@ -101,7 +101,7 @@ void thrd_queue_free(ThreadedQueue* queue)
ring_free((RingMemory *) queue);
coms_sem_destroy(&queue->empty);
coms_sem_destroy(&queue->full);
mutex_destroy(&queue->mutex);
mutex_destroy(&queue->mtx);
coms_pthread_cond_destroy(&queue->cond);
}
@ -110,7 +110,7 @@ inline
void thrd_queue_enqueue_unique_wait(ThreadedQueue* queue, const byte* data) noexcept
{
ASSERT_SIMPLE((uint64_t) data % 4 == 0);
mutex_lock(&queue->mutex);
mutex_lock(&queue->mtx);
byte* tail = queue->tail;
while (tail != queue->head) {
@ -118,7 +118,7 @@ void thrd_queue_enqueue_unique_wait(ThreadedQueue* queue, const byte* data) noex
// @performance we could probably make this faster since we don't need to compare the entire range
if (is_equal(tail, data, queue->element_size) == 0) {
mutex_unlock(&queue->mutex);
mutex_unlock(&queue->mtx);
return;
}
@ -127,21 +127,21 @@ void thrd_queue_enqueue_unique_wait(ThreadedQueue* queue, const byte* data) noex
}
while (!ring_commit_safe((RingMemory *) queue, queue->element_size, queue->alignment)) {
coms_pthread_cond_wait(&queue->cond, &queue->mutex);
coms_pthread_cond_wait(&queue->cond, &queue->mtx);
}
byte* mem = ring_get_memory((RingMemory *) queue, queue->element_size, queue->alignment);
memcpy(mem, data, queue->element_size);
coms_pthread_cond_signal(&queue->cond);
mutex_unlock(&queue->mutex);
mutex_unlock(&queue->mtx);
}
inline
void thrd_queue_enqueue_unique(ThreadedQueue* queue, const byte* data) noexcept
{
ASSERT_SIMPLE((uint64_t) data % 4 == 0);
mutex_lock(&queue->mutex);
mutex_lock(&queue->mtx);
byte* tail = queue->tail;
while (tail != queue->head) {
@ -149,7 +149,7 @@ void thrd_queue_enqueue_unique(ThreadedQueue* queue, const byte* data) noexcept
// @performance we could probably make this faster since we don't need to compare the entire range
if (is_equal(tail, data, queue->element_size) == 0) {
mutex_unlock(&queue->mutex);
mutex_unlock(&queue->mtx);
return;
}
@ -158,7 +158,7 @@ void thrd_queue_enqueue_unique(ThreadedQueue* queue, const byte* data) noexcept
}
if (!ring_commit_safe((RingMemory *) queue, queue->element_size, queue->alignment)) {
mutex_unlock(&queue->mutex);
mutex_unlock(&queue->mtx);
return;
}
@ -167,17 +167,17 @@ void thrd_queue_enqueue_unique(ThreadedQueue* queue, const byte* data) noexcept
memcpy(mem, data, queue->element_size);
coms_pthread_cond_signal(&queue->cond);
mutex_unlock(&queue->mutex);
mutex_unlock(&queue->mtx);
}
// Conditional Lock
inline
void thrd_queue_enqueue(ThreadedQueue* queue, const byte* data) noexcept
{
mutex_lock(&queue->mutex);
mutex_lock(&queue->mtx);
if (!ring_commit_safe((RingMemory *) queue, queue->element_size, queue->alignment)) {
mutex_unlock(&queue->mutex);
mutex_unlock(&queue->mtx);
return;
}
@ -186,32 +186,32 @@ void thrd_queue_enqueue(ThreadedQueue* queue, const byte* data) noexcept
memcpy(mem, data, queue->element_size);
coms_pthread_cond_signal(&queue->cond);
mutex_unlock(&queue->mutex);
mutex_unlock(&queue->mtx);
}
inline
void thrd_queue_enqueue_wait(ThreadedQueue* queue, const byte* data) noexcept
{
mutex_lock(&queue->mutex);
mutex_lock(&queue->mtx);
while (!ring_commit_safe((RingMemory *) queue, queue->element_size, queue->alignment)) {
coms_pthread_cond_wait(&queue->cond, &queue->mutex);
coms_pthread_cond_wait(&queue->cond, &queue->mtx);
}
byte* mem = ring_get_memory((RingMemory *) queue, queue->element_size, queue->alignment);
memcpy(mem, data, queue->element_size);
coms_pthread_cond_signal(&queue->cond);
mutex_unlock(&queue->mutex);
mutex_unlock(&queue->mtx);
}
inline
byte* thrd_queue_enqueue_start_wait(ThreadedQueue* queue) noexcept
{
mutex_lock(&queue->mutex);
mutex_lock(&queue->mtx);
while (!ring_commit_safe((RingMemory *) queue, queue->element_size, queue->alignment)) {
coms_pthread_cond_wait(&queue->cond, &queue->mutex);
coms_pthread_cond_wait(&queue->cond, &queue->mtx);
}
return ring_get_memory((RingMemory *) queue, queue->element_size, queue->alignment);
@ -221,7 +221,7 @@ inline
void thrd_queue_enqueue_end_wait(ThreadedQueue* queue) noexcept
{
coms_pthread_cond_signal(&queue->cond);
mutex_unlock(&queue->mutex);
mutex_unlock(&queue->mtx);
}
inline
@ -232,9 +232,9 @@ bool thrd_queue_dequeue(ThreadedQueue* queue, byte* data) noexcept
}
// we do this twice because the first one is very fast but may return a false positive
mutex_lock(&queue->mutex);
mutex_lock(&queue->mtx);
if (queue->head == queue->tail) {
mutex_unlock(&queue->mutex);
mutex_unlock(&queue->mtx);
return false;
}
@ -247,25 +247,25 @@ bool thrd_queue_dequeue(ThreadedQueue* queue, byte* data) noexcept
ring_move_pointer((RingMemory *) queue, &queue->tail, queue->element_size, queue->alignment);
coms_pthread_cond_signal(&queue->cond);
mutex_unlock(&queue->mutex);
mutex_unlock(&queue->mtx);
return true;
}
inline
bool thrd_queue_empty(ThreadedQueue* queue) noexcept {
mutex_lock(&queue->mutex);
mutex_lock(&queue->mtx);
bool is_empty = queue->head == queue->tail;
mutex_unlock(&queue->mutex);
mutex_unlock(&queue->mtx);
return is_empty;
}
inline
bool thrd_queue_full(ThreadedQueue* queue) noexcept {
mutex_lock(&queue->mutex);
mutex_lock(&queue->mtx);
bool is_full = !ring_commit_safe((RingMemory *) queue, queue->element_size, queue->alignment);
mutex_unlock(&queue->mutex);
mutex_unlock(&queue->mtx);
return is_full;
}
@ -274,26 +274,26 @@ bool thrd_queue_full(ThreadedQueue* queue) noexcept {
inline
void thrd_queue_dequeue_wait(ThreadedQueue* queue, byte* data) noexcept
{
mutex_lock(&queue->mutex);
mutex_lock(&queue->mtx);
while (queue->head == queue->tail) {
coms_pthread_cond_wait(&queue->cond, &queue->mutex);
coms_pthread_cond_wait(&queue->cond, &queue->mtx);
}
memcpy(data, queue->tail, queue->element_size);
ring_move_pointer((RingMemory *) queue, &queue->tail, queue->element_size, queue->alignment);
coms_pthread_cond_signal(&queue->cond);
mutex_unlock(&queue->mutex);
mutex_unlock(&queue->mtx);
}
inline
byte* thrd_queue_dequeue_start_wait(ThreadedQueue* queue) noexcept
{
mutex_lock(&queue->mutex);
mutex_lock(&queue->mtx);
while (queue->head == queue->tail) {
coms_pthread_cond_wait(&queue->cond, &queue->mutex);
coms_pthread_cond_wait(&queue->cond, &queue->mtx);
}
return queue->tail;
@ -305,7 +305,7 @@ void thrd_queue_dequeue_end_wait(ThreadedQueue* queue) noexcept
ring_move_pointer((RingMemory *) queue, &queue->tail, queue->element_size, queue->alignment);
coms_pthread_cond_signal(&queue->cond);
mutex_unlock(&queue->mutex);
mutex_unlock(&queue->mtx);
}
// Semaphore Lock
@ -313,12 +313,12 @@ inline
void thrd_queue_enqueue_sem_wait(ThreadedQueue* queue, const byte* data) noexcept
{
coms_sem_wait(&queue->empty);
mutex_lock(&queue->mutex);
mutex_lock(&queue->mtx);
byte* mem = ring_get_memory((RingMemory *) queue, queue->element_size, queue->alignment);
memcpy(mem, data, queue->element_size);
mutex_unlock(&queue->mutex);
mutex_unlock(&queue->mtx);
coms_sem_post(&queue->full);
}
@ -329,12 +329,12 @@ bool thrd_queue_enqueue_semimedwait(ThreadedQueue* queue, const byte* data, uint
return false;
}
mutex_lock(&queue->mutex);
mutex_lock(&queue->mtx);
byte* mem = ring_get_memory((RingMemory *) queue, queue->element_size, queue->alignment);
memcpy(mem, data, queue->element_size);
mutex_unlock(&queue->mutex);
mutex_unlock(&queue->mtx);
coms_sem_post(&queue->full);
return true;
@ -344,7 +344,7 @@ inline
byte* thrd_queue_enqueue_start_sem_wait(ThreadedQueue* queue) noexcept
{
coms_sem_wait(&queue->empty);
mutex_lock(&queue->mutex);
mutex_lock(&queue->mtx);
return ring_get_memory((RingMemory *) queue, queue->element_size, queue->alignment);
}
@ -352,7 +352,7 @@ byte* thrd_queue_enqueue_start_sem_wait(ThreadedQueue* queue) noexcept
inline
void thrd_queue_enqueue_end_sem_wait(ThreadedQueue* queue) noexcept
{
mutex_unlock(&queue->mutex);
mutex_unlock(&queue->mtx);
coms_sem_post(&queue->full);
}
@ -360,12 +360,12 @@ inline
byte* thrd_queue_dequeue_sem_wait(ThreadedQueue* queue, byte* data) noexcept
{
coms_sem_wait(&queue->full);
mutex_lock(&queue->mutex);
mutex_lock(&queue->mtx);
memcpy(data, queue->tail, queue->element_size);
ring_move_pointer((RingMemory *) queue, &queue->tail, queue->element_size, queue->alignment);
mutex_unlock(&queue->mutex);
mutex_unlock(&queue->mtx);
coms_sem_post(&queue->empty);
return data;
}
@ -376,12 +376,12 @@ bool thrd_queue_dequeue_semimedwait(ThreadedQueue* queue, byte* data, uint64 wai
return false;
}
mutex_lock(&queue->mutex);
mutex_lock(&queue->mtx);
memcpy(data, queue->tail, queue->element_size);
ring_move_pointer((RingMemory *) queue, &queue->tail, queue->element_size, queue->alignment);
mutex_unlock(&queue->mutex);
mutex_unlock(&queue->mtx);
coms_sem_post(&queue->empty);
return true;
@ -391,7 +391,7 @@ inline
byte* thrd_queue_dequeue_start_sem_wait(ThreadedQueue* queue) noexcept
{
coms_sem_wait(&queue->full);
mutex_lock(&queue->mutex);
mutex_lock(&queue->mtx);
return queue->tail;
}
@ -401,7 +401,7 @@ void thrd_queue_dequeue_end_sem_wait(ThreadedQueue* queue) noexcept
{
ring_move_pointer((RingMemory *) queue, &queue->tail, queue->element_size, queue->alignment);
mutex_unlock(&queue->mutex);
mutex_unlock(&queue->mtx);
coms_sem_post(&queue->empty);
}
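A minimal producer/consumer sketch for the conditional-lock wait API above (the trailing parameters of thrd_queue_alloc are cut off in this diff, so that call is an assumption):

ThreadedQueue queue;
thrd_queue_alloc(&queue, 128, sizeof(int32)); // element_count, element_size; remaining params assumed defaulted

// Producer thread: blocks while the ring is full
int32 job_id = 42;
thrd_queue_enqueue_wait(&queue, (const byte *) &job_id);

// Consumer thread: blocks while the ring is empty
int32 next_job;
thrd_queue_dequeue_wait(&queue, (byte *) &next_job);

thrd_queue_free(&queue);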

44
module/WebApp.h Normal file
View File

@ -0,0 +1,44 @@
#ifndef COMS_MODULE_WEB_APP_H
#define COMS_MODULE_WEB_APP_H
#include "../system/Library.cpp"
#include "../log/DebugContainer.h"
#include "../http/HttpRequest.h"
#include "../http/HttpResponse.h"
#include "../network/SocketConnection.h"
struct ApplicationData;
typedef void dll_app_load(DebugContainer*);
// INFO: Requires pointer to pointer since we may want to resize the request or response object
// The response object case is intuitive, but even the request object might need this if we are parsing a file upload
typedef void dll_app_handle_request(void*, SocketConnection*, HttpRequest**, HttpResponse**);
struct WebApp {
dll_app_load* app_load;
dll_app_handle_request* app_handle_request;
// @todo It's a little nasty that we have this here
// Remember: lib is now back-referencing *codetable
// It would probably be better to have a library manager that holds all libs
Library lib;
};
// null || '' = must not be set
// '*' = matches anything
// otherwise = the actual name to match
struct WebAppMatchPattern {
char subdomain[16];
char domain[24];
char path[24];
int32 app_id;
};
const char* web_app_function_name_table[] = {
"app_load",
"app_handle_request",
};
#endif
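For illustration, a module exporting the two entry points named in web_app_function_name_table might look roughly like the sketch below; the extern "C" linkage, include paths and handler body are assumptions, only the typedef signatures are taken from above:

// Hypothetical web app module, built as its own shared library
#include "../log/DebugContainer.h"
#include "../http/HttpRequest.h"
#include "../http/HttpResponse.h"
#include "../network/SocketConnection.h"

extern "C" void app_load(DebugContainer* debug) {
    // e.g. adopt the host's debug/log containers so logging works inside the module
    (void) debug;
}

extern "C" void app_handle_request(void* app_data, SocketConnection* con, HttpRequest** request, HttpResponse** response) {
    (void) app_data; (void) con;
    // The double pointers exist so a handler can swap *request / *response for
    // larger buffers, e.g. while parsing a file upload or building a big response.
    (void) request; (void) response;
}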

View File

@ -1,9 +1,19 @@
#ifndef COMS_MODULE_WEB_H
#define COMS_MODULE_WEB_H
#include "../stdlib/Types.h"
#include "../system/Library.cpp"
#include "../log/DebugContainer.h"
#include "../memory/RingMemory.h"
#include "../../models/ApplicationSettings.h"
struct ApplicationData;
typedef void dll_load_module(DebugContainer*);
typedef void dll_load_functions(void*);
struct WebModule {
dll_load_module* load_module;
dll_load_functions* load_functions;
};
#endif

View File

@ -28,6 +28,9 @@ struct SocketConnection {
socketid sd;
sockaddr_in6 addr;
uint16 port;
// used for epoll
int32 fd;
};
#endif

View File

@ -43,7 +43,7 @@ void* platform_alloc(size_t size)
DEBUG_MEMORY_INIT((uintptr_t) ptr, size);
LOG_INCREMENT_BY(DEBUG_COUNTER_MEM_ALLOC, size);
LOG_3("Allocated %n B", {{LOG_DATA_UINT64, &size}});
LOG_3("[INFO] Allocated %n B", {{LOG_DATA_UINT64, &size}});
return (void *) ((uintptr_t) ptr + sizeof(size_t));
}
@ -73,7 +73,7 @@ void* platform_alloc_aligned(size_t size, int32 alignment)
DEBUG_MEMORY_INIT((uintptr_t) aligned_ptr, size);
LOG_INCREMENT_BY(DEBUG_COUNTER_MEM_ALLOC, size);
LOG_3("Aligned allocated %n B", {{LOG_DATA_UINT64, &size}});
LOG_3("[INFO] Aligned allocated %n B", {{LOG_DATA_UINT64, &size}});
return aligned_ptr;
}
@ -117,7 +117,7 @@ void* platform_shared_alloc(int32* fd, const char* name, size_t size)
DEBUG_MEMORY_INIT((uintptr_t) shm_ptr, size);
LOG_INCREMENT_BY(DEBUG_COUNTER_MEM_ALLOC, size);
LOG_3("Shared allocated %n B", {{LOG_DATA_UINT64, &size}});
LOG_3("[INFO] Shared allocated %n B", {{LOG_DATA_UINT64, &size}});
return (void *) ((uintptr_t) shm_ptr + sizeof(size_t));
}
@ -132,7 +132,7 @@ void* platform_shared_open(int32* fd, const char* name, size_t size)
void* shm_ptr = mmap(NULL, size, PROT_READ, MAP_SHARED, *fd, 0);
ASSERT_SIMPLE(shm_ptr);
LOG_3("Shared opened %n B", {{LOG_DATA_UINT64, &size}});
LOG_3("[INFO] Shared opened %n B", {{LOG_DATA_UINT64, &size}});
*((size_t *) shm_ptr) = size;

View File

@ -160,7 +160,7 @@ FileHandle file_append_handle(const char* path) {
}
inline
bool file_exists(const char* path) {
bool file_exists(const char* path) noexcept {
PROFILE(PROFILE_FILE_UTILS, path, false, true);
struct stat buffer;
@ -390,14 +390,13 @@ FileHandle file_read_handle(const char* path) {
}
if (fd == -1) {
perror("open");
return -1;
}
return fd;
}
inline
FORCE_INLINE
void file_close_handle(FileHandle fp)
{
close(fp);

View File

@ -12,10 +12,11 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <netinet/tcp.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/epoll.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <fcntl.h>
@ -25,114 +26,219 @@
#include "../../../network/SocketConnection.h"
#include "../../../utils/EndianUtils.h"
void socket_non_blocking(SocketConnection* con)
inline
bool socket_non_blocking(SocketConnection* con)
{
int flags = fcntl(con->sd, F_GETFL, 0);
fcntl(con->sd, F_SETFL, flags | O_NONBLOCK);
int32 flags = fcntl(con->sd, F_GETFL, 0);
if (flags < 0) {
return false;
}
return fcntl(con->sd, F_SETFL, flags | O_NONBLOCK) >= 0;
}
inline
void socket_no_delay(SocketConnection* con)
{
int32 nodelay = 1;
setsockopt(con->sd, IPPROTO_TCP, TCP_NODELAY, (char*)&nodelay, sizeof(nodelay));
}
// WARNING: requires `sudo setcap cap_net_raw=eip /path/to/your_program`
bool socket_server_raw_create(SocketConnection* con) {
int32 socket_server_raw_create(SocketConnection* con) {
con->sd = socket(AF_INET6, SOCK_RAW, 255);
int32 flags;
if ((flags = fcntl(con->sd, F_GETFL, 0)) < 0) {
close(con->sd);
con->sd = 0;
return false;
}
if (fcntl(con->sd, F_SETFL, flags | O_NONBLOCK) < 0) {
close(con->sd);
con->sd = 0;
return false;
}
memset(&con->addr, 0, sizeof(con->addr));
con->addr.sin6_family = AF_INET6;
con->addr.sin6_addr = in6addr_any;
con->addr.sin6_port = htons(con->port);
if (bind(con->sd, (sockaddr *) &con->addr, sizeof(con->addr)) < 0) {
close(con->sd);
con->sd = 0;
return false;
}
return true;
}
// WARNING: requires `sudo setcap cap_net_raw=eip /path/to/your_program`
bool socket_server_udp_raw_create(SocketConnection* con) {
con->sd = socket(AF_INET6, SOCK_RAW, IPPROTO_UDP);
int32 flags;
if ((flags = fcntl(con->sd, F_GETFL, 0)) < 0) {
close(con->sd);
con->sd = 0;
return false;
}
if (fcntl(con->sd, F_SETFL, flags | O_NONBLOCK) < 0) {
close(con->sd);
con->sd = 0;
return false;
}
memset(&con->addr, 0, sizeof(con->addr));
con->addr.sin6_family = AF_INET6;
con->addr.sin6_addr = in6addr_any;
con->addr.sin6_port = htons(con->port);
if (bind(con->sd, (sockaddr *) &con->addr, sizeof(con->addr)) < 0) {
close(con->sd);
con->sd = 0;
return false;
}
return true;
}
bool socket_server_udp_create(SocketConnection* con) {
con->sd = socket(AF_INET6, SOCK_DGRAM, IPPROTO_UDP);
int32 flags;
if ((flags = fcntl(con->sd, F_GETFL, 0)) < 0) {
close(con->sd);
con->sd = 0;
return false;
}
if (fcntl(con->sd, F_SETFL, flags | O_NONBLOCK) < 0) {
close(con->sd);
con->sd = 0;
return false;
}
memset(&con->addr, 0, sizeof(con->addr));
con->addr.sin6_family = AF_INET6;
con->addr.sin6_addr = in6addr_any;
con->addr.sin6_port = htons(con->port);
if (bind(con->sd, (sockaddr *) &con->addr, sizeof(con->addr)) < 0) {
close(con->sd);
con->sd = 0;
return false;
}
return true;
}
bool socket_server_http_create(SocketConnection* con)
{
con->sd = socket(AF_INET6, SOCK_STREAM, 0);
if (con->sd < 0) {
con->sd = 0;
return false;
LOG_1("[ERROR] Socket failed");
return -1;
}
int32 flags;
if ((flags = fcntl(con->sd, F_GETFL, 0)) < 0) {
close(con->sd);
con->sd = 0;
LOG_1("[ERROR] Fcntl F_GETFL failed");
return -2;
}
if (fcntl(con->sd, F_SETFL, flags | O_NONBLOCK) < 0) {
close(con->sd);
con->sd = 0;
LOG_1("[ERROR] Fcntl O_NONBLOCK failed");
return -3;
}
memset(&con->addr, 0, sizeof(con->addr));
con->addr.sin6_family = AF_INET6;
con->addr.sin6_addr = in6addr_any;
con->addr.sin6_port = htons(con->port);
if (bind(con->sd, (sockaddr *) &con->addr, sizeof(con->addr)) < 0) {
close(con->sd);
con->sd = 0;
if (errno == EADDRINUSE) {
LOG_1("[ERROR] Port already in use");
return -4;
} else {
LOG_1("[ERROR] Binding failed");
return -5;
}
}
return 0;
}
// WARNING: requires `sudo setcap cap_net_raw=eip /path/to/your_program`
int32 socket_server_udp_raw_create(SocketConnection* con) {
con->sd = socket(AF_INET6, SOCK_RAW, IPPROTO_UDP);
if (con->sd < 0) {
con->sd = 0;
LOG_1("[ERROR] Socket failed");
return -1;
}
int32 flags;
if ((flags = fcntl(con->sd, F_GETFL, 0)) < 0) {
close(con->sd);
con->sd = 0;
LOG_1("[ERROR] Fcntl F_GETFL failed");
return -2;
}
if (fcntl(con->sd, F_SETFL, flags | O_NONBLOCK) < 0) {
close(con->sd);
con->sd = 0;
LOG_1("[ERROR] Fcntl O_NONBLOCK failed");
return -3;
}
memset(&con->addr, 0, sizeof(con->addr));
con->addr.sin6_family = AF_INET6;
con->addr.sin6_addr = in6addr_any;
con->addr.sin6_port = htons(con->port);
if (bind(con->sd, (sockaddr *) &con->addr, sizeof(con->addr)) < 0) {
close(con->sd);
con->sd = 0;
if (errno == EADDRINUSE) {
LOG_1("[ERROR] Port already in use");
return -4;
} else {
LOG_1("[ERROR] Binding failed");
return -5;
}
}
return 0;
}
int32 socket_server_udp_create(SocketConnection* con) {
con->sd = socket(AF_INET6, SOCK_DGRAM, IPPROTO_UDP);
if (con->sd < 0) {
con->sd = 0;
LOG_1("[ERROR] Socket failed");
return -1;
}
int32 flags;
if ((flags = fcntl(con->sd, F_GETFL, 0)) < 0) {
close(con->sd);
con->sd = 0;
LOG_1("[ERROR] Fcntl F_GETFL failed");
return -2;
}
if (fcntl(con->sd, F_SETFL, flags | O_NONBLOCK) < 0) {
close(con->sd);
con->sd = 0;
LOG_1("[ERROR] Fcntl O_NONBLOCK failed");
return -3;
}
memset(&con->addr, 0, sizeof(con->addr));
con->addr.sin6_family = AF_INET6;
con->addr.sin6_addr = in6addr_any;
con->addr.sin6_port = htons(con->port);
if (bind(con->sd, (sockaddr *) &con->addr, sizeof(con->addr)) < 0) {
close(con->sd);
con->sd = 0;
if (errno == EADDRINUSE) {
LOG_1("[ERROR] Port already in use");
return -4;
} else {
LOG_1("[ERROR] Binding failed");
return -5;
}
}
return 0;
}
int32 socket_server_http_create(SocketConnection* con, int32 max_connections = 5)
{
con->sd = socket(AF_INET6, SOCK_STREAM | SOCK_NONBLOCK, 0);
if (con->sd < 0) {
con->sd = 0;
LOG_1("[ERROR] Socket failed");
return -1;
}
int32 opt = 1;
setsockopt(con->sd, SOL_SOCKET, SO_REUSEADDR, (const char*) &opt, sizeof(opt));
if (setsockopt(con->sd, SOL_SOCKET, SO_REUSEADDR, (const char*) &opt, sizeof(opt)) < 0) {
close(con->sd);
con->sd = 0;
LOG_1("[ERROR] Setsockopt SO_REUSEADDR failed");
return -2;
}
// For multi-process servers
opt = 1;
if (setsockopt(con->sd, SOL_SOCKET, SO_REUSEPORT, (const char*) &opt, sizeof(opt)) < 0) {
close(con->sd);
con->sd = 0;
LOG_1("[ERROR] Setsockopt SO_REUSEPORT failed");
return -1;
}
opt = 1;
if (setsockopt(con->sd, SOL_SOCKET, SO_KEEPALIVE, (const char*) &opt, sizeof(opt)) < 0) {
close(con->sd);
con->sd = 0;
LOG_1("[ERROR] Setsockopt SO_KEEPALIVE failed");
return -1;
}
opt = 1;
if (setsockopt(con->sd, IPPROTO_TCP, TCP_NODELAY, (const char*) &opt, sizeof(opt)) < 0) {
close(con->sd);
con->sd = 0;
LOG_1("[ERROR] Setsockopt TCP_NODELAY failed");
return -1;
}
/*
// Configure IPv6 to also accept IPv4 connections
opt = 0;
setsockopt(con->sd, IPPROTO_IPV6, IPV6_V6ONLY, (const char*) &opt, sizeof(opt));
*/
memset(&con->addr, 0, sizeof(con->addr));
con->addr.sin6_family = AF_INET6;
@ -141,30 +247,61 @@ bool socket_server_http_create(SocketConnection* con)
if (bind(con->sd, (struct sockaddr *) &con->addr, sizeof(con->addr)) < 0) {
close(con->sd);
con->sd = 0;
return false;
if (errno == EADDRINUSE) {
LOG_1("[ERROR] Port already in use");
return -3;
} else {
LOG_1("[ERROR] Binding failed");
return -4;
}
}
if (listen(con->sd, 5) < 0) {
if (listen(con->sd, max_connections) < 0) {
close(con->sd);
con->sd = 0;
return false;
LOG_1("[ERROR] Listen failed");
return -5;
}
return true;
con->fd = epoll_create1(0);
if (con->fd < 0) {
close(con->sd);
LOG_1("[ERROR] Epoll failed");
return -6;
}
return 0;
}
bool socket_server_websocket_create(SocketConnection* con) {
int32 socket_server_websocket_create(SocketConnection* con) {
con->sd = socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP);
if (con->sd < 0) {
con->sd = 0;
LOG_1("[ERROR] Socket failed");
return -1;
}
int32 flags;
if ((flags = fcntl(con->sd, F_GETFL, 0)) < 0
|| fcntl(con->sd, F_SETFL, flags | O_NONBLOCK) < 0
) {
if ((flags = fcntl(con->sd, F_GETFL, 0)) < 0) {
close(con->sd);
con->sd = 0;
return false;
LOG_1("[ERROR] Fcntl F_GETFL failed");
return -2;
}
if (fcntl(con->sd, F_SETFL, flags | O_NONBLOCK) < 0) {
close(con->sd);
con->sd = 0;
LOG_1("[ERROR] Fcntl O_NONBLOCK failed");
return -3;
}
int opt = 1;
@ -178,16 +315,32 @@ bool socket_server_websocket_create(SocketConnection* con) {
if (bind(con->sd, (sockaddr*)&con->addr, sizeof(con->addr)) < 0) {
close(con->sd);
con->sd = 0;
return false;
if (errno == EADDRINUSE) {
LOG_1("[ERROR] Port already in use");
return -4;
} else {
LOG_1("[ERROR] Binding failed");
return -5;
}
}
if (listen(con->sd, SOMAXCONN) < 0) {
close(con->sd);
con->sd = 0;
return false;
LOG_1("[ERROR] Listen failed");
}
return true;
}
void socket_connection_close(SocketConnection* con) {
socket_close(con->sd);
if (con->fd) {
socket_close(con->fd);
}
}
#endif
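With the listening socket created non-blocking and an epoll instance stored in con->fd, the accept side could be driven roughly like this; a sketch of standard epoll usage, not the repo's actual server loop:

struct epoll_event ev = {};
ev.events = EPOLLIN;
ev.data.fd = con->sd;
epoll_ctl(con->fd, EPOLL_CTL_ADD, con->sd, &ev); // register the listening socket

struct epoll_event events[64];
for (;;) {
    int32 n = epoll_wait(con->fd, events, 64, -1);
    for (int32 i = 0; i < n; ++i) {
        if (events[i].data.fd == con->sd) {
            // New connection; keep client sockets non-blocking as well
            int32 client = (int32) accept4(con->sd, NULL, NULL, SOCK_NONBLOCK);
            if (client >= 0) {
                struct epoll_event cev = {};
                cev.events = EPOLLIN;
                cev.data.fd = client;
                epoll_ctl(con->fd, EPOLL_CTL_ADD, client, &cev);
            }
        } else {
            // Readable client socket: parse the request, respond (e.g. socket_http_file_send)
        }
    }
}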

View File

@ -12,8 +12,12 @@
#include "../../../stdlib/Types.h"
#include <netdb.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/sendfile.h>
#include <fcntl.h>
void socket_close(int32 sd) {
shutdown(sd, SHUT_RDWR);
@ -23,4 +27,66 @@ void socket_close(int32 sd) {
#define socket_prepare() ((void)0)
#define socket_cleanup() ((void)0)
#define SOCKET_FILE_CHUNK_SIZE 4096
// @todo implement a version that supports compression (gz)
int32 socket_http_file_send(int32 client_sock, const char* file_path, const char* content_type = NULL) {
int32 file_fd;
struct stat file_stat;
int32 rc;
char header[SOCKET_FILE_CHUNK_SIZE];
int32 header_len;
file_fd = open(file_path, O_RDONLY);
if (file_fd == -1) {
LOG_1("[ERROR] Opening file");
return -1;
}
if (fstat(file_fd, &file_stat) < 0) {
LOG_1("[ERROR] Getting file stats");
close(file_fd);
return -1;
}
// Prepare HTTP headers
if (content_type == NULL) {
content_type = "application/octet-stream";
}
header_len = sprintf_fast(header, sizeof(header),
"HTTP/1.1 200 OK\r\n"
"Content-Type: %s\r\n"
"Content-Length: %l\r\n"
"Connection: close\r\n" // @todo recosnider better handling
"\r\n",
content_type, (int64) file_stat.st_size);
// Send HTTP headers
rc = send(client_sock, header, header_len, 0);
if (rc != header_len) {
LOG_1("[ERROR] Sending HTTP headers");
close(file_fd);
return -1;
}
off_t offset = 0;
ssize_t sent_bytes = sendfile(client_sock, file_fd, &offset, file_stat.st_size);
if (sent_bytes != file_stat.st_size) {
LOG_1("[ERROR] Sending file content");
close(file_fd);
return -1;
}
close(file_fd);
LOG_4("[INFO] sending file");
return 0;
}
#endif
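Hypothetical call site; client_sock and the path are placeholders:

if (socket_http_file_send(client_sock, "./public/index.html", "text/html") < 0) {
    LOG_1("[ERROR] Could not serve file");
}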

View File

@ -23,16 +23,6 @@
#include "ThreadDefines.h"
#include "Atomic.h"
FORCE_INLINE
int32 futex_wait(volatile int32* futex, int32 val) {
return syscall(SYS_futex, futex, FUTEX_WAIT, val, NULL, NULL, 0);
}
FORCE_INLINE
int32 futex_wake(volatile int32* futex, int32 n) {
return syscall(SYS_futex, futex, FUTEX_WAKE, n, NULL, NULL, 0);
}
inline
int32 coms_pthread_create(coms_pthread_t* thread, void*, ThreadJobFunc start_routine, void* arg) {
if (thread == NULL || start_routine == NULL) {
@ -48,7 +38,7 @@ int32 coms_pthread_create(coms_pthread_t* thread, void*, ThreadJobFunc start_rou
thread->h = clone((int32 (*)(void*))start_routine, (void *) ((uintptr_t) thread->stack + stack_size), flags, arg);
if (thread->h == -1) {
LOG_1("Thread creation faild with error %d", {{LOG_DATA_INT32, &errno}});
LOG_1("Thread creation faild");
return 1;
}
@ -73,41 +63,6 @@ int32 coms_pthread_detach(coms_pthread_t) {
return 0;
}
FORCE_INLINE
int32 mutex_init(mutex* mutex, mutexattr_t*) {
return mutex == NULL ? 1 : 0;
}
FORCE_INLINE
int32 mutex_destroy(mutex* mutex) {
return mutex == NULL ? 1 : 0;
}
inline
int32 mutex_lock(mutex* mutex) {
if (mutex == NULL) {
return 1;
}
while (atomic_fetch_set_acquire(&mutex->futex, 1) != 0) {
futex_wait(&mutex->futex, 1);
}
return 0;
}
inline
int32 mutex_unlock(mutex* mutex) {
if (mutex == NULL) {
return 1;
}
atomic_set_release(&mutex->futex, 0);
futex_wake(&mutex->futex, 1);
return 0;
}
inline
int32 coms_pthread_cond_init(mutex_cond* cond, coms_pthread_condattr_t*) {
if (cond == NULL) {

View File

@ -13,6 +13,9 @@
// #include <unistd.h>
#include "../../../stdlib/Types.h"
#include "../../../thread/Atomic.h"
#include <linux/futex.h>
#include <sys/syscall.h>
#define THREAD_RETURN int32
typedef THREAD_RETURN (*ThreadJobFunc)(void*);
@ -39,4 +42,49 @@ struct coms_pthread_t {
void* stack;
};
FORCE_INLINE
int32 futex_wait(volatile int32* futex, int32 val) {
return syscall(SYS_futex, futex, FUTEX_WAIT, val, NULL, NULL, 0);
}
FORCE_INLINE
int32 futex_wake(volatile int32* futex, int32 n) {
return syscall(SYS_futex, futex, FUTEX_WAKE, n, NULL, NULL, 0);
}
FORCE_INLINE
int32 mutex_init(mutex* mutex, mutexattr_t*) {
return mutex == NULL ? 1 : 0;
}
FORCE_INLINE
int32 mutex_destroy(mutex* mutex) {
return mutex == NULL ? 1 : 0;
}
inline
int32 mutex_lock(mutex* mutex) {
if (mutex == NULL) {
return 1;
}
while (atomic_fetch_set_acquire(&mutex->futex, 1) != 0) {
futex_wait(&mutex->futex, 1);
}
return 0;
}
inline
int32 mutex_unlock(mutex* mutex) {
if (mutex == NULL) {
return 1;
}
atomic_set_release(&mutex->futex, 0);
futex_wake(&mutex->futex, 1);
return 0;
}
#endif
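Call sites stay identical to the Win32 critical-section implementation further down; a minimal sketch:

mutex m;
mutex_init(&m, NULL);

mutex_lock(&m);   // loops in futex_wait while another thread holds the flag
// ... critical section ...
mutex_unlock(&m); // clears the flag with release semantics and wakes one waiter

mutex_destroy(&m);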

View File

@ -34,7 +34,7 @@ void* platform_alloc(size_t size)
void* ptr = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
DEBUG_MEMORY_INIT((uintptr_t) ptr, size);
LOG_INCREMENT_BY(DEBUG_COUNTER_MEM_ALLOC, size);
LOG_3("Allocated %n B", {{LOG_DATA_UINT64, &size}});
LOG_3("[INFO] Allocated %n B", {{LOG_DATA_UINT64, &size}});
return ptr;
}
@ -60,7 +60,7 @@ void* platform_alloc_aligned(size_t size, int32 alignment)
DEBUG_MEMORY_INIT((uintptr_t) aligned_ptr, size);
LOG_INCREMENT_BY(DEBUG_COUNTER_MEM_ALLOC, size);
LOG_3("Aligned allocated %n B", {{LOG_DATA_UINT64, &size}});
LOG_3("[INFO] Aligned allocated %n B", {{LOG_DATA_UINT64, &size}});
return aligned_ptr;
}
@ -98,7 +98,7 @@ void* platform_shared_alloc(HANDLE* fd, const char* name, size_t size)
DEBUG_MEMORY_INIT((uintptr_t) shm_ptr, size);
LOG_INCREMENT_BY(DEBUG_COUNTER_MEM_ALLOC, size);
LOG_3("Shared allocated %n B", {{LOG_DATA_UINT64, &size}});
LOG_3("[INFO] Shared allocated %n B", {{LOG_DATA_UINT64, &size}});
return shm_ptr;
}
@ -111,7 +111,7 @@ void* platform_shared_open(HANDLE* fd, const char* name, size_t size)
void* shm_ptr = MapViewOfFile(*fd, FILE_MAP_READ | FILE_MAP_WRITE, 0, 0, (DWORD) size);
ASSERT_SIMPLE(shm_ptr);
LOG_3("Shared opened %n B", {{LOG_DATA_UINT64, &size}});
LOG_3("[INFO] Shared opened %n B", {{LOG_DATA_UINT64, &size}});
return shm_ptr;
}

View File

@ -46,54 +46,6 @@ int32 coms_pthread_detach(coms_pthread_t thread)
return 0;
}
inline
int32 mutex_init(mutex* mutex, mutexattr_t*)
{
if (mutex == NULL) {
return 1;
}
InitializeCriticalSection(mutex);
return 0;
}
inline
int32 mutex_destroy(mutex* mutex)
{
if (mutex == NULL) {
return 1;
}
DeleteCriticalSection(mutex);
return 0;
}
inline
int32 mutex_lock(mutex* mutex)
{
if (mutex == NULL) {
return 1;
}
EnterCriticalSection(mutex);
return 0;
}
inline
int32 mutex_unlock(mutex* mutex)
{
if (mutex == NULL) {
return 1;
}
LeaveCriticalSection(mutex);
return 0;
}
// WARNING: We don't support windows events since they are much slower than conditional variables/mutexes
inline
int32 coms_pthread_cond_init(mutex_cond* cond, coms_pthread_condattr_t*)

View File

@ -10,6 +10,7 @@
#define COMS_PLATFORM_WIN32_THREADING_THREAD_DEFINES_H
#include "../../../stdlib/Types.h"
#include "../../../thread/Atomic.h"
#include <windows.h>
#define THREAD_RETURN DWORD WINAPI
@ -34,4 +35,52 @@ struct coms_pthread_rwlock_t {
bool exclusive;
};
inline
int32 mutex_init(mutex* mutex, mutexattr_t*)
{
if (mutex == NULL) {
return 1;
}
InitializeCriticalSection(mutex);
return 0;
}
inline
int32 mutex_destroy(mutex* mutex)
{
if (mutex == NULL) {
return 1;
}
DeleteCriticalSection(mutex);
return 0;
}
inline
int32 mutex_lock(mutex* mutex)
{
if (mutex == NULL) {
return 1;
}
EnterCriticalSection(mutex);
return 0;
}
inline
int32 mutex_unlock(mutex* mutex)
{
if (mutex == NULL) {
return 1;
}
LeaveCriticalSection(mutex);
return 0;
}
#endif

View File

@ -146,7 +146,7 @@ struct HashMapRef {
inline
void hashmap_alloc(HashMap* hm, int32 count, int32 element_size, int32 alignment = 64)
{
LOG_1("Allocate HashMap for %n elements with %n B per element", {{LOG_DATA_INT32, &count}, {LOG_DATA_INT32, &element_size}});
LOG_1("[INFO] Allocate HashMap for %n elements with %n B per element", {{LOG_DATA_INT32, &count}, {LOG_DATA_INT32, &element_size}});
byte* data = (byte *) platform_alloc(
count * (sizeof(uint16) + element_size)
+ CEIL_DIV(count, alignment) * sizeof(hm->buf.free)
@ -160,7 +160,7 @@ inline
void hashmap_alloc(HashMapRef* hmr, int32 count, int32 data_element_size, int32 alignment = 64)
{
int32 element_size = sizeof(HashEntryInt32Int32);
LOG_1("Allocate HashMap for %n elements with %n B per element", {{LOG_DATA_INT32, &count}, {LOG_DATA_INT32, &element_size}});
LOG_1("[INFO] Allocate HashMap for %n elements with %n B per element", {{LOG_DATA_INT32, &count}, {LOG_DATA_INT32, &element_size}});
byte* data = (byte *) platform_alloc_aligned(
count * (sizeof(uint16) + element_size)
+ CEIL_DIV(count, alignment) * sizeof(hmr->hm.buf.free)
@ -187,7 +187,7 @@ void hashmap_free(HashMap* hm)
inline
void hashmap_create(HashMap* hm, int32 count, int32 element_size, RingMemory* ring, int32 alignment = 64) noexcept
{
LOG_1("Create HashMap for %n elements with %n B per element", {{LOG_DATA_INT32, &count}, {LOG_DATA_INT32, &element_size}});
LOG_1("[INFO] Create HashMap for %n elements with %n B per element", {{LOG_DATA_INT32, &count}, {LOG_DATA_INT32, &element_size}});
byte* data = ring_get_memory(
ring,
count * (sizeof(uint16) + element_size)
@ -202,7 +202,7 @@ void hashmap_create(HashMap* hm, int32 count, int32 element_size, RingMemory* ri
inline
void hashmap_create(HashMap* hm, int32 count, int32 element_size, BufferMemory* buf, int32 alignment = 64) noexcept
{
LOG_1("Create HashMap for %n elements with %n B per element", {{LOG_DATA_INT32, &count}, {LOG_DATA_INT32, &element_size}});
LOG_1("[INFO] Create HashMap for %n elements with %n B per element", {{LOG_DATA_INT32, &count}, {LOG_DATA_INT32, &element_size}});
byte* data = buffer_get_memory(
buf,
count * (sizeof(uint16) + element_size)
@ -217,7 +217,7 @@ void hashmap_create(HashMap* hm, int32 count, int32 element_size, BufferMemory*
inline
void hashmap_create(HashMap* hm, int32 count, int32 element_size, byte* buf, int32 alignment = 64) noexcept
{
LOG_1("Create HashMap for %n elements with %n B per element", {{LOG_DATA_INT32, &count}, {LOG_DATA_INT32, &element_size}});
LOG_1("[INFO] Create HashMap for %n elements with %n B per element", {{LOG_DATA_INT32, &count}, {LOG_DATA_INT32, &element_size}});
hm->table = (uint16 *) buf;
chunk_init(&hm->buf, buf + sizeof(uint16) * count, count, element_size, alignment);
}
@ -872,7 +872,7 @@ int32 hashmap_value_size(const HashMap* hm) noexcept
inline
int64 hashmap_dump(const HashMap* hm, byte* data, [[maybe_unused]] int32 steps = 8)
{
LOG_1("Dump HashMap");
LOG_1("[INFO] Dump HashMap");
*((uint32 *) data) = SWAP_ENDIAN_LITTLE(hm->buf.count);
data += sizeof(hm->buf.count);
@ -934,7 +934,7 @@ int64 hashmap_dump(const HashMap* hm, byte* data, [[maybe_unused]] int32 steps =
// dump free array
memcpy(data, hm->buf.free, sizeof(uint64) * CEIL_DIV(hm->buf.count, 64));
LOG_1("Dumped HashMap: %n B", {{LOG_DATA_UINT64, (void *) &hm->buf.size}});
LOG_1("[INFO] Dumped HashMap: %n B", {{LOG_DATA_UINT64, (void *) &hm->buf.size}});
return sizeof(hm->buf.count) // hash map count = buffer count
+ hm->buf.count * sizeof(uint16) // table content
@ -945,7 +945,7 @@ int64 hashmap_dump(const HashMap* hm, byte* data, [[maybe_unused]] int32 steps =
inline
int64 hashmap_load(HashMap* hm, const byte* data, [[maybe_unused]] int32 steps = 8)
{
LOG_1("Load HashMap");
LOG_1("[INFO] Load HashMap");
uint64 count = SWAP_ENDIAN_LITTLE(*((uint32 *) data));
data += sizeof(uint32);
@ -988,7 +988,7 @@ int64 hashmap_load(HashMap* hm, const byte* data, [[maybe_unused]] int32 steps =
}
} chunk_iterate_end;
LOG_1("Loaded HashMap: %n B", {{LOG_DATA_UINT64, &hm->buf.size}});
LOG_1("[INFO] Loaded HashMap: %n B", {{LOG_DATA_UINT64, &hm->buf.size}});
// How many bytes was read from data
return sizeof(hm->buf.count) // hash map count = buffer count

View File

@ -124,7 +124,7 @@ PerfectHashMap* perfect_hashmap_prepare(PerfectHashMap* hm, const char** keys, i
}
ASSERT_SIMPLE(false);
LOG_1("Couldn't create perfect hashmap");
LOG_1("[ERROR] Couldn't create perfect hashmap");
return NULL;
}
@ -166,14 +166,14 @@ PerfectHashMap* perfect_hashmap_prepare(PerfectHashMap* hm, const char* keys, in
}
ASSERT_SIMPLE(false);
LOG_1("Couldn't create perfect hashmap");
LOG_1("[INFO] Couldn't create perfect hashmap");
return NULL;
}
void perfect_hashmap_alloc(PerfectHashMap* hm, int32 count, int32 element_size, int32 alignment = 64)
{
LOG_1("Allocating PerfectHashMap for %n elements with %n B per element", {{LOG_DATA_INT32, &count}, {LOG_DATA_INT32, &element_size}});
LOG_1("[INFO] Allocating PerfectHashMap for %n elements with %n B per element", {{LOG_DATA_INT32, &count}, {LOG_DATA_INT32, &element_size}});
hm->map_count = count;
hm->entry_size = element_size;
hm->hash_entries = (byte *) platform_alloc_aligned(count * element_size, alignment);
@ -182,7 +182,7 @@ void perfect_hashmap_alloc(PerfectHashMap* hm, int32 count, int32 element_size,
void perfect_hashmap_alloc(PerfectHashMapRef* hmr, int32 count, int32 total_data_size, int32 alignment = 64)
{
hmr->hm.entry_size = sizeof(PerfectHashEntryInt32Int32);
LOG_1("Allocating PerfectHashMap for %n elements with %n B per element", {{LOG_DATA_INT32, &count}, {LOG_DATA_INT32, &hmr->hm.entry_size}});
LOG_1("[INFO] Allocating PerfectHashMap for %n elements with %n B per element", {{LOG_DATA_INT32, &count}, {LOG_DATA_INT32, &hmr->hm.entry_size}});
hmr->hm.map_count = count;
hmr->hm.hash_entries = (byte *) platform_alloc_aligned(
count * hmr->hm.entry_size
@ -206,7 +206,7 @@ void perfect_hashmap_free(PerfectHashMapRef* hmr) {
// WARNING: element_size = element size + remaining HashEntry data size
void perfect_hashmap_create(PerfectHashMap* hm, int32 count, int32 element_size, BufferMemory* buf)
{
LOG_1("Create PerfectHashMap for %n elements with %n B per element", {{LOG_DATA_INT32, &count}, {LOG_DATA_INT32, &element_size}});
LOG_1("[INFO] Create PerfectHashMap for %n elements with %n B per element", {{LOG_DATA_INT32, &count}, {LOG_DATA_INT32, &element_size}});
hm->map_count = count;
hm->entry_size = element_size;
hm->hash_entries = buffer_get_memory(
@ -219,7 +219,7 @@ void perfect_hashmap_create(PerfectHashMap* hm, int32 count, int32 element_size,
// WARNING: element_size = element size + remaining HashEntry data size
void perfect_hashmap_create(PerfectHashMap* hm, int32 count, int32 element_size, byte* buf)
{
LOG_1("Create PerfectHashMap for %n elements with %n B per element", {{LOG_DATA_INT32, &count}, {LOG_DATA_INT32, &element_size}});
LOG_1("[INFO] Create PerfectHashMap for %n elements with %n B per element", {{LOG_DATA_INT32, &count}, {LOG_DATA_INT32, &element_size}});
hm->map_count = count;
hm->entry_size = element_size;
hm->hash_entries = buf;

View File

@ -20,7 +20,7 @@ struct ThreadedHashMap {
void** table;
ChunkMemory buf;
mutex mutex;
mutex mtx;
};
// WARNING: element_size = element size + remaining HashEntry data size
@ -28,7 +28,7 @@ inline
void thrd_hashmap_create(ThreadedHashMap* hm, int32 count, int32 element_size, RingMemory* ring)
{
hashmap_create((HashMap *) hm, count, element_size, ring);
mutex_init(&hm->mutex, NULL);
mutex_init(&hm->mtx, NULL);
}
// WARNING: element_size = element size + remaining HashEntry data size
@@ -36,7 +36,7 @@ inline
void thrd_hashmap_create(ThreadedHashMap* hm, int32 count, int32 element_size, BufferMemory* buf)
{
hashmap_create((HashMap *) hm, count, element_size, buf);
mutex_init(&hm->mutex, NULL);
mutex_init(&hm->mtx, NULL);
}
// WARNING: element_size = element size + remaining HashEntry data size
@@ -44,85 +44,85 @@ inline
void thrd_hashmap_create(ThreadedHashMap* hm, int32 count, int32 element_size, byte* buf)
{
hashmap_create((HashMap *) hm, count, element_size, buf);
mutex_init(&hm->mutex, NULL);
mutex_init(&hm->mtx, NULL);
}
inline
void thrd_hashmap_free(ThreadedHashMap* hm)
{
mutex_destroy(&hm->mutex);
mutex_destroy(&hm->mtx);
}
inline
void thrd_hashmap_insert(ThreadedHashMap* hm, const char* key, int32 value) {
mutex_lock(&hm->mutex);
mutex_lock(&hm->mtx);
hashmap_insert((HashMap *) hm, key, value);
mutex_unlock(&hm->mutex);
mutex_unlock(&hm->mtx);
}
inline
void thrd_hashmap_insert(ThreadedHashMap* hm, const char* key, int64 value) {
mutex_lock(&hm->mutex);
mutex_lock(&hm->mtx);
hashmap_insert((HashMap *) hm, key, value);
mutex_unlock(&hm->mutex);
mutex_unlock(&hm->mtx);
}
inline
void thrd_hashmap_insert(ThreadedHashMap* hm, const char* key, uintptr_t value) {
mutex_lock(&hm->mutex);
mutex_lock(&hm->mtx);
hashmap_insert((HashMap *) hm, key, value);
mutex_unlock(&hm->mutex);
mutex_unlock(&hm->mtx);
}
inline
void thrd_hashmap_insert(ThreadedHashMap* hm, const char* key, void* value) {
mutex_lock(&hm->mutex);
mutex_lock(&hm->mtx);
hashmap_insert((HashMap *) hm, key, value);
mutex_unlock(&hm->mutex);
mutex_unlock(&hm->mtx);
}
inline
void thrd_hashmap_insert(ThreadedHashMap* hm, const char* key, f32 value) {
mutex_lock(&hm->mutex);
mutex_lock(&hm->mtx);
hashmap_insert((HashMap *) hm, key, value);
mutex_unlock(&hm->mutex);
mutex_unlock(&hm->mtx);
}
inline
void thrd_hashmap_insert(ThreadedHashMap* hm, const char* key, const char* value) {
mutex_lock(&hm->mutex);
mutex_lock(&hm->mtx);
hashmap_insert((HashMap *) hm, key, value);
mutex_unlock(&hm->mutex);
mutex_unlock(&hm->mtx);
}
inline
void thrd_hashmap_insert(ThreadedHashMap* hm, const char* key, byte* value) {
mutex_lock(&hm->mutex);
mutex_lock(&hm->mtx);
hashmap_insert((HashMap *) hm, key, value);
mutex_unlock(&hm->mutex);
mutex_unlock(&hm->mtx);
}
inline
void thrd_hashmap_get_entry(ThreadedHashMap* hm, HashEntry* entry, const char* key) {
mutex_lock(&hm->mutex);
mutex_lock(&hm->mtx);
HashEntry* temp = hashmap_get_entry((HashMap *) hm, key);
memcpy(entry, temp, hm->buf.chunk_size);
mutex_unlock(&hm->mutex);
mutex_unlock(&hm->mtx);
}
inline
void thrd_hashmap_get_entry(ThreadedHashMap* hm, HashEntry* entry, const char* key, uint64 index) {
mutex_lock(&hm->mutex);
mutex_lock(&hm->mtx);
HashEntry* temp = hashmap_get_entry((HashMap *) hm, key, index);
memcpy(entry, temp, hm->buf.chunk_size);
mutex_unlock(&hm->mutex);
mutex_unlock(&hm->mtx);
}
inline
void thrd_hashmap_remove(ThreadedHashMap* hm, const char* key) {
mutex_lock(&hm->mutex);
mutex_lock(&hm->mtx);
hashmap_remove((HashMap *) hm, key);
mutex_unlock(&hm->mutex);
mutex_unlock(&hm->mtx);
}
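// Usage sketch; assumes a prepared BufferMemory and, per the WARNING above, an element_size
// that already includes the HashEntry overhead on top of the value payload.
static void example_thrd_hashmap_usage(ThreadedHashMap* hm, BufferMemory* buf) {
    thrd_hashmap_create(hm, 256, sizeof(HashEntry) + sizeof(int32), buf);

    thrd_hashmap_insert(hm, "player_hp", 100);

    // Destination must hold chunk_size bytes, since the getter copies the whole entry under the lock
    alignas(8) byte entry_buf[sizeof(HashEntry) + sizeof(int32)];
    thrd_hashmap_get_entry(hm, (HashEntry *) entry_buf, "player_hp");

    thrd_hashmap_remove(hm, "player_hp");
    thrd_hashmap_free(hm);
}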
#endif

View File

@@ -26,12 +26,12 @@
void thread_create(Worker* worker, ThreadJobFunc routine, void* arg)
{
LOG_1("Thread starting");
LOG_1("[INFO] Thread starting");
coms_pthread_create(&worker->thread, NULL, routine, arg);
LOG_INCREMENT(DEBUG_COUNTER_THREAD);
LOG_2("%d threads running", {{LOG_DATA_INT64, (void *) &_stats_counter[DEBUG_COUNTER_THREAD]}});
LOG_2("[INFO] %d threads running", {{LOG_DATA_INT64, (void *) &_stats_counter[DEBUG_COUNTER_THREAD]}});
}
void thread_stop(Worker* worker)
@@ -39,9 +39,9 @@ void thread_stop(Worker* worker)
atomic_set_release(&worker->state, 0);
coms_pthread_join(worker->thread, NULL);
LOG_1("Thread ended");
LOG_1("[INFO] Thread ended");
LOG_DECREMENT(DEBUG_COUNTER_THREAD);
LOG_2("%d threads running", {{LOG_DATA_INT64, (void *) &_stats_counter[DEBUG_COUNTER_THREAD]}});
LOG_2("[INFO] %d threads running", {{LOG_DATA_INT64, (void *) &_stats_counter[DEBUG_COUNTER_THREAD]}});
}
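// Usage sketch; assumes ThreadJobFunc matches THREAD_RETURN (*)(void*) and that the routine
// keys off worker->state the same way thread_stop() does when it clears it.
static THREAD_RETURN example_worker_routine(void* arg) {
    Worker* self = (Worker *) arg;
    while (atomic_get_relaxed(&self->state)) {
        // ... periodic work ...
    }

    return (THREAD_RETURN) NULL;
}

static void example_run_worker(Worker* worker) {
    atomic_set_release(&worker->state, 1); // mark as running before the thread starts
    thread_create(worker, example_worker_routine, worker);
    // ... later ...
    thread_stop(worker); // clears the state and joins
}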
#endif

View File

@@ -18,9 +18,18 @@
typedef void (*ThreadPoolJobFunc)(void*);
enum PoolWorkerState : int32 {
POOL_WORKER_STATE_CANCEL = -1,
POOL_WORKER_STATE_COMPLETED = 0,
POOL_WORKER_STATE_WAITING = 1,
POOL_WORKER_STATE_RUNNING = 2
};
// @performance Could we reduce the size of PoolWorker by reducing atomic_32 to atomic_16?
// I don't think so because ThreadPoolJobFunc should be 8 bytes
struct PoolWorker {
alignas(4) atomic_32 int32 id;
alignas(4) atomic_32 int32 state;
alignas(4) atomic_32 PoolWorkerState state;
ThreadPoolJobFunc func;
ThreadPoolJobFunc callback;
};
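// Lifecycle per job as encoded above: WAITING (enqueued) -> RUNNING (picked up by a worker)
// -> COMPLETED (func done, callback fired); CANCEL (-1) makes a still-queued job get skipped.
// A job function simply receives its own PoolWorker*, since the pool worker calls
// work->func(work); the body below is a hypothetical illustration.
static void example_job_func(void* arg) {
    PoolWorker* self = (PoolWorker *) arg;
    // ... do the actual work; self->id identifies this job instance ...
    (void) self;
}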

View File

@@ -33,7 +33,7 @@ struct ThreadPool {
int32 size;
int32 element_size;
// 0 = down, 1 = shutting down, 2 = running
// 1 = waiting for run, 2 = running, 0 = completed, -1 = canceling
alignas(4) atomic_32 int32 state;
alignas(4) atomic_32 int32 id_counter;
@@ -58,7 +58,7 @@ THREAD_RETURN thread_pool_worker(void* arg)
}
// @bug Why doesn't this work? There must be some threading issue
LOG_2("Thread pool worker starting up");
LOG_2("[INFO] Thread pool worker starting up");
LOG_INCREMENT(DEBUG_COUNTER_THREAD);
PoolWorker* work;
@@ -78,30 +78,30 @@ THREAD_RETURN thread_pool_worker(void* arg)
// We define a queue element as free based on its id
// So even if we "keep" it in the queue, the pool will not overwrite it as long as the id > 0 (see pool_add)
// This is a ThreadPool-specific queue behavior to avoid an additional memory copy
// @bug this needs to be a threaded queue
work = (PoolWorker *) queue_dequeue_keep(&pool->work_queue);
mutex_unlock(&pool->work_mutex);
if (!work) {
// When a pool worker wakes up, the work may already have been dequeued by another thread
// -> we need to check whether the work is actually valid
if (work->state <= POOL_WORKER_STATE_COMPLETED || work->id <= 0) {
atomic_set_release((volatile int32*) &work->state, POOL_WORKER_STATE_COMPLETED);
continue;
}
atomic_increment_release(&pool->working_cnt);
atomic_set_release(&work->state, 2);
LOG_2("ThreadPool worker started");
atomic_set_release((volatile int32*) &work->state, POOL_WORKER_STATE_RUNNING);
LOG_3("ThreadPool worker started");
work->func(work);
LOG_2("ThreadPool worker ended");
atomic_set_release(&work->state, 1);
LOG_3("ThreadPool worker ended");
// @question Do I really need both state and id? Setting one should be sufficient
// Obviously we would also have to change thread_pool_add_work to check for state instead of id
atomic_set_release((volatile int32*) &work->state, POOL_WORKER_STATE_COMPLETED);
if (work->callback) {
work->callback(work);
}
// Job gets marked after completion -> can be overwritten now
if (atomic_get_relaxed(&work->id) == -1) {
atomic_set_release(&work->id, 0);
}
atomic_decrement_release(&pool->working_cnt);
// Signal that we ran out of work (maybe the main thread needs this info)
@@ -115,7 +115,7 @@ THREAD_RETURN thread_pool_worker(void* arg)
atomic_decrement_release(&pool->thread_cnt);
coms_pthread_cond_signal(&pool->working_cond);
LOG_2("Thread pool worker shutting down");
LOG_2("[INFO] Thread pool worker shutting down");
LOG_DECREMENT(DEBUG_COUNTER_THREAD);
return (THREAD_RETURN) NULL;
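// Summary of the per-job flow implemented above: a job enters the ring as WAITING, the worker
// that wins the dequeue flips it to RUNNING, runs func, marks it COMPLETED and then fires the
// callback; an id of -1 additionally releases the slot for reuse. Stale wake-ups (state already
// <= COMPLETED or id <= 0) are skipped without touching working_cnt.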
@@ -130,7 +130,7 @@ void thread_pool_alloc(
) {
PROFILE(PROFILE_THREAD_POOL_ALLOC);
LOG_1(
"Allocating thread pool with %d threads and %d queue length",
"[INFO] Allocating thread pool with %d threads and %d queue length",
{
{LOG_DATA_INT32, &thread_count},
{LOG_DATA_INT32, &worker_count}
@@ -156,7 +156,7 @@ void thread_pool_alloc(
coms_pthread_detach(thread);
}
LOG_2("%d threads running", {{LOG_DATA_INT64, (void *) &_stats_counter[DEBUG_COUNTER_THREAD]}});
LOG_2("[INFO] %d threads running", {{LOG_DATA_INT64, (void *) &_stats_counter[DEBUG_COUNTER_THREAD]}});
}
void thread_pool_create(
@@ -195,7 +195,7 @@ void thread_pool_create(
coms_pthread_detach(thread);
}
LOG_2("%d threads running", {{LOG_DATA_INT64, (void *) &_stats_counter[DEBUG_COUNTER_THREAD]}});
LOG_2("[INFO] %d threads running", {{LOG_DATA_INT64, (void *) &_stats_counter[DEBUG_COUNTER_THREAD]}});
}
void thread_pool_wait(ThreadPool* pool)
@@ -232,14 +232,17 @@ PoolWorker* thread_pool_add_work(ThreadPool* pool, const PoolWorker* job)
{
mutex_lock(&pool->work_mutex);
PoolWorker* temp_job = (PoolWorker *) ring_get_memory_nomove((RingMemory *) &pool->work_queue, pool->element_size, 8);
if (atomic_get_relaxed(&temp_job->id) > 0) {
if (atomic_get_relaxed((volatile int32*) &temp_job->state) > POOL_WORKER_STATE_COMPLETED) {
mutex_unlock(&pool->work_mutex);
ASSERT_SIMPLE(temp_job->id == 0);
ASSERT_SIMPLE(temp_job->state <= POOL_WORKER_STATE_COMPLETED);
return NULL;
}
memcpy(temp_job, job, pool->element_size);
temp_job->state = POOL_WORKER_STATE_WAITING;
ring_move_pointer((RingMemory *) &pool->work_queue, &pool->work_queue.head, pool->element_size, 8);
if (temp_job->id == 0) {
@@ -259,9 +262,9 @@ PoolWorker* thread_pool_add_work_start(ThreadPool* pool)
mutex_lock(&pool->work_mutex);
PoolWorker* temp_job = (PoolWorker *) queue_enqueue_start(&pool->work_queue);
if (atomic_get_relaxed(&temp_job->id) > 0) {
if (atomic_get_relaxed((volatile int32*) &temp_job->state) > POOL_WORKER_STATE_COMPLETED) {
mutex_unlock(&pool->work_mutex);
ASSERT_SIMPLE(temp_job->id == 0);
ASSERT_SIMPLE(temp_job->state <= POOL_WORKER_STATE_COMPLETED);
return NULL;
}
@@ -271,6 +274,8 @@ PoolWorker* thread_pool_add_work_start(ThreadPool* pool)
temp_job->id = atomic_fetch_add_acquire(&pool->id_counter, 1) + 1;
}
temp_job->state = POOL_WORKER_STATE_WAITING;
return temp_job;
}
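// Usage sketch (example_job_func stands in for any ThreadPoolJobFunc): thread_pool_add_work
// copies element_size bytes into the ring, assigns an id if none was set and marks the slot
// WAITING; a NULL return means the target slot still belongs to an unfinished job.
static bool example_enqueue(ThreadPool* pool) {
    PoolWorker job = {};
    job.func = example_job_func;

    return thread_pool_add_work(pool, &job) != NULL;
}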

View File

@@ -14,6 +14,8 @@
#include "../utils/TestUtils.h"
#include "../utils/TimeUtils.h"
// PERFORMANCE: Approx. 4x faster than rand()
inline
uint32 rand_fast(uint32* state) {
static const uint32 z = 0x9E3779B9;
uint32 x = *state;
@@ -29,6 +31,7 @@ uint32 rand_fast(uint32* state) {
return x;
}
inline
uint64 rand_fast(uint64* state) {
static const uint64 z = 0x9FB21C651E98DF25;
uint64 x = *state;
@@ -44,6 +47,10 @@ uint64 rand_fast(uint64* state) {
return x;
}
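// Note on the bounded overload below: the 64-bit multiply-and-shift maps the full 32-bit
// random value into [0, max) without a division; the distribution is the same as a plain
// `% max`, just cheaper.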
uint32 rand_fast(uint32* state, int32 max) {
return (uint32) (((uint64) rand_fast(state) * max) >> 32);
}
/**
* Picks n random elements from end and stores them in begin.
*/

View File

@@ -1168,11 +1168,12 @@ bool str_contains(const char* __restrict haystack, const char* __restrict needle
while (*haystack != '\0' && length > 0) {
const char* p1 = haystack;
const char* p2 = needle;
size_t remaining = length;
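// Compare against a local copy so a partially matched needle cannot eat into the
// outer loop's remaining budget.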
while (*p1 != '\0' && *p2 != '\0' && *p1 == *p2) {
while (*p2 != '\0' && remaining > 0 && *p1 == *p2) {
++p1;
++p2;
--length;
--remaining;
}
if (*p2 == '\0') {
@@ -1690,6 +1691,33 @@ int32 float_to_str(f64 value, char* buffer, int32 precision = 5) noexcept
return (int32) (buffer - start);
}
inline
void format_time_hh_mm_ss_ms(char time_str[13], int32 hours, int32 minutes, int32 secs, int32 ms) noexcept {
time_str[0] = (char) ('0' + (hours / 10));
time_str[1] = (char) ('0' + (hours % 10));
time_str[2] = ':';
time_str[3] = (char) ('0' + (minutes / 10));
time_str[4] = (char) ('0' + (minutes % 10));
time_str[5] = ':';
time_str[6] = (char) ('0' + (secs / 10));
time_str[7] = (char) ('0' + (secs % 10));
time_str[8] = '.';
time_str[9] = (char) ('0' + (ms / 100));
time_str[10] = (char) ('0' + ((ms / 10) % 10));
time_str[11] = (char) ('0' + (ms % 10));
time_str[12] = '\0';
}
inline
void format_time_hh_mm_ss_ms(char time_str[13], uint64 ms) noexcept {
uint64 seconds = ms / 1000;
int32 hours = (seconds / 3600) % 24;
int32 minutes = (seconds / 60) % 60;
int32 secs = seconds % 60;
format_time_hh_mm_ss_ms(time_str, hours, minutes, secs, ms % 1000);
}
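// Usage sketch: 3723456 ms is 1 h, 2 min, 3 s and 456 ms.
// char buf[13];
// format_time_hh_mm_ss_ms(buf, 3723456ULL); // -> "01:02:03.456"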
inline
void format_time_hh_mm_ss(char time_str[9], int32 hours, int32 minutes, int32 secs) noexcept {
time_str[0] = (char) ('0' + (hours / 10));
@@ -1704,10 +1732,10 @@ void format_time_hh_mm_ss(char time_str[9], int32 hours, int32 minutes, int32 se
}
inline
void format_time_hh_mm_ss(char time_str[9], uint64 time) noexcept {
int32 hours = (time / 3600) % 24;
int32 minutes = (time / 60) % 60;
int32 secs = time % 60;
void format_time_hh_mm_ss(char time_str[9], uint64 seconds) noexcept {
int32 hours = (seconds / 3600) % 24;
int32 minutes = (seconds / 60) % 60;
int32 secs = seconds % 60;
format_time_hh_mm_ss(time_str, hours, minutes, secs);
}
@@ -1723,9 +1751,9 @@ void format_time_hh_mm(char time_str[6], int32 hours, int32 minutes) noexcept {
}
inline
void format_time_hh_mm(char time_str[6], uint64 time) noexcept {
int32 hours = (time / 3600) % 24;
int32 minutes = (time / 60) % 60;
void format_time_hh_mm(char time_str[6], uint64 seconds) noexcept {
int32 hours = (seconds / 3600) % 24;
int32 minutes = (seconds / 60) % 60;
format_time_hh_mm(time_str, hours, minutes);
}
@@ -1804,7 +1832,7 @@ void sprintf_fast(char* __restrict buffer, const char* __restrict format, ...) n
va_end(args);
}
void sprintf_fast(char* __restrict buffer, int32 buffer_length, const char* __restrict format, ...) noexcept {
int32 sprintf_fast(char* __restrict buffer, int32 buffer_length, const char* __restrict format, ...) noexcept {
va_list args;
va_start(args, format);
@@ -1883,6 +1911,8 @@ void sprintf_fast(char* __restrict buffer, int32 buffer_length, const char* __re
*buffer = '\0';
va_end(args);
return length - 1;
}
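// Usage sketch (format specifiers and variable names are illustrative): the int32 return,
// presumably the number of characters written before the terminating '\0', lets a caller
// chain writes into one buffer.
// char line[256];
// int32 off = sprintf_fast(line, sizeof(line), "frame %d: ", frame_idx);
// sprintf_fast(line + off, (int32) sizeof(line) - off, "%d ms", frame_ms);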
// There are situations where you only want to replace a certain amount of %