[MOD/WIP] Start separating the main DB API

Not everything is available as of now; I'm still working on it, but it
builds and seems to work, and it's 9 PM, so that's worth a push.
LDA 2024-08-07 20:45:53 +02:00
parent 87d9421f11
commit b87979e9a2
7 changed files with 998 additions and 979 deletions

configure (vendored): 4 lines changed

@@ -15,7 +15,7 @@ TOOLS="tools"
# Default compiler flags. These must be supported by all POSIX C compilers.
# "Fancy" compilers that have additional options must be detected and set below.
CFLAGS="-O1 -D_DEFAULT_SOURCE -I${INCLUDE}"
CFLAGS="-O1 -D_DEFAULT_SOURCE -I${INCLUDE} -I${SRC}";
LIBS="-lm -lpthread"
# Default args for all platforms.
@@ -153,7 +153,7 @@ print_obj() {
get_deps() {
src="$1"
${CC} -I${INCLUDE} -E "$src" \
${CC} -I${SRC} -I${INCLUDE} -E "$src" \
| grep '^#' \
| awk '{print $3}' \
| cut -d '"' -f 2 \

Db.h: 9 lines added

@@ -68,6 +68,15 @@ typedef struct DbRef DbRef;
*/
extern Db * DbOpen(char *, size_t);
/**
* Open an LMDB data directory. This function is similar to
* .Fn DbOpen ,
* but works with an LMDB-based backend, with the second argument
* being the maximum file size. This function fails with NULL if
* called without LMDB enabled at compile time.
*/
extern Db * DbOpenLMDB(char *, size_t);
/**
* Close the database. This function will flush anything in the cache
* to the disk, and then close the data directory. It assumes that
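
As a usage sketch of the two open paths declared above: the directory,
sizes, and the fall-back-to-flat-file policy below are illustrative
assumptions, not part of this commit.

#include <Db.h>

/* Hypothetical helper: prefer the LMDB backend when it was compiled
 * in, otherwise fall back to the flat-file backend. */
static Db *
OpenDatabase(void)
{
    Db *db = DbOpenLMDB("/var/db/example", 64 * 1024 * 1024);

    if (!db)
    {
        /* NULL also covers builds without LMDB enabled at compile time. */
        db = DbOpen("/var/db/example", 1024 * 1024);
    }
    return db;
}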

src/Db.c (deleted): 977 lines removed

@@ -1,977 +0,0 @@
/*
* Copyright (C) 2022-2024 Jordan Bancino <@jordan:bancino.net>
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation files
* (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <Db.h>
#include <Memory.h>
#include <Json.h>
#include <Util.h>
#include <Str.h>
#include <Stream.h>
#include <Log.h>
#include <sys/types.h>
#include <dirent.h>
#include <pthread.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
struct Db
{
char *dir;
pthread_mutex_t lock;
size_t cacheSize;
size_t maxCache;
HashMap *cache;
/*
* The cache uses a double linked list (see DbRef
* below) to know which objects are most and least
* recently used. The following diagram helps me
* know what way all the pointers go, because it
* can get very confusing sometimes. For example,
* there's nothing stopping "next" from pointing to
* least recent, and "prev" from pointing to most
* recent, so hopefully this clarifies the pointer
* terminology used when dealing with the linked
* list:
*
* mostRecent leastRecent
* | prev prev | prev
* +---+ ---> +---+ ---> +---+ ---> NULL
* |ref| |ref| |ref|
* NULL <--- +---+ <--- +---+ <--- +---+
* next next next
*/
DbRef *mostRecent;
DbRef *leastRecent;
};
struct DbRef
{
HashMap *json;
uint64_t ts;
size_t size;
Array *name;
DbRef *prev;
DbRef *next;
int fd;
Stream *stream;
};
static void
StringArrayFree(Array * arr)
{
size_t i;
for (i = 0; i < ArraySize(arr); i++)
{
Free(ArrayGet(arr, i));
}
ArrayFree(arr);
}
static ssize_t DbComputeSize(HashMap *);
static ssize_t
DbComputeSizeOfValue(JsonValue * val)
{
MemoryInfo *a;
ssize_t total = 0;
size_t i;
union
{
char *str;
Array *arr;
} u;
if (!val)
{
return -1;
}
a = MemoryInfoGet(val);
if (a)
{
total += MemoryInfoGetSize(a);
}
switch (JsonValueType(val))
{
case JSON_OBJECT:
total += DbComputeSize(JsonValueAsObject(val));
break;
case JSON_ARRAY:
u.arr = JsonValueAsArray(val);
a = MemoryInfoGet(u.arr);
if (a)
{
total += MemoryInfoGetSize(a);
}
for (i = 0; i < ArraySize(u.arr); i++)
{
total += DbComputeSizeOfValue(ArrayGet(u.arr, i));
}
break;
case JSON_STRING:
u.str = JsonValueAsString(val);
a = MemoryInfoGet(u.str);
if (a)
{
total += MemoryInfoGetSize(a);
}
break;
case JSON_NULL:
case JSON_INTEGER:
case JSON_FLOAT:
case JSON_BOOLEAN:
default:
/* These don't use any extra heap space */
break;
}
return total;
}
static ssize_t
DbComputeSize(HashMap * json)
{
char *key;
JsonValue *val;
MemoryInfo *a;
size_t total;
if (!json)
{
return -1;
}
total = 0;
a = MemoryInfoGet(json);
if (a)
{
total += MemoryInfoGetSize(a);
}
while (HashMapIterate(json, &key, (void **) &val))
{
a = MemoryInfoGet(key);
if (a)
{
total += MemoryInfoGetSize(a);
}
total += DbComputeSizeOfValue(val);
}
return total;
}
static char *
DbHashKey(Array * args)
{
size_t i;
char *str = NULL;
for (i = 0; i < ArraySize(args); i++)
{
char *tmp = StrConcat(2, str, ArrayGet(args, i));
Free(str);
str = tmp;
}
return str;
}
static char
DbSanitiseChar(char input)
{
switch (input)
{
case '/':
return '_';
case '.':
return '-';
}
return input;
}
static char *
DbDirName(Db * db, Array * args, size_t strip)
{
size_t i, j;
char *str = StrConcat(2, db->dir, "/");
for (i = 0; i < ArraySize(args) - strip; i++)
{
char *tmp;
char *sanitise = StrDuplicate(ArrayGet(args, i));
for (j = 0; j < strlen(sanitise); j++)
{
sanitise[j] = DbSanitiseChar(sanitise[j]);
}
tmp = StrConcat(3, str, sanitise, "/");
Free(str);
Free(sanitise);
str = tmp;
}
return str;
}
static char *
DbFileName(Db * db, Array * args)
{
size_t i;
char *str = StrConcat(2, db->dir, "/");
for (i = 0; i < ArraySize(args); i++)
{
char *tmp;
char *arg = StrDuplicate(ArrayGet(args, i));
size_t j = 0;
/* Sanitize name to prevent directory traversal attacks */
while (arg[j])
{
arg[j] = DbSanitiseChar(arg[j]);
j++;
}
tmp = StrConcat(3, str, arg,
(i < ArraySize(args) - 1) ? "/" : ".json");
Free(arg);
Free(str);
str = tmp;
}
return str;
}
static void
DbCacheEvict(Db * db)
{
DbRef *ref = db->leastRecent;
DbRef *tmp;
while (ref && db->cacheSize > db->maxCache)
{
char *hash;
JsonFree(ref->json);
hash = DbHashKey(ref->name);
HashMapDelete(db->cache, hash);
Free(hash);
StringArrayFree(ref->name);
db->cacheSize -= ref->size;
if (ref->next)
{
ref->next->prev = ref->prev;
}
else
{
db->mostRecent = ref->prev;
}
if (ref->prev)
{
ref->prev->next = ref->next;
}
else
{
db->leastRecent = ref->next;
}
tmp = ref->next;
Free(ref);
ref = tmp;
}
}
Db *
DbOpen(char *dir, size_t cache)
{
Db *db;
pthread_mutexattr_t attr;
if (!dir)
{
return NULL;
}
db = Malloc(sizeof(Db));
if (!db)
{
return NULL;
}
db->dir = dir;
db->maxCache = cache;
pthread_mutexattr_init(&attr);
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
pthread_mutex_init(&db->lock, &attr);
pthread_mutexattr_destroy(&attr);
db->mostRecent = NULL;
db->leastRecent = NULL;
db->cacheSize = 0;
if (db->maxCache)
{
db->cache = HashMapCreate();
if (!db->cache)
{
return NULL;
}
}
else
{
db->cache = NULL;
}
return db;
}
void
DbMaxCacheSet(Db * db, size_t cache)
{
if (!db)
{
return;
}
pthread_mutex_lock(&db->lock);
db->maxCache = cache;
if (db->maxCache && !db->cache)
{
db->cache = HashMapCreate();
db->cacheSize = 0;
}
DbCacheEvict(db);
pthread_mutex_unlock(&db->lock);
}
void
DbClose(Db * db)
{
if (!db)
{
return;
}
pthread_mutex_lock(&db->lock);
DbMaxCacheSet(db, 0);
DbCacheEvict(db);
HashMapFree(db->cache);
pthread_mutex_unlock(&db->lock);
pthread_mutex_destroy(&db->lock);
Free(db);
}
static DbRef *
DbLockFromArr(Db * db, Array * args)
{
char *file;
char *hash;
DbRef *ref;
struct flock lock;
int fd;
Stream *stream;
if (!db || !args)
{
return NULL;
}
ref = NULL;
hash = NULL;
pthread_mutex_lock(&db->lock);
/* Check if the item is in the cache */
hash = DbHashKey(args);
ref = HashMapGet(db->cache, hash);
file = DbFileName(db, args);
fd = open(file, O_RDWR);
if (fd == -1)
{
if (ref)
{
HashMapDelete(db->cache, hash);
JsonFree(ref->json);
StringArrayFree(ref->name);
db->cacheSize -= ref->size;
if (ref->next)
{
ref->next->prev = ref->prev;
}
else
{
db->mostRecent = ref->prev;
}
if (ref->prev)
{
ref->prev->next = ref->next;
}
else
{
db->leastRecent = ref->next;
}
if (!db->leastRecent)
{
db->leastRecent = db->mostRecent;
}
Free(ref);
}
ref = NULL;
goto finish;
}
stream = StreamFd(fd);
lock.l_start = 0;
lock.l_len = 0;
lock.l_type = F_WRLCK;
lock.l_whence = SEEK_SET;
/* Lock the file on the disk */
if (fcntl(fd, F_SETLK, &lock) < 0)
{
StreamClose(stream);
ref = NULL;
goto finish;
}
if (ref) /* In cache */
{
uint64_t diskTs = UtilLastModified(file);
ref->fd = fd;
ref->stream = stream;
if (diskTs > ref->ts)
{
/* File was modified on disk since it was cached */
HashMap *json = JsonDecode(ref->stream);
if (!json)
{
StreamClose(ref->stream);
ref = NULL;
goto finish;
}
JsonFree(ref->json);
ref->json = json;
ref->ts = diskTs;
ref->size = DbComputeSize(ref->json);
}
/* Float this ref to mostRecent */
if (ref->next)
{
ref->next->prev = ref->prev;
if (!ref->prev)
{
db->leastRecent = ref->next;
}
else
{
ref->prev->next = ref->next;
}
ref->prev = db->mostRecent;
ref->next = NULL;
if (db->mostRecent)
{
db->mostRecent->next = ref;
}
db->mostRecent = ref;
}
/* If there is no least recent, this is the only thing in the
* cache, so it is also least recent. */
if (!db->leastRecent)
{
db->leastRecent = ref;
}
/* The file on disk may be larger than what we have in memory,
* which may require items in cache to be evicted. */
DbCacheEvict(db);
}
else
{
Array *name;
size_t i;
/* Not in cache; load from disk */
ref = Malloc(sizeof(DbRef));
if (!ref)
{
StreamClose(stream);
goto finish;
}
ref->json = JsonDecode(stream);
if (!ref->json)
{
Free(ref);
StreamClose(stream);
ref = NULL;
goto finish;
}
ref->fd = fd;
ref->stream = stream;
name = ArrayCreate();
for (i = 0; i < ArraySize(args); i++)
{
ArrayAdd(name, StrDuplicate(ArrayGet(args, i)));
}
ref->name = name;
if (db->cache)
{
ref->ts = UtilTsMillis();
ref->size = DbComputeSize(ref->json);
HashMapSet(db->cache, hash, ref);
db->cacheSize += ref->size;
ref->next = NULL;
ref->prev = db->mostRecent;
if (db->mostRecent)
{
db->mostRecent->next = ref;
}
db->mostRecent = ref;
if (!db->leastRecent)
{
db->leastRecent = ref;
}
/* Adding this item to the cache may case it to grow too
* large, requiring some items to be evicted */
DbCacheEvict(db);
}
}
finish:
if (!ref)
{
pthread_mutex_unlock(&db->lock);
}
Free(file);
Free(hash);
return ref;
}
DbRef *
DbCreate(Db * db, size_t nArgs,...)
{
Stream *fp;
char *file;
char *dir;
va_list ap;
Array *args;
DbRef *ret;
if (!db)
{
return NULL;
}
va_start(ap, nArgs);
args = ArrayFromVarArgs(nArgs, ap);
va_end(ap);
if (!args)
{
return NULL;
}
pthread_mutex_lock(&db->lock);
file = DbFileName(db, args);
if (UtilLastModified(file))
{
Free(file);
ArrayFree(args);
pthread_mutex_unlock(&db->lock);
return NULL;
}
dir = DbDirName(db, args, 1);
if (UtilMkdir(dir, 0750) < 0)
{
Free(file);
ArrayFree(args);
Free(dir);
pthread_mutex_unlock(&db->lock);
return NULL;
}
Free(dir);
fp = StreamOpen(file, "w");
Free(file);
if (!fp)
{
ArrayFree(args);
pthread_mutex_unlock(&db->lock);
return NULL;
}
StreamPuts(fp, "{}");
StreamClose(fp);
/* DbLockFromArr() will lock again for us */
pthread_mutex_unlock(&db->lock);
ret = DbLockFromArr(db, args);
ArrayFree(args);
return ret;
}
bool
DbDelete(Db * db, size_t nArgs,...)
{
va_list ap;
Array *args;
char *file;
char *hash;
bool ret = true;
DbRef *ref;
if (!db)
{
return false;
}
va_start(ap, nArgs);
args = ArrayFromVarArgs(nArgs, ap);
va_end(ap);
pthread_mutex_lock(&db->lock);
hash = DbHashKey(args);
file = DbFileName(db, args);
ref = HashMapGet(db->cache, hash);
if (ref)
{
HashMapDelete(db->cache, hash);
JsonFree(ref->json);
StringArrayFree(ref->name);
db->cacheSize -= ref->size;
if (ref->next)
{
ref->next->prev = ref->prev;
}
else
{
db->mostRecent = ref->prev;
}
if (ref->prev)
{
ref->prev->next = ref->next;
}
else
{
db->leastRecent = ref->next;
}
if (!db->leastRecent)
{
db->leastRecent = db->mostRecent;
}
Free(ref);
}
Free(hash);
if (UtilLastModified(file))
{
ret = (remove(file) == 0);
}
pthread_mutex_unlock(&db->lock);
ArrayFree(args);
Free(file);
return ret;
}
DbRef *
DbLock(Db * db, size_t nArgs,...)
{
va_list ap;
Array *args;
DbRef *ret;
va_start(ap, nArgs);
args = ArrayFromVarArgs(nArgs, ap);
va_end(ap);
if (!args)
{
return NULL;
}
ret = DbLockFromArr(db, args);
ArrayFree(args);
return ret;
}
bool
DbUnlock(Db * db, DbRef * ref)
{
bool destroy;
if (!db || !ref)
{
return false;
}
lseek(ref->fd, 0L, SEEK_SET);
if (ftruncate(ref->fd, 0) < 0)
{
pthread_mutex_unlock(&db->lock);
Log(LOG_ERR, "Failed to truncate file on disk.");
Log(LOG_ERR, "Error on fd %d: %s", ref->fd, strerror(errno));
return false;
}
JsonEncode(ref->json, ref->stream, JSON_DEFAULT);
StreamClose(ref->stream);
if (db->cache)
{
char *key = DbHashKey(ref->name);
if (HashMapGet(db->cache, key))
{
db->cacheSize -= ref->size;
ref->size = DbComputeSize(ref->json);
db->cacheSize += ref->size;
/* If this ref has grown significantly since we last
* computed its size, it may have filled the cache and
* require some items to be evicted. */
DbCacheEvict(db);
destroy = false;
}
else
{
destroy = true;
}
Free(key);
}
else
{
destroy = true;
}
if (destroy)
{
JsonFree(ref->json);
StringArrayFree(ref->name);
Free(ref);
}
pthread_mutex_unlock(&db->lock);
return true;
}
bool
DbExists(Db * db, size_t nArgs,...)
{
va_list ap;
Array *args;
char *file;
bool ret;
va_start(ap, nArgs);
args = ArrayFromVarArgs(nArgs, ap);
va_end(ap);
if (!args)
{
return false;
}
pthread_mutex_lock(&db->lock);
file = DbFileName(db, args);
ret = (UtilLastModified(file) != 0);
pthread_mutex_unlock(&db->lock);
Free(file);
ArrayFree(args);
return ret;
}
Array *
DbList(Db * db, size_t nArgs,...)
{
Array *result;
Array *path;
DIR *files;
struct dirent *file;
char *dir;
va_list ap;
if (!db || !nArgs)
{
return NULL;
}
result = ArrayCreate();
if (!result)
{
return NULL;
}
va_start(ap, nArgs);
path = ArrayFromVarArgs(nArgs, ap);
dir = DbDirName(db, path, 0);
pthread_mutex_lock(&db->lock);
files = opendir(dir);
if (!files)
{
ArrayFree(path);
ArrayFree(result);
Free(dir);
pthread_mutex_unlock(&db->lock);
return NULL;
}
while ((file = readdir(files)))
{
size_t namlen = strlen(file->d_name);
if (namlen > 5)
{
int nameOffset = namlen - 5;
if (StrEquals(file->d_name + nameOffset, ".json"))
{
file->d_name[nameOffset] = '\0';
ArrayAdd(result, StrDuplicate(file->d_name));
}
}
}
closedir(files);
ArrayFree(path);
Free(dir);
pthread_mutex_unlock(&db->lock);
return result;
}
void
DbListFree(Array * arr)
{
StringArrayFree(arr);
}
HashMap *
DbJson(DbRef * ref)
{
return ref ? ref->json : NULL;
}
bool
DbJsonSet(DbRef * ref, HashMap * json)
{
if (!ref || !json)
{
return false;
}
JsonFree(ref->json);
ref->json = JsonDuplicate(json);
return true;
}

src/Db/Db.c (new file): 576 lines added

@@ -0,0 +1,576 @@
/*
* Copyright (C) 2022-2024 Jordan Bancino <@jordan:bancino.net>
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation files
* (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <Db.h>
#include <Memory.h>
#include <Json.h>
#include <Util.h>
#include <Str.h>
#include <Stream.h>
#include <Log.h>
#include <sys/types.h>
#include <dirent.h>
#include <pthread.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include "Db/Internal.h"
void
StringArrayFree(Array * arr)
{
size_t i;
for (i = 0; i < ArraySize(arr); i++)
{
Free(ArrayGet(arr, i));
}
ArrayFree(arr);
}
static ssize_t DbComputeSize(HashMap *);
static ssize_t
DbComputeSizeOfValue(JsonValue * val)
{
MemoryInfo *a;
ssize_t total = 0;
size_t i;
union
{
char *str;
Array *arr;
} u;
if (!val)
{
return -1;
}
a = MemoryInfoGet(val);
if (a)
{
total += MemoryInfoGetSize(a);
}
switch (JsonValueType(val))
{
case JSON_OBJECT:
total += DbComputeSize(JsonValueAsObject(val));
break;
case JSON_ARRAY:
u.arr = JsonValueAsArray(val);
a = MemoryInfoGet(u.arr);
if (a)
{
total += MemoryInfoGetSize(a);
}
for (i = 0; i < ArraySize(u.arr); i++)
{
total += DbComputeSizeOfValue(ArrayGet(u.arr, i));
}
break;
case JSON_STRING:
u.str = JsonValueAsString(val);
a = MemoryInfoGet(u.str);
if (a)
{
total += MemoryInfoGetSize(a);
}
break;
case JSON_NULL:
case JSON_INTEGER:
case JSON_FLOAT:
case JSON_BOOLEAN:
default:
/* These don't use any extra heap space */
break;
}
return total;
}
static ssize_t
DbComputeSize(HashMap * json)
{
char *key;
JsonValue *val;
MemoryInfo *a;
size_t total;
if (!json)
{
return -1;
}
total = 0;
a = MemoryInfoGet(json);
if (a)
{
total += MemoryInfoGetSize(a);
}
while (HashMapIterate(json, &key, (void **) &val))
{
a = MemoryInfoGet(key);
if (a)
{
total += MemoryInfoGetSize(a);
}
total += DbComputeSizeOfValue(val);
}
return total;
}
static char *
DbHashKey(Array * args)
{
size_t i;
char *str = NULL;
for (i = 0; i < ArraySize(args); i++)
{
char *tmp = StrConcat(2, str, ArrayGet(args, i));
Free(str);
str = tmp;
}
return str;
}
static void
DbCacheEvict(Db * db)
{
DbRef *ref = db->leastRecent;
DbRef *tmp;
while (ref && db->cacheSize > db->maxCache)
{
char *hash;
JsonFree(ref->json);
hash = DbHashKey(ref->name);
HashMapDelete(db->cache, hash);
Free(hash);
StringArrayFree(ref->name);
db->cacheSize -= ref->size;
if (ref->next)
{
ref->next->prev = ref->prev;
}
else
{
db->mostRecent = ref->prev;
}
if (ref->prev)
{
ref->prev->next = ref->next;
}
else
{
db->leastRecent = ref->next;
}
tmp = ref->next;
Free(ref);
ref = tmp;
}
}
void
DbMaxCacheSet(Db * db, size_t cache)
{
if (!db)
{
return;
}
pthread_mutex_lock(&db->lock);
db->maxCache = cache;
if (db->maxCache && !db->cache)
{
db->cache = HashMapCreate();
db->cacheSize = 0;
}
DbCacheEvict(db);
pthread_mutex_unlock(&db->lock);
}
void
DbClose(Db * db)
{
if (!db)
{
return;
}
pthread_mutex_lock(&db->lock);
if (db->close)
{
db->close(db);
}
DbMaxCacheSet(db, 0);
DbCacheEvict(db);
HashMapFree(db->cache);
pthread_mutex_unlock(&db->lock);
pthread_mutex_destroy(&db->lock);
Free(db);
}
DbRef *
DbCreate(Db * db, size_t nArgs,...)
{
va_list ap;
Array *args;
DbRef *ret;
if (!db || !db->create)
{
return NULL;
}
va_start(ap, nArgs);
args = ArrayFromVarArgs(nArgs, ap);
va_end(ap);
if (!args)
{
return NULL;
}
ret = db->create(db, args);
ArrayFree(args);
return ret;
}
bool
DbDelete(Db * db, size_t nArgs,...)
{
(void) nArgs;
(void) db;
/* TODO */
/*va_list ap;
Array *args;
char *file;
char *hash;
bool ret = true;
DbRef *ref;
if (!db)
{
return false;
}
va_start(ap, nArgs);
args = ArrayFromVarArgs(nArgs, ap);
va_end(ap);
pthread_mutex_lock(&db->lock);
hash = DbHashKey(args);
file = DbFileName(db, args);
ref = HashMapGet(db->cache, hash);
if (ref)
{
HashMapDelete(db->cache, hash);
JsonFree(ref->json);
StringArrayFree(ref->name);
db->cacheSize -= ref->size;
if (ref->next)
{
ref->next->prev = ref->prev;
}
else
{
db->mostRecent = ref->prev;
}
if (ref->prev)
{
ref->prev->next = ref->next;
}
else
{
db->leastRecent = ref->next;
}
if (!db->leastRecent)
{
db->leastRecent = db->mostRecent;
}
Free(ref);
}
Free(hash);
if (UtilLastModified(file))
{
ret = (remove(file) == 0);
}
pthread_mutex_unlock(&db->lock);
ArrayFree(args);
Free(file);
return ret;*/
return false;
}
DbRef *
DbLock(Db * db, size_t nArgs,...)
{
va_list ap;
Array *args;
DbRef *ret;
va_start(ap, nArgs);
args = ArrayFromVarArgs(nArgs, ap);
va_end(ap);
if (!args || !db->lockFunc)
{
return NULL;
}
ret = db->lockFunc(db, args);
ArrayFree(args);
return ret;
}
bool
DbUnlock(Db * db, DbRef * ref)
{
if (!db || !ref || !db->unlock)
{
return false;
}
return db->unlock(db, ref);
}
bool
DbExists(Db * db, size_t nArgs,...)
{
(void) nArgs;
(void) db;
return false;
/*va_list ap;
Array *args;
char *file;
bool ret;
va_start(ap, nArgs);
args = ArrayFromVarArgs(nArgs, ap);
va_end(ap);
if (!args)
{
return false;
}
pthread_mutex_lock(&db->lock);
file = DbFileName(db, args);
ret = (UtilLastModified(file) != 0);
pthread_mutex_unlock(&db->lock);
Free(file);
ArrayFree(args);
return ret;*/
}
Array *
DbList(Db * db, size_t nArgs,...)
{
(void) db;
(void) nArgs;
/* TODO */
/*Array *result;
Array *path;
DIR *files;
struct dirent *file;
char *dir;
va_list ap;
if (!db || !nArgs)
{
return NULL;
}
result = ArrayCreate();
if (!result)
{
return NULL;
}
va_start(ap, nArgs);
path = ArrayFromVarArgs(nArgs, ap);
dir = DbDirName(db, path, 0);
pthread_mutex_lock(&db->lock);
files = opendir(dir);
if (!files)
{
ArrayFree(path);
ArrayFree(result);
Free(dir);
pthread_mutex_unlock(&db->lock);
return NULL;
}
while ((file = readdir(files)))
{
size_t namlen = strlen(file->d_name);
if (namlen > 5)
{
int nameOffset = namlen - 5;
if (StrEquals(file->d_name + nameOffset, ".json"))
{
file->d_name[nameOffset] = '\0';
ArrayAdd(result, StrDuplicate(file->d_name));
}
}
}
closedir(files);
ArrayFree(path);
Free(dir);
pthread_mutex_unlock(&db->lock);
return result;*/
return NULL;
}
void
DbListFree(Array * arr)
{
StringArrayFree(arr);
}
HashMap *
DbJson(DbRef * ref)
{
return ref ? ref->json : NULL;
}
bool
DbJsonSet(DbRef * ref, HashMap * json)
{
if (!ref || !json)
{
return false;
}
JsonFree(ref->json);
ref->json = JsonDuplicate(json);
return true;
}
void
DbInit(Db *db)
{
pthread_mutexattr_t attr;
if (!db)
{
return;
}
pthread_mutexattr_init(&attr);
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
pthread_mutex_init(&db->lock, &attr);
pthread_mutexattr_destroy(&attr);
db->mostRecent = NULL;
db->leastRecent = NULL;
db->cacheSize = 0;
if (db->maxCache)
{
db->cache = HashMapCreate();
}
else
{
db->cache = NULL;
}
}
void
DbRefInit(Db *db, DbRef *ref)
{
if (!db || !ref)
{
return;
}
ref->prev = db->mostRecent;
ref->next = NULL;
ref->json = NULL;
ref->name = NULL;
/* As default values to be overwritten by impls */
ref->ts = UtilTsMillis();
ref->size = 0;
if (db->mostRecent)
{
db->mostRecent->next = ref;
}
if (!db->leastRecent)
{
db->leastRecent = ref;
}
db->mostRecent = ref;
}
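
For context, a caller-side sketch of how the generic wrappers above are
meant to be used; the record path "users"/"alice" is a made-up example,
and the comments note which backend hook each call dispatches to.

#include <Db.h>
#include <HashMap.h>

/* Assumes db was opened with DbOpen() or DbOpenLMDB(). */
static void
TouchRecord(Db *db)
{
    DbRef *ref = DbLock(db, 2, "users", "alice");   /* -> db->lockFunc() */
    HashMap *json;

    if (!ref)
    {
        return;                 /* record missing or lock failed */
    }
    json = DbJson(ref);         /* backend-agnostic view of the JSON */
    (void) HashMapGet(json, "display_name");    /* read/modify as needed */
    DbUnlock(db, ref);          /* -> db->unlock(): write back and release */
}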

src/Db/Flat.c (new file): 274 lines added

@@ -0,0 +1,274 @@
#include <Db.h>
#include <Memory.h>
#include <Json.h>
#include <Util.h>
#include <Str.h>
#include <Stream.h>
#include <Log.h>
#include <sys/types.h>
#include <dirent.h>
#include <pthread.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include "Db/Internal.h"
typedef struct FlatDb {
Db base;
char *dir;
/* There's not much to do here. */
} FlatDb;
typedef struct FlatDbRef {
DbRef base;
Stream *stream;
int fd;
} FlatDbRef;
static char
DbSanitiseChar(char input)
{
switch (input)
{
case '/':
return '_';
case '.':
return '-';
}
return input;
}
static char *
DbDirName(FlatDb * db, Array * args, size_t strip)
{
size_t i, j;
char *str = StrConcat(2, db->dir, "/");
for (i = 0; i < ArraySize(args) - strip; i++)
{
char *tmp;
char *sanitise = StrDuplicate(ArrayGet(args, i));
for (j = 0; j < strlen(sanitise); j++)
{
sanitise[j] = DbSanitiseChar(sanitise[j]);
}
tmp = StrConcat(3, str, sanitise, "/");
Free(str);
Free(sanitise);
str = tmp;
}
return str;
}
static char *
DbFileName(FlatDb * db, Array * args)
{
size_t i;
char *str = StrConcat(2, db->dir, "/");
for (i = 0; i < ArraySize(args); i++)
{
char *tmp;
char *arg = StrDuplicate(ArrayGet(args, i));
size_t j = 0;
/* Sanitize name to prevent directory traversal attacks */
while (arg[j])
{
arg[j] = DbSanitiseChar(arg[j]);
j++;
}
tmp = StrConcat(3, str, arg,
(i < ArraySize(args) - 1) ? "/" : ".json");
Free(arg);
Free(str);
str = tmp;
}
return str;
}
static DbRef *
FlatLock(Db *d, Array *dir)
{
FlatDb *db = (FlatDb *) d;
FlatDbRef *ref = NULL;
size_t i;
char *path;
if (!d || !dir)
{
return NULL;
}
pthread_mutex_lock(&d->lock);
path = DbFileName(db, dir);
/* TODO: Caching */
{
int fd = open(path, O_RDWR);
Stream *stream;
struct flock lock;
if (fd == -1)
{
ref = NULL;
goto end;
}
stream = StreamFd(fd);
lock.l_start = 0;
lock.l_len = 0;
lock.l_type = F_WRLCK;
lock.l_whence = SEEK_SET;
if (fcntl(fd, F_SETLK, &lock) < 0)
{
StreamClose(stream);
ref = NULL;
goto end;
}
ref = Malloc(sizeof(*ref));
DbRefInit(d, &(ref->base));
ref->fd = fd;
ref->stream = stream;
ref->base.ts = UtilLastModified(path);
ref->base.json = JsonDecode(stream);
if (!ref->base.json)
{
Free(ref);
StreamClose(stream);
ref = NULL;
goto end;
}
ref->base.name = ArrayCreate();
for (i = 0; i < ArraySize(dir); i++)
{
ArrayAdd(
ref->base.name,
StrDuplicate(ArrayGet(dir, i))
);
}
}
end:
Free(path);
if (!ref)
{
pthread_mutex_unlock(&d->lock);
}
return (DbRef *) ref;
}
static bool
FlatUnlock(Db *d, DbRef *r)
{
FlatDbRef *ref = (FlatDbRef *) r;
if (!d || !r)
{
return false;
}
lseek(ref->fd, 0L, SEEK_SET);
if (ftruncate(ref->fd, 0) < 0)
{
pthread_mutex_unlock(&d->lock);
Log(LOG_ERR, "Failed to truncate file on disk.");
Log(LOG_ERR, "Error on fd %d: %s", ref->fd, strerror(errno));
return false;
}
JsonEncode(ref->base.json, ref->stream, JSON_DEFAULT);
StreamClose(ref->stream);
JsonFree(ref->base.json);
StringArrayFree(ref->base.name);
Free(ref);
pthread_mutex_unlock(&d->lock);
return true;
}
static DbRef *
FlatCreate(Db *d, Array *dir)
{
FlatDb *db = (FlatDb *) d;
char *path, *dirPath;
Stream *fp;
DbRef *ret;
if (!d || !dir)
{
return NULL;
}
pthread_mutex_lock(&d->lock);
path = DbFileName(db, dir);
if (UtilLastModified(path))
{
Free(path);
pthread_mutex_unlock(&d->lock);
return NULL;
}
dirPath = DbDirName(db, dir, 1);
if (UtilMkdir(dirPath, 0750) < 0)
{
Free(path);
Free(dirPath);
pthread_mutex_unlock(&d->lock);
return NULL;
}
Free(dirPath);
fp = StreamOpen(path, "w");
Free(path);
if (!fp)
{
pthread_mutex_unlock(&d->lock);
return NULL;
}
StreamPuts(fp, "{}");
StreamClose(fp);
/* FlatLock() will lock again for us */
pthread_mutex_unlock(&d->lock);
ret = FlatLock(d, dir);
return ret;
}
Db *
DbOpen(char *dir, size_t cache)
{
FlatDb *db;
if (!dir)
{
return NULL;
}
db = Malloc(sizeof(*db));
DbInit(&(db->base));
db->dir = dir;
db->base.cacheSize = cache;
db->base.lockFunc = FlatLock;
db->base.unlock = FlatUnlock;
db->base.create = FlatCreate;
db->base.close = NULL;
return (Db *) db;
}
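
A small sketch of the intended flat-file usage, mainly to show the
sanitised on-disk naming above; the "./data" directory and the record
keys are hypothetical.

#include <Db.h>

static void
FlatExample(void)
{
    Db *db = DbOpen("./data", 0);   /* second arg: cache size, 0 disables it */
    DbRef *ref;

    if (!db)
    {
        return;
    }
    /* DbFileName() maps '.' to '-' and '/' to '_', so this record ends
     * up at ./data/users/alice-smith.json on disk. */
    ref = DbCreate(db, 2, "users", "alice.smith");
    if (ref)
    {
        DbUnlock(db, ref);          /* writes the (empty) JSON object back */
    }
    DbClose(db);
}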

src/Db/Internal.h (new file): 98 lines added

@@ -0,0 +1,98 @@
/*
* Copyright (C) 2022-2024 Jordan Bancino <@jordan:bancino.net>
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation files
* (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef CYTOPLASM_DB_INTERNAL_H
#define CYTOPLASM_DB_INTERNAL_H
#include <HashMap.h>
#include <Db.h>
#include <pthread.h>
/* TODO: Define the base structures to define a database internally.
* All "implementations" shall have them as a first element, so that
* basic, general functions can work on them properly. */
/* The base structure of a database */
struct Db
{
pthread_mutex_t lock;
size_t cacheSize;
size_t maxCache;
HashMap *cache;
/*
* The cache uses a double linked list (see DbRef
* below) to know which objects are most and least
* recently used. The following diagram helps me
* know what way all the pointers go, because it
* can get very confusing sometimes. For example,
* there's nothing stopping "next" from pointing to
* least recent, and "prev" from pointing to most
* recent, so hopefully this clarifies the pointer
* terminology used when dealing with the linked
* list:
*
* mostRecent leastRecent
* | prev prev | prev
* +---+ ---> +---+ ---> +---+ ---> NULL
* |ref| |ref| |ref|
* NULL <--- +---+ <--- +---+ <--- +---+
* next next next
*/
DbRef *mostRecent;
DbRef *leastRecent;
/* TODO: Functions for implementation-specific operations
* (opening a ref, closing a db, removing an entry, ...) */
DbRef * (*lockFunc)(Db *, Array *);
DbRef * (*create)(Db *, Array *);
bool (*unlock)(Db *, DbRef *);
void (*close)(Db *);
/* Implementation-specific constructs */
};
struct DbRef
{
HashMap *json;
uint64_t ts;
size_t size;
Array *name;
DbRef *prev;
DbRef *next;
/* TODO: Functions for implementation-specific operations */
/* Implementation-specific constructs */
};
extern void DbInit(Db *);
extern void DbRefInit(Db *, DbRef *);
extern void StringArrayFree(Array *);
#endif
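
To make the embedding pattern described above concrete, a sketch of how
a hypothetical extra backend could wire itself up; the MemDb type,
DbOpenMem(), and the stub callbacks are invented for illustration
(src/Db/Flat.c is the real reference implementation).

#include <Db.h>
#include <Memory.h>

#include "Db/Internal.h"

/* Hypothetical in-memory backend: the Db base must be the first member
 * so the generic code in Db/Db.c can treat a MemDb * as a Db *. */
typedef struct MemDb
{
    Db base;            /* must come first */
    HashMap *root;      /* backend-specific storage */
} MemDb;

static DbRef *
MemLock(Db *d, Array *path)
{
    (void) d;
    (void) path;
    return NULL;        /* stub: locate record, DbRefInit(), fill json/name */
}

static DbRef *
MemCreate(Db *d, Array *path)
{
    (void) d;
    (void) path;
    return NULL;        /* stub */
}

static bool
MemUnlock(Db *d, DbRef *ref)
{
    (void) d;
    (void) ref;
    return false;       /* stub */
}

Db *
DbOpenMem(size_t cache)
{
    MemDb *db = Malloc(sizeof(*db));

    if (!db)
    {
        return NULL;
    }
    db->base.maxCache = cache;  /* set before DbInit() so it can build the cache */
    DbInit(&(db->base));        /* mutex + LRU bookkeeping */
    db->base.lockFunc = MemLock;
    db->base.create = MemCreate;
    db->base.unlock = MemUnlock;
    db->base.close = NULL;      /* nothing extra to tear down */
    db->root = HashMapCreate();
    return (Db *) db;
}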

src/Db/LMDB.c (new file): 39 lines added

@@ -0,0 +1,39 @@
#include <Memory.h>
#include <Db.h>
#include "Db/Internal.h"
#ifdef EDB_LMDB
typedef struct LMDB {
Db base; /* The base implementation required to pass */
} LMDB;
Db *
DbOpenLMDB(char *dir, size_t size)
{
/* TODO */
LMDB *db;
if (!dir || !size)
{
return NULL;
}
db = Malloc(sizeof(*db));
DbInit(db);
(void) size;
(void) dir;
return db;
}
#else
Db *
DbOpenLMDB(char *dir, size_t size)
{
/* Unimplemented function */
(void) size;
(void) dir;
return NULL;
}
#endif