Use std::unordered_map and std::mutex instead for object mapping and related cleanups

pull/43/head
jackun 4 years ago
parent 54950853b5
commit b91be85ff9
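
The gist of the change: MangoHud's object bookkeeping moves off the bundled Mesa _mesa_HashTable and its C11 <threads.h> shim onto a std::unordered_map guarded by a std::mutex. A minimal sketch of that pattern, with illustrative names rather than the actual MangoHud code:

#include <cstdint>
#include <mutex>
#include <unordered_map>

// Illustrative only: object name -> user data, replacing
// _mesa_HashTable + mtx_t with standard containers and locking.
static std::mutex object_mutex;
static std::unordered_map<uint64_t, void*> object_map;

void* object_lookup(uint64_t key) {
    std::lock_guard<std::mutex> lock(object_mutex);
    auto it = object_map.find(key);
    return it == object_map.end() ? nullptr : it->second;
}

void object_insert(uint64_t key, void* data) {
    std::lock_guard<std::mutex> lock(object_mutex);
    object_map[key] = data;  // replaces any existing entry, like _mesa_HashInsert()
}

void object_remove(uint64_t key) {
    std::lock_guard<std::mutex> lock(object_mutex);
    object_map.erase(key);
}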

meson.build
@@ -208,11 +208,8 @@ vk_enum_to_str = custom_target(
)
util_files = files(
'src/mesa/util/hash_table.c',
'src/mesa/util/os_socket.c',
'src/mesa/util/os_time.c',
'src/mesa/util/ralloc.c',
'src/mesa/main/hash.c',
)
subdir('modules/ImGui')

src/mesa/c11/threads.h
@@ -1,73 +0,0 @@
/*
* C11 <threads.h> emulation library
*
* (C) Copyright yohhoy 2012.
* Distributed under the Boost Software License, Version 1.0.
*
* Permission is hereby granted, free of charge, to any person or organization
* obtaining a copy of the software and accompanying documentation covered by
* this license (the "Software") to use, reproduce, display, distribute,
* execute, and transmit the Software, and to prepare derivative works of the
* Software, and to permit third-parties to whom the Software is furnished to
* do so, all subject to the following:
*
* The copyright notices in the Software and this entire statement, including
* the above license grant, this restriction and the following disclaimer,
* must be included in all copies of the Software, in whole or in part, and
* all derivative works of the Software, unless such copies or derivative
* works are solely in the form of machine-executable object code generated by
* a source language processor.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
* SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
* FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef EMULATED_THREADS_H_INCLUDED_
#define EMULATED_THREADS_H_INCLUDED_
#include <time.h>
#ifndef TIME_UTC
#define TIME_UTC 1
#endif
#include "../c99_compat.h" /* for `inline` */
/*---------------------------- types ----------------------------*/
typedef void (*tss_dtor_t)(void*);
typedef int (*thrd_start_t)(void*);
/*-------------------- enumeration constants --------------------*/
enum {
mtx_plain = 0,
mtx_try = 1,
mtx_timed = 2,
mtx_recursive = 4
};
enum {
thrd_success = 0, // succeeded
thrd_timeout, // timeout
thrd_error, // failed
thrd_busy, // resource busy
thrd_nomem // out of memory
};
/*-------------------------- functions --------------------------*/
#if defined(_WIN32) && !defined(__CYGWIN__)
#include "threads_win32.h"
#elif defined(HAVE_PTHREAD)
#include "threads_posix.h"
#else
#error Not supported on this platform.
#endif
#endif /* EMULATED_THREADS_H_INCLUDED_ */

src/mesa/c11/threads_posix.h
@@ -1,396 +0,0 @@
/*
* C11 <threads.h> emulation library
*
* (C) Copyright yohhoy 2012.
* Distributed under the Boost Software License, Version 1.0.
*
* Permission is hereby granted, free of charge, to any person or organization
* obtaining a copy of the software and accompanying documentation covered by
* this license (the "Software") to use, reproduce, display, distribute,
* execute, and transmit the Software, and to prepare derivative works of the
* Software, and to permit third-parties to whom the Software is furnished to
* do so, all subject to the following:
*
* The copyright notices in the Software and this entire statement, including
* the above license grant, this restriction and the following disclaimer,
* must be included in all copies of the Software, in whole or in part, and
* all derivative works of the Software, unless such copies or derivative
* works are solely in the form of machine-executable object code generated by
* a source language processor.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
* SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
* FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <stdlib.h>
#ifndef assert
#include <assert.h>
#endif
#include <limits.h>
#include <errno.h>
#include <unistd.h>
#include <sched.h>
#include <stdint.h> /* for intptr_t */
/*
Configuration macro:
EMULATED_THREADS_USE_NATIVE_TIMEDLOCK
Use pthread_mutex_timedlock() for `mtx_timedlock()'
Otherwise use mtx_trylock() + *busy loop* emulation.
*/
#if !defined(__CYGWIN__) && !defined(__APPLE__) && !defined(__NetBSD__)
#define EMULATED_THREADS_USE_NATIVE_TIMEDLOCK
#endif
#include <pthread.h>
/*---------------------------- macros ----------------------------*/
#define ONCE_FLAG_INIT PTHREAD_ONCE_INIT
#ifdef PTHREAD_DESTRUCTOR_ITERATIONS
#define TSS_DTOR_ITERATIONS PTHREAD_DESTRUCTOR_ITERATIONS
#else
#define TSS_DTOR_ITERATIONS 1 // assume TSS dtor MAY be called at least once.
#endif
// FIXME: temporary non-standard hack to ease transition
#define _MTX_INITIALIZER_NP PTHREAD_MUTEX_INITIALIZER
/*---------------------------- types ----------------------------*/
typedef pthread_cond_t cnd_t;
typedef pthread_t thrd_t;
typedef pthread_key_t tss_t;
typedef pthread_mutex_t mtx_t;
typedef pthread_once_t once_flag;
/*
Implementation limits:
- Conditional emulation for "mutex with timeout"
(see EMULATED_THREADS_USE_NATIVE_TIMEDLOCK macro)
*/
struct impl_thrd_param {
thrd_start_t func;
void *arg;
};
static inline void *
impl_thrd_routine(void *p)
{
struct impl_thrd_param pack = *((struct impl_thrd_param *)p);
free(p);
return (void*)(intptr_t)pack.func(pack.arg);
}
/*--------------- 7.25.2 Initialization functions ---------------*/
// 7.25.2.1
static inline void
call_once(once_flag *flag, void (*func)(void))
{
pthread_once(flag, func);
}
/*------------- 7.25.3 Condition variable functions -------------*/
// 7.25.3.1
static inline int
cnd_broadcast(cnd_t *cond)
{
assert(cond != NULL);
return (pthread_cond_broadcast(cond) == 0) ? thrd_success : thrd_error;
}
// 7.25.3.2
static inline void
cnd_destroy(cnd_t *cond)
{
assert(cond);
pthread_cond_destroy(cond);
}
// 7.25.3.3
static inline int
cnd_init(cnd_t *cond)
{
assert(cond != NULL);
return (pthread_cond_init(cond, NULL) == 0) ? thrd_success : thrd_error;
}
// 7.25.3.4
static inline int
cnd_signal(cnd_t *cond)
{
assert(cond != NULL);
return (pthread_cond_signal(cond) == 0) ? thrd_success : thrd_error;
}
// 7.25.3.5
static inline int
cnd_timedwait(cnd_t *cond, mtx_t *mtx, const struct timespec *abs_time)
{
int rt;
assert(mtx != NULL);
assert(cond != NULL);
assert(abs_time != NULL);
rt = pthread_cond_timedwait(cond, mtx, abs_time);
if (rt == ETIMEDOUT)
return thrd_busy;
return (rt == 0) ? thrd_success : thrd_error;
}
// 7.25.3.6
static inline int
cnd_wait(cnd_t *cond, mtx_t *mtx)
{
assert(mtx != NULL);
assert(cond != NULL);
return (pthread_cond_wait(cond, mtx) == 0) ? thrd_success : thrd_error;
}
/*-------------------- 7.25.4 Mutex functions --------------------*/
// 7.25.4.1
static inline void
mtx_destroy(mtx_t *mtx)
{
assert(mtx != NULL);
pthread_mutex_destroy(mtx);
}
/*
* XXX: Workaround when building with -O0 and without pthreads link.
*
* In such cases constant folding and dead code elimination won't be
* available, thus the compiler will always add the pthread_mutexattr*
* functions into the binary. As we try to link, we'll fail as the
* symbols are unresolved.
*
* Ideally we'd enable the optimisations locally, yet that does not
* seem to work.
*
* So the alternative workaround is to annotate the symbols as weak.
* Thus the linker will be happy and things don't clash when building
* with -O1 or greater.
*/
#if defined(HAVE_FUNC_ATTRIBUTE_WEAK) && !defined(__CYGWIN__)
__attribute__((weak))
int pthread_mutexattr_init(pthread_mutexattr_t *attr);
__attribute__((weak))
int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type);
__attribute__((weak))
int pthread_mutexattr_destroy(pthread_mutexattr_t *attr);
#endif
// 7.25.4.2
static inline int
mtx_init(mtx_t *mtx, int type)
{
pthread_mutexattr_t attr;
assert(mtx != NULL);
if (type != mtx_plain && type != mtx_timed && type != mtx_try
&& type != (mtx_plain|mtx_recursive)
&& type != (mtx_timed|mtx_recursive)
&& type != (mtx_try|mtx_recursive))
return thrd_error;
if ((type & mtx_recursive) == 0) {
pthread_mutex_init(mtx, NULL);
return thrd_success;
}
pthread_mutexattr_init(&attr);
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
pthread_mutex_init(mtx, &attr);
pthread_mutexattr_destroy(&attr);
return thrd_success;
}
// 7.25.4.3
static inline int
mtx_lock(mtx_t *mtx)
{
assert(mtx != NULL);
return (pthread_mutex_lock(mtx) == 0) ? thrd_success : thrd_error;
}
static inline int
mtx_trylock(mtx_t *mtx);
static inline void
thrd_yield(void);
// 7.25.4.4
static inline int
mtx_timedlock(mtx_t *mtx, const struct timespec *ts)
{
assert(mtx != NULL);
assert(ts != NULL);
{
#ifdef EMULATED_THREADS_USE_NATIVE_TIMEDLOCK
int rt;
rt = pthread_mutex_timedlock(mtx, ts);
if (rt == 0)
return thrd_success;
return (rt == ETIMEDOUT) ? thrd_busy : thrd_error;
#else
time_t expire = time(NULL);
expire += ts->tv_sec;
while (mtx_trylock(mtx) != thrd_success) {
time_t now = time(NULL);
if (expire < now)
return thrd_busy;
// busy loop!
thrd_yield();
}
return thrd_success;
#endif
}
}
// 7.25.4.5
static inline int
mtx_trylock(mtx_t *mtx)
{
assert(mtx != NULL);
return (pthread_mutex_trylock(mtx) == 0) ? thrd_success : thrd_busy;
}
// 7.25.4.6
static inline int
mtx_unlock(mtx_t *mtx)
{
assert(mtx != NULL);
return (pthread_mutex_unlock(mtx) == 0) ? thrd_success : thrd_error;
}
/*------------------- 7.25.5 Thread functions -------------------*/
// 7.25.5.1
static inline int
thrd_create(thrd_t *thr, thrd_start_t func, void *arg)
{
struct impl_thrd_param *pack;
assert(thr != NULL);
pack = (struct impl_thrd_param *)malloc(sizeof(struct impl_thrd_param));
if (!pack) return thrd_nomem;
pack->func = func;
pack->arg = arg;
if (pthread_create(thr, NULL, impl_thrd_routine, pack) != 0) {
free(pack);
return thrd_error;
}
return thrd_success;
}
// 7.25.5.2
static inline thrd_t
thrd_current(void)
{
return pthread_self();
}
// 7.25.5.3
static inline int
thrd_detach(thrd_t thr)
{
return (pthread_detach(thr) == 0) ? thrd_success : thrd_error;
}
// 7.25.5.4
static inline int
thrd_equal(thrd_t thr0, thrd_t thr1)
{
return pthread_equal(thr0, thr1);
}
// 7.25.5.5
static inline void
thrd_exit(int res)
{
pthread_exit((void*)(intptr_t)res);
}
// 7.25.5.6
static inline int
thrd_join(thrd_t thr, int *res)
{
void *code;
if (pthread_join(thr, &code) != 0)
return thrd_error;
if (res)
*res = (int)(intptr_t)code;
return thrd_success;
}
// 7.25.5.7
static inline void
thrd_sleep(const struct timespec *time_point, struct timespec *remaining)
{
assert(time_point != NULL);
nanosleep(time_point, remaining);
}
// 7.25.5.8
static inline void
thrd_yield(void)
{
sched_yield();
}
/*----------- 7.25.6 Thread-specific storage functions -----------*/
// 7.25.6.1
static inline int
tss_create(tss_t *key, tss_dtor_t dtor)
{
assert(key != NULL);
return (pthread_key_create(key, dtor) == 0) ? thrd_success : thrd_error;
}
// 7.25.6.2
static inline void
tss_delete(tss_t key)
{
pthread_key_delete(key);
}
// 7.25.6.3
static inline void *
tss_get(tss_t key)
{
return pthread_getspecific(key);
}
// 7.25.6.4
static inline int
tss_set(tss_t key, void *val)
{
return (pthread_setspecific(key, val) == 0) ? thrd_success : thrd_error;
}
/*-------------------- 7.25.7 Time functions --------------------*/
// 7.25.7.1
#ifndef HAVE_TIMESPEC_GET
static inline int
timespec_get(struct timespec *ts, int base)
{
if (!ts) return 0;
if (base == TIME_UTC) {
clock_gettime(CLOCK_REALTIME, ts);
return base;
}
return 0;
}
#endif
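
Where pthread_mutex_timedlock() is unavailable, the header above emulates mtx_timedlock() with a trylock-plus-yield busy loop. The same pattern in C++, as a rough sketch (std::timed_mutex makes the emulation unnecessary where it exists; names are illustrative):

#include <chrono>
#include <mutex>
#include <thread>

// Emulated timed lock: spin on try_lock() until it succeeds or the
// deadline passes, yielding between attempts (the "busy loop!" above).
template <class Clock, class Duration>
bool timed_lock_emulated(std::mutex& m,
                         const std::chrono::time_point<Clock, Duration>& deadline) {
    while (!m.try_lock()) {
        if (Clock::now() > deadline)
            return false;               // equivalent of thrd_busy
        std::this_thread::yield();
    }
    return true;                        // equivalent of thrd_success
}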

src/mesa/c11/threads_win32.h
@@ -1,653 +0,0 @@
/*
* C11 <threads.h> emulation library
*
* (C) Copyright yohhoy 2012.
* Distributed under the Boost Software License, Version 1.0.
*
* Permission is hereby granted, free of charge, to any person or organization
* obtaining a copy of the software and accompanying documentation covered by
* this license (the "Software") to use, reproduce, display, distribute,
* execute, and transmit the Software, and to prepare derivative works of the
* Software, and to permit third-parties to whom the Software is furnished to
* do so, all subject to the following:
*
* The copyright notices in the Software and this entire statement, including
* the above license grant, this restriction and the following disclaimer,
* must be included in all copies of the Software, in whole or in part, and
* all derivative works of the Software, unless such copies or derivative
* works are solely in the form of machine-executable object code generated by
* a source language processor.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
* SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
* FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef assert
#include <assert.h>
#endif
#include <limits.h>
#include <errno.h>
#include <process.h> // MSVCRT
#include <stdlib.h>
#include <string.h> // for memcpy
/*
Configuration macro:
EMULATED_THREADS_USE_NATIVE_CALL_ONCE
Use native WindowsAPI one-time initialization function.
(requires WinVista or later)
Otherwise emulate by mtx_trylock() + *busy loop* for WinXP.
EMULATED_THREADS_USE_NATIVE_CV
Use native WindowsAPI condition variable object.
(requires WinVista or later)
Otherwise use emulated implementation for WinXP.
EMULATED_THREADS_TSS_DTOR_SLOTNUM
Max registerable TSS dtor number.
*/
// XXX: Retain XP compatibility
#if 0
#if _WIN32_WINNT >= 0x0600
// Prefer native WindowsAPI on newer environment.
#if !defined(__MINGW32__)
#define EMULATED_THREADS_USE_NATIVE_CALL_ONCE
#endif
#define EMULATED_THREADS_USE_NATIVE_CV
#endif
#endif
#define EMULATED_THREADS_TSS_DTOR_SLOTNUM 64 // see TLS_MINIMUM_AVAILABLE
#include <windows.h>
// check configuration
#if defined(EMULATED_THREADS_USE_NATIVE_CALL_ONCE) && (_WIN32_WINNT < 0x0600)
#error EMULATED_THREADS_USE_NATIVE_CALL_ONCE requires _WIN32_WINNT>=0x0600
#endif
#if defined(EMULATED_THREADS_USE_NATIVE_CV) && (_WIN32_WINNT < 0x0600)
#error EMULATED_THREADS_USE_NATIVE_CV requires _WIN32_WINNT>=0x0600
#endif
/* Visual Studio 2015 and later */
#ifdef _MSC_VER
#define HAVE_TIMESPEC_GET
#endif
/*---------------------------- macros ----------------------------*/
#ifdef EMULATED_THREADS_USE_NATIVE_CALL_ONCE
#define ONCE_FLAG_INIT INIT_ONCE_STATIC_INIT
#else
#define ONCE_FLAG_INIT {0}
#endif
#define TSS_DTOR_ITERATIONS 1
// FIXME: temporary non-standard hack to ease transition
#define _MTX_INITIALIZER_NP {(PCRITICAL_SECTION_DEBUG)-1, -1, 0, 0, 0, 0}
/*---------------------------- types ----------------------------*/
typedef struct cnd_t {
#ifdef EMULATED_THREADS_USE_NATIVE_CV
CONDITION_VARIABLE condvar;
#else
int blocked;
int gone;
int to_unblock;
HANDLE sem_queue;
HANDLE sem_gate;
CRITICAL_SECTION monitor;
#endif
} cnd_t;
typedef HANDLE thrd_t;
typedef DWORD tss_t;
typedef CRITICAL_SECTION mtx_t;
#ifdef EMULATED_THREADS_USE_NATIVE_CALL_ONCE
typedef INIT_ONCE once_flag;
#else
typedef struct once_flag_t {
volatile LONG status;
} once_flag;
#endif
static inline void * tss_get(tss_t key);
static inline void thrd_yield(void);
static inline int mtx_trylock(mtx_t *mtx);
static inline int mtx_lock(mtx_t *mtx);
static inline int mtx_unlock(mtx_t *mtx);
/*
Implementation limits:
- Conditional emulation for "Initialization functions"
(see EMULATED_THREADS_USE_NATIVE_CALL_ONCE macro)
- Emulated `mtx_timedlock()' with mtx_trylock() + *busy loop*
*/
static void impl_tss_dtor_invoke(void); // forward decl.
struct impl_thrd_param {
thrd_start_t func;
void *arg;
};
static unsigned __stdcall impl_thrd_routine(void *p)
{
struct impl_thrd_param pack;
int code;
memcpy(&pack, p, sizeof(struct impl_thrd_param));
free(p);
code = pack.func(pack.arg);
impl_tss_dtor_invoke();
return (unsigned)code;
}
static DWORD impl_timespec2msec(const struct timespec *ts)
{
return (DWORD)((ts->tv_sec * 1000U) + (ts->tv_nsec / 1000000L));
}
#ifdef EMULATED_THREADS_USE_NATIVE_CALL_ONCE
struct impl_call_once_param { void (*func)(void); };
static BOOL CALLBACK impl_call_once_callback(PINIT_ONCE InitOnce, PVOID Parameter, PVOID *Context)
{
struct impl_call_once_param *param = (struct impl_call_once_param*)Parameter;
(param->func)();
((void)InitOnce); ((void)Context); // suppress warning
return TRUE;
}
#endif // ifdef EMULATED_THREADS_USE_NATIVE_CALL_ONCE
#ifndef EMULATED_THREADS_USE_NATIVE_CV
/*
Note:
The implementation of condition variable is ported from Boost.Interprocess
See http://www.boost.org/boost/interprocess/sync/windows/condition.hpp
*/
static void impl_cond_do_signal(cnd_t *cond, int broadcast)
{
int nsignal = 0;
EnterCriticalSection(&cond->monitor);
if (cond->to_unblock != 0) {
if (cond->blocked == 0) {
LeaveCriticalSection(&cond->monitor);
return;
}
if (broadcast) {
cond->to_unblock += nsignal = cond->blocked;
cond->blocked = 0;
} else {
nsignal = 1;
cond->to_unblock++;
cond->blocked--;
}
} else if (cond->blocked > cond->gone) {
WaitForSingleObject(cond->sem_gate, INFINITE);
if (cond->gone != 0) {
cond->blocked -= cond->gone;
cond->gone = 0;
}
if (broadcast) {
nsignal = cond->to_unblock = cond->blocked;
cond->blocked = 0;
} else {
nsignal = cond->to_unblock = 1;
cond->blocked--;
}
}
LeaveCriticalSection(&cond->monitor);
if (0 < nsignal)
ReleaseSemaphore(cond->sem_queue, nsignal, NULL);
}
static int impl_cond_do_wait(cnd_t *cond, mtx_t *mtx, const struct timespec *ts)
{
int nleft = 0;
int ngone = 0;
int timeout = 0;
DWORD w;
WaitForSingleObject(cond->sem_gate, INFINITE);
cond->blocked++;
ReleaseSemaphore(cond->sem_gate, 1, NULL);
mtx_unlock(mtx);
w = WaitForSingleObject(cond->sem_queue, ts ? impl_timespec2msec(ts) : INFINITE);
timeout = (w == WAIT_TIMEOUT);
EnterCriticalSection(&cond->monitor);
if ((nleft = cond->to_unblock) != 0) {
if (timeout) {
if (cond->blocked != 0) {
cond->blocked--;
} else {
cond->gone++;
}
}
if (--cond->to_unblock == 0) {
if (cond->blocked != 0) {
ReleaseSemaphore(cond->sem_gate, 1, NULL);
nleft = 0;
}
else if ((ngone = cond->gone) != 0) {
cond->gone = 0;
}
}
} else if (++cond->gone == INT_MAX/2) {
WaitForSingleObject(cond->sem_gate, INFINITE);
cond->blocked -= cond->gone;
ReleaseSemaphore(cond->sem_gate, 1, NULL);
cond->gone = 0;
}
LeaveCriticalSection(&cond->monitor);
if (nleft == 1) {
while (ngone--)
WaitForSingleObject(cond->sem_queue, INFINITE);
ReleaseSemaphore(cond->sem_gate, 1, NULL);
}
mtx_lock(mtx);
return timeout ? thrd_busy : thrd_success;
}
#endif // ifndef EMULATED_THREADS_USE_NATIVE_CV
static struct impl_tss_dtor_entry {
tss_t key;
tss_dtor_t dtor;
} impl_tss_dtor_tbl[EMULATED_THREADS_TSS_DTOR_SLOTNUM];
static int impl_tss_dtor_register(tss_t key, tss_dtor_t dtor)
{
int i;
for (i = 0; i < EMULATED_THREADS_TSS_DTOR_SLOTNUM; i++) {
if (!impl_tss_dtor_tbl[i].dtor)
break;
}
if (i == EMULATED_THREADS_TSS_DTOR_SLOTNUM)
return 1;
impl_tss_dtor_tbl[i].key = key;
impl_tss_dtor_tbl[i].dtor = dtor;
return 0;
}
static void impl_tss_dtor_invoke()
{
int i;
for (i = 0; i < EMULATED_THREADS_TSS_DTOR_SLOTNUM; i++) {
if (impl_tss_dtor_tbl[i].dtor) {
void* val = tss_get(impl_tss_dtor_tbl[i].key);
if (val)
(impl_tss_dtor_tbl[i].dtor)(val);
}
}
}
/*--------------- 7.25.2 Initialization functions ---------------*/
// 7.25.2.1
static inline void
call_once(once_flag *flag, void (*func)(void))
{
assert(flag && func);
#ifdef EMULATED_THREADS_USE_NATIVE_CALL_ONCE
{
struct impl_call_once_param param;
param.func = func;
InitOnceExecuteOnce(flag, impl_call_once_callback, (PVOID)&param, NULL);
}
#else
if (InterlockedCompareExchange(&flag->status, 1, 0) == 0) {
(func)();
InterlockedExchange(&flag->status, 2);
} else {
while (flag->status == 1) {
// busy loop!
thrd_yield();
}
}
#endif
}
/*------------- 7.25.3 Condition variable functions -------------*/
// 7.25.3.1
static inline int
cnd_broadcast(cnd_t *cond)
{
if (!cond) return thrd_error;
#ifdef EMULATED_THREADS_USE_NATIVE_CV
WakeAllConditionVariable(&cond->condvar);
#else
impl_cond_do_signal(cond, 1);
#endif
return thrd_success;
}
// 7.25.3.2
static inline void
cnd_destroy(cnd_t *cond)
{
assert(cond);
#ifdef EMULATED_THREADS_USE_NATIVE_CV
// do nothing
#else
CloseHandle(cond->sem_queue);
CloseHandle(cond->sem_gate);
DeleteCriticalSection(&cond->monitor);
#endif
}
// 7.25.3.3
static inline int
cnd_init(cnd_t *cond)
{
if (!cond) return thrd_error;
#ifdef EMULATED_THREADS_USE_NATIVE_CV
InitializeConditionVariable(&cond->condvar);
#else
cond->blocked = 0;
cond->gone = 0;
cond->to_unblock = 0;
cond->sem_queue = CreateSemaphore(NULL, 0, LONG_MAX, NULL);
cond->sem_gate = CreateSemaphore(NULL, 1, 1, NULL);
InitializeCriticalSection(&cond->monitor);
#endif
return thrd_success;
}
// 7.25.3.4
static inline int
cnd_signal(cnd_t *cond)
{
if (!cond) return thrd_error;
#ifdef EMULATED_THREADS_USE_NATIVE_CV
WakeConditionVariable(&cond->condvar);
#else
impl_cond_do_signal(cond, 0);
#endif
return thrd_success;
}
// 7.25.3.5
static inline int
cnd_timedwait(cnd_t *cond, mtx_t *mtx, const struct timespec *abs_time)
{
if (!cond || !mtx || !abs_time) return thrd_error;
#ifdef EMULATED_THREADS_USE_NATIVE_CV
if (SleepConditionVariableCS(&cond->condvar, mtx, impl_timespec2msec(abs_time)))
return thrd_success;
return (GetLastError() == ERROR_TIMEOUT) ? thrd_busy : thrd_error;
#else
return impl_cond_do_wait(cond, mtx, abs_time);
#endif
}
// 7.25.3.6
static inline int
cnd_wait(cnd_t *cond, mtx_t *mtx)
{
if (!cond || !mtx) return thrd_error;
#ifdef EMULATED_THREADS_USE_NATIVE_CV
SleepConditionVariableCS(&cond->condvar, mtx, INFINITE);
#else
impl_cond_do_wait(cond, mtx, NULL);
#endif
return thrd_success;
}
/*-------------------- 7.25.4 Mutex functions --------------------*/
// 7.25.4.1
static inline void
mtx_destroy(mtx_t *mtx)
{
assert(mtx);
DeleteCriticalSection(mtx);
}
// 7.25.4.2
static inline int
mtx_init(mtx_t *mtx, int type)
{
if (!mtx) return thrd_error;
if (type != mtx_plain && type != mtx_timed && type != mtx_try
&& type != (mtx_plain|mtx_recursive)
&& type != (mtx_timed|mtx_recursive)
&& type != (mtx_try|mtx_recursive))
return thrd_error;
InitializeCriticalSection(mtx);
return thrd_success;
}
// 7.25.4.3
static inline int
mtx_lock(mtx_t *mtx)
{
if (!mtx) return thrd_error;
EnterCriticalSection(mtx);
return thrd_success;
}
// 7.25.4.4
static inline int
mtx_timedlock(mtx_t *mtx, const struct timespec *ts)
{
time_t expire, now;
if (!mtx || !ts) return thrd_error;
expire = time(NULL);
expire += ts->tv_sec;
while (mtx_trylock(mtx) != thrd_success) {
now = time(NULL);
if (expire < now)
return thrd_busy;
// busy loop!
thrd_yield();
}
return thrd_success;
}
// 7.25.4.5
static inline int
mtx_trylock(mtx_t *mtx)
{
if (!mtx) return thrd_error;
return TryEnterCriticalSection(mtx) ? thrd_success : thrd_busy;
}
// 7.25.4.6
static inline int
mtx_unlock(mtx_t *mtx)
{
if (!mtx) return thrd_error;
LeaveCriticalSection(mtx);
return thrd_success;
}
/*------------------- 7.25.5 Thread functions -------------------*/
// 7.25.5.1
static inline int
thrd_create(thrd_t *thr, thrd_start_t func, void *arg)
{
struct impl_thrd_param *pack;
uintptr_t handle;
if (!thr) return thrd_error;
pack = (struct impl_thrd_param *)malloc(sizeof(struct impl_thrd_param));
if (!pack) return thrd_nomem;
pack->func = func;
pack->arg = arg;
handle = _beginthreadex(NULL, 0, impl_thrd_routine, pack, 0, NULL);
if (handle == 0) {
if (errno == EAGAIN || errno == EACCES)
return thrd_nomem;
return thrd_error;
}
*thr = (thrd_t)handle;
return thrd_success;
}
#if 0
// 7.25.5.2
static inline thrd_t
thrd_current(void)
{
HANDLE hCurrentThread;
BOOL bRet;
/* GetCurrentThread() returns a pseudo-handle, which we need
* to pass to DuplicateHandle(). Only the resulting handle can be used
* from other threads.
*
* Note that neither handle can be compared to the one returned by thrd_create().
* Only the thread IDs - as returned by GetThreadId() and GetCurrentThreadId()
* can be compared directly.
*
* Other potential solutions would be:
* - define thrd_t as a thread ID, but this would mean we'd need to OpenThread() for many operations
* - use malloc'ed memory for thrd_t. This would imply using TLS for current thread.
*
* Neither is particularly nice.
*
* Life would be much easier if C11 threads had different abstractions for
* threads and thread IDs, just like C++11 threads do...
*/
bRet = DuplicateHandle(GetCurrentProcess(), // source process (pseudo) handle
GetCurrentThread(), // source (pseudo) handle
GetCurrentProcess(), // target process
&hCurrentThread, // target handle
0,
FALSE,
DUPLICATE_SAME_ACCESS);
assert(bRet);
if (!bRet) {
hCurrentThread = GetCurrentThread();
}
return hCurrentThread;
}
#endif
// 7.25.5.3
static inline int
thrd_detach(thrd_t thr)
{
CloseHandle(thr);
return thrd_success;
}
// 7.25.5.4
static inline int
thrd_equal(thrd_t thr0, thrd_t thr1)
{
return GetThreadId(thr0) == GetThreadId(thr1);
}
// 7.25.5.5
static inline void
thrd_exit(int res)
{
impl_tss_dtor_invoke();
_endthreadex((unsigned)res);
}
// 7.25.5.6
static inline int
thrd_join(thrd_t thr, int *res)
{
DWORD w, code;
w = WaitForSingleObject(thr, INFINITE);
if (w != WAIT_OBJECT_0)
return thrd_error;
if (res) {
if (!GetExitCodeThread(thr, &code)) {
CloseHandle(thr);
return thrd_error;
}
*res = (int)code;
}
CloseHandle(thr);
return thrd_success;
}
// 7.25.5.7
static inline void
thrd_sleep(const struct timespec *time_point, struct timespec *remaining)
{
assert(time_point);
assert(!remaining); /* not implemented */
Sleep(impl_timespec2msec(time_point));
}
// 7.25.5.8
static inline void
thrd_yield(void)
{
SwitchToThread();
}
/*----------- 7.25.6 Thread-specific storage functions -----------*/
// 7.25.6.1
static inline int
tss_create(tss_t *key, tss_dtor_t dtor)
{
if (!key) return thrd_error;
*key = TlsAlloc();
if (dtor) {
if (impl_tss_dtor_register(*key, dtor)) {
TlsFree(*key);
return thrd_error;
}
}
return (*key != 0xFFFFFFFF) ? thrd_success : thrd_error;
}
// 7.25.6.2
static inline void
tss_delete(tss_t key)
{
TlsFree(key);
}
// 7.25.6.3
static inline void *
tss_get(tss_t key)
{
return TlsGetValue(key);
}
// 7.25.6.4
static inline int
tss_set(tss_t key, void *val)
{
return TlsSetValue(key, val) ? thrd_success : thrd_error;
}
/*-------------------- 7.25.7 Time functions --------------------*/
// 7.25.7.1
#ifndef HAVE_TIMESPEC_GET
static inline int
timespec_get(struct timespec *ts, int base)
{
if (!ts) return 0;
if (base == TIME_UTC) {
ts->tv_sec = time(NULL);
ts->tv_nsec = 0;
return base;
}
return 0;
}
#endif
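
The non-native call_once() path above is a classic three-state spin: the first caller compare-exchanges 0 -> 1, runs the function, then publishes 2, while losers yield until they observe 2. A sketch of the same pattern with std::atomic (illustrative only; std::call_once is the standard C++ answer):

#include <atomic>
#include <thread>

enum : int { ONCE_IDLE = 0, ONCE_RUNNING = 1, ONCE_DONE = 2 };

inline void spin_call_once(std::atomic<int>& flag, void (*func)()) {
    int expected = ONCE_IDLE;
    if (flag.compare_exchange_strong(expected, ONCE_RUNNING)) {
        func();                          // first caller runs the function once
        flag.store(ONCE_DONE);
    } else {
        while (flag.load() != ONCE_DONE)
            std::this_thread::yield();   // busy loop, as in the emulation above
    }
}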

src/mesa/fast_urem_by_const.h
@@ -1,74 +0,0 @@
/*
* Copyright © 2010 Valve Software
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <stdint.h>
/*
* Code for fast 32-bit unsigned remainder, based off of "Faster Remainder by
* Direct Computation: Applications to Compilers and Software Libraries,"
* available at https://arxiv.org/pdf/1902.01961.pdf.
*
* util_fast_urem32(n, d, REMAINDER_MAGIC(d)) returns the same thing as
* n % d for any unsigned n and d, however it compiles down to only a few
* multiplications, so it should be faster than plain uint32_t modulo if the
* same divisor is used many times.
*/
#define REMAINDER_MAGIC(divisor) \
((uint64_t) ~0ull / (divisor) + 1)
/*
* Get bits 64-96 of a 32x64-bit multiply. If __int128_t is available, we use
* it, which usually compiles down to one instruction on 64-bit architectures.
* Otherwise on 32-bit architectures we usually get four instructions (one
* 32x32->64 multiply, one 32x32->32 multiply, and one 64-bit add).
*/
static inline uint32_t
_mul32by64_hi(uint32_t a, uint64_t b)
{
#ifdef HAVE_UINT128
return ((__uint128_t) b * a) >> 64;
#else
/*
* Let b = b0 + 2^32 * b1. Then a * b = a * b0 + 2^32 * a * b1. We would
* have to do a 96-bit addition to get the full result, except that only
* one term has non-zero lower 32 bits, which means that to get the high 32
* bits, we only have to add the high 64 bits of each term. Unfortunately,
* we have to do the 64-bit addition in case the low 32 bits overflow.
*/
uint32_t b0 = (uint32_t) b;
uint32_t b1 = b >> 32;
return ((((uint64_t) a * b0) >> 32) + (uint64_t) a * b1) >> 32;
#endif
}
static inline uint32_t
util_fast_urem32(uint32_t n, uint32_t d, uint64_t magic)
{
uint64_t lowbits = magic * n;
uint32_t result = _mul32by64_hi(d, lowbits);
assert(result == n % d);
return result;
}
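
A minimal standalone check of the trick above, assuming a compiler with __uint128_t (all names local to this sketch): magic * n keeps the fractional part of n/d scaled by 2^64, and multiplying that by d leaves n % d in the high 64 bits.

#include <cassert>
#include <cstdint>

// magic = 2^64 / d + 1, exactly as REMAINDER_MAGIC(d) above
static inline uint64_t remainder_magic(uint32_t d) {
    return ~UINT64_C(0) / d + 1;
}

// bits 64 and up of a 32x64-bit multiply, truncated to 32 bits
static inline uint32_t mul32by64_hi(uint32_t a, uint64_t b) {
    return (uint32_t)(((__uint128_t)b * a) >> 64);
}

int main() {
    const uint32_t d = 7;
    const uint64_t magic = remainder_magic(d);
    for (uint32_t n = 0; n < 100000; n++) {
        uint64_t lowbits = magic * n;   // frac(n/d), scaled by 2^64
        assert(mul32by64_hi(d, lowbits) == n % d);
    }
    return 0;
}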

src/mesa/main/hash.c
@@ -1,425 +0,0 @@
/**
* \file hash.c
* Generic hash table.
*
* Used for display lists, texture objects, vertex/fragment programs,
* buffer objects, etc. The hash functions are thread-safe.
*
* \note key=0 is illegal.
*
* \author Brian Paul
*/
/*
* Mesa 3-D graphics library
*
* Copyright (C) 1999-2006 Brian Paul All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
//#include "errors.h"
#include <GL/gl.h>
#include "hash.h"
#include "../util/hash_table.h"
/**
* Create a new hash table.
*
* \return pointer to a new, empty hash table.
*/
struct _mesa_HashTable *
_mesa_NewHashTable(void)
{
struct _mesa_HashTable *table = CALLOC_STRUCT(_mesa_HashTable);
if (table) {
table->ht = _mesa_hash_table_create(NULL, uint_key_hash,
uint_key_compare);
if (table->ht == NULL) {
free(table);
//_mesa_error_no_memory(__func__);
return NULL;
}
_mesa_hash_table_set_deleted_key(table->ht, uint_key(DELETED_KEY_VALUE));
/*
* Needs to be recursive, since the callback in _mesa_HashWalk()
* is allowed to call _mesa_HashRemove().
*/
mtx_init(&table->Mutex, mtx_recursive);
}
else {
//_mesa_error_no_memory(__func__);
}
return table;
}
/**
* Delete a hash table.
* Frees each entry on the hash table and then the hash table structure itself.
* Note that the caller should have already traversed the table and deleted
* the objects in the table (i.e. we don't free the entries' data pointers).
*
* \param table the hash table to delete.
*/
void
_mesa_DeleteHashTable(struct _mesa_HashTable *table)
{
assert(table);
if (_mesa_hash_table_next_entry(table->ht, NULL) != NULL) {
// _mesa_problem(NULL, "In _mesa_DeleteHashTable, found non-freed data");
}
_mesa_hash_table_destroy(table->ht, NULL);
mtx_destroy(&table->Mutex);
free(table);
}
/**
* Lookup an entry in the hash table, without locking.
* \sa _mesa_HashLookup
*/
static inline void *
_mesa_HashLookup_unlocked(struct _mesa_HashTable *table, GLuint key)
{
const struct hash_entry *entry;
assert(table);
assert(key);
if (key == DELETED_KEY_VALUE)
return table->deleted_key_data;
entry = _mesa_hash_table_search_pre_hashed(table->ht,
uint_hash(key),
uint_key(key));
if (!entry)
return NULL;
return entry->data;
}
/**
* Lookup an entry in the hash table.
*
* \param table the hash table.
* \param key the key.
*
* \return pointer to user's data or NULL if key not in table
*/
void *
_mesa_HashLookup(struct _mesa_HashTable *table, GLuint key)
{
void *res;
_mesa_HashLockMutex(table);
res = _mesa_HashLookup_unlocked(table, key);
_mesa_HashUnlockMutex(table);
return res;
}
/**
* Lookup an entry in the hash table without locking the mutex.
*
* The hash table mutex must be locked manually by calling
* _mesa_HashLockMutex() before calling this function.
*
* \param table the hash table.
* \param key the key.
*
* \return pointer to user's data or NULL if key not in table
*/
void *
_mesa_HashLookupLocked(struct _mesa_HashTable *table, GLuint key)
{
return _mesa_HashLookup_unlocked(table, key);
}
static inline void
_mesa_HashInsert_unlocked(struct _mesa_HashTable *table, GLuint key, void *data)
{
uint32_t hash = uint_hash(key);
struct hash_entry *entry;
assert(table);
assert(key);
if (key > table->MaxKey)
table->MaxKey = key;
if (key == DELETED_KEY_VALUE) {
table->deleted_key_data = data;
} else {
entry = _mesa_hash_table_search_pre_hashed(table->ht, hash, uint_key(key));
if (entry) {
entry->data = data;
} else {
_mesa_hash_table_insert_pre_hashed(table->ht, hash, uint_key(key), data);
}
}
}
/**
* Insert a key/pointer pair into the hash table without locking the mutex.
* If an entry with this key already exists we'll replace the existing entry.
*
* The hash table mutex must be locked manually by calling
* _mesa_HashLockMutex() before calling this function.
*
* \param table the hash table.
* \param key the key (not zero).
* \param data pointer to user data.
*/
void
_mesa_HashInsertLocked(struct _mesa_HashTable *table, GLuint key, void *data)
{
_mesa_HashInsert_unlocked(table, key, data);
}
/**
* Insert a key/pointer pair into the hash table.
* If an entry with this key already exists we'll replace the existing entry.
*
* \param table the hash table.
* \param key the key (not zero).
* \param data pointer to user data.
*/
void
_mesa_HashInsert(struct _mesa_HashTable *table, GLuint key, void *data)
{
_mesa_HashLockMutex(table);
_mesa_HashInsert_unlocked(table, key, data);
_mesa_HashUnlockMutex(table);
}
/**
* Remove an entry from the hash table.
*
* \param table the hash table.
* \param key key of entry to remove.
*
* While holding the hash table's lock, searches the entry with the matching
* key and unlinks it.
*/
static inline void
_mesa_HashRemove_unlocked(struct _mesa_HashTable *table, GLuint key)
{
struct hash_entry *entry;
assert(table);
assert(key);
/* Assert if _mesa_HashRemove is illegally called from a _mesa_HashDeleteAll
* callback function. This has to be checked outside of the mutex lock.
*/
assert(!table->InDeleteAll);
if (key == DELETED_KEY_VALUE) {
table->deleted_key_data = NULL;
} else {
entry = _mesa_hash_table_search_pre_hashed(table->ht,
uint_hash(key),
uint_key(key));
_mesa_hash_table_remove(table->ht, entry);
}
}
void
_mesa_HashRemoveLocked(struct _mesa_HashTable *table, GLuint key)
{
_mesa_HashRemove_unlocked(table, key);
}
void
_mesa_HashRemove(struct _mesa_HashTable *table, GLuint key)
{
_mesa_HashLockMutex(table);
_mesa_HashRemove_unlocked(table, key);
_mesa_HashUnlockMutex(table);
}
/**
* Delete all entries in a hash table, but don't delete the table itself.
* Invoke the given callback function for each table entry.
*
* \param table the hash table to delete
* \param callback the callback function
* \param userData arbitrary pointer to pass along to the callback
* (this is typically a struct gl_context pointer)
*/
void
_mesa_HashDeleteAll(struct _mesa_HashTable *table,
void (*callback)(GLuint key, void *data, void *userData),
void *userData)
{
assert(callback);
_mesa_HashLockMutex(table);
table->InDeleteAll = GL_TRUE;
hash_table_foreach(table->ht, entry) {
callback((uintptr_t)entry->key, entry->data, userData);
_mesa_hash_table_remove(table->ht, entry);
}
if (table->deleted_key_data) {
callback(DELETED_KEY_VALUE, table->deleted_key_data, userData);
table->deleted_key_data = NULL;
}
table->InDeleteAll = GL_FALSE;
_mesa_HashUnlockMutex(table);
}
/**
* Walk over all entries in a hash table, calling callback function for each.
* \param table the hash table to walk
* \param callback the callback function
* \param userData arbitrary pointer to pass along to the callback
* (this is typically a struct gl_context pointer)
*/
static void
hash_walk_unlocked(const struct _mesa_HashTable *table,
void (*callback)(GLuint key, void *data, void *userData),
void *userData)
{
assert(table);
assert(callback);
hash_table_foreach(table->ht, entry) {
callback((uintptr_t)entry->key, entry->data, userData);
}
if (table->deleted_key_data)
callback(DELETED_KEY_VALUE, table->deleted_key_data, userData);
}
void
_mesa_HashWalk(const struct _mesa_HashTable *table,
void (*callback)(GLuint key, void *data, void *userData),
void *userData)
{
/* cast-away const */
struct _mesa_HashTable *table2 = (struct _mesa_HashTable *) table;
_mesa_HashLockMutex(table2);
hash_walk_unlocked(table, callback, userData);
_mesa_HashUnlockMutex(table2);
}
void
_mesa_HashWalkLocked(const struct _mesa_HashTable *table,
void (*callback)(GLuint key, void *data, void *userData),
void *userData)
{
hash_walk_unlocked(table, callback, userData);
}
static void
debug_print_entry(GLuint key, void *data, void *userData)
{
//_mesa_debug(NULL, "%u %p\n", key, data);
}
/**
* Dump contents of hash table for debugging.
*
* \param table the hash table.
*/
void
_mesa_HashPrint(const struct _mesa_HashTable *table)
{
if (table->deleted_key_data)
debug_print_entry(DELETED_KEY_VALUE, table->deleted_key_data, NULL);
_mesa_HashWalk(table, debug_print_entry, NULL);
}
/**
* Find a block of adjacent unused hash keys.
*
* \param table the hash table.
* \param numKeys number of keys needed.
*
* \return Starting key of free block or 0 if failure.
*
* If there are enough free keys between the maximum key existing in the table
* (_mesa_HashTable::MaxKey) and the maximum key possible, then simply return
* the adjacent key. Otherwise do a full search for a free key block in the
* allowable key range.
*/
GLuint
_mesa_HashFindFreeKeyBlock(struct _mesa_HashTable *table, GLuint numKeys)
{
const GLuint maxKey = ~((GLuint) 0) - 1;
if (maxKey - numKeys > table->MaxKey) {
/* the quick solution */
return table->MaxKey + 1;
}
else {
/* the slow solution */
GLuint freeCount = 0;
GLuint freeStart = 1;
GLuint key;
for (key = 1; key != maxKey; key++) {
if (_mesa_HashLookup_unlocked(table, key)) {
/* darn, this key is already in use */
freeCount = 0;
freeStart = key+1;
}
else {
/* this key not in use, check if we've found enough */
freeCount++;
if (freeCount == numKeys) {
return freeStart;
}
}
}
/* cannot allocate a block of numKeys consecutive keys */
return 0;
}
}
/**
* Return the number of entries in the hash table.
*/
GLuint
_mesa_HashNumEntries(const struct _mesa_HashTable *table)
{
GLuint count = 0;
if (table->deleted_key_data)
count++;
count += _mesa_hash_table_num_entries(table->ht);
return count;
}
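
Note the constraint _mesa_NewHashTable() documents above: the mutex must be recursive because a _mesa_HashWalk() callback may call _mesa_HashRemove(). Any replacement keeps that constraint; a sketch of the equivalent with std::recursive_mutex (names illustrative, not MangoHud's actual code, and assuming a callback only erases the entry it was handed, since erasing another key could invalidate the walk iterator):

#include <cstdint>
#include <functional>
#include <mutex>
#include <unordered_map>

static std::recursive_mutex map_mutex;            // re-entrant, like mtx_recursive
static std::unordered_map<uint32_t, void*> objects;

void object_remove(uint32_t key) {
    std::lock_guard<std::recursive_mutex> lock(map_mutex);  // legal mid-walk
    objects.erase(key);
}

void object_walk(const std::function<void(uint32_t, void*)>& callback) {
    std::lock_guard<std::recursive_mutex> lock(map_mutex);
    for (auto it = objects.begin(); it != objects.end(); ) {
        auto current = it++;              // advance first: the callback may
        callback(current->first, current->second);  // erase the current entry
    }
}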

src/mesa/main/hash.h
@@ -1,191 +0,0 @@
/**
* \file hash.h
* Generic hash table.
*/
/*
* Mesa 3-D graphics library
*
* Copyright (C) 1999-2006 Brian Paul All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef HASH_H
#define HASH_H
#include <stdbool.h>
#include <GL/gl.h>
//#include "imports.h"
#include "../c11/threads.h"
/**********************************************************************/
/** Memory macros */
/*@{*/
/** Allocate a structure of type \p T */
#define MALLOC_STRUCT(T) (struct T *) malloc(sizeof(struct T))
/** Allocate and zero a structure of type \p T */
#define CALLOC_STRUCT(T) (struct T *) calloc(1, sizeof(struct T))
/*@}*/
/**
* Magic GLuint object name that gets stored outside of the struct hash_table.
*
* The hash table needs a particular pointer to be the marker for a key that
* was deleted from the table, along with NULL for the "never allocated in the
* table" marker. Legacy GL allows any GLuint to be used as a GL object name,
* and we use a 1:1 mapping from GLuints to key pointers, so we need to be
* able to track a GLuint that happens to match the deleted key outside of
* struct hash_table. We tell the hash table to use "1" as the deleted key
* value, so that we test the deleted-key-in-the-table path as best we can.
*/
#define DELETED_KEY_VALUE 1
/** @{
* Mapping from our use of GLuint as both the key and the hash value to the
* hash_table.h API
*
* There exist many integer hash functions, designed to avoid collisions when
* the integers are spread across key space with some patterns. In GL, the
* pattern (in the case of glGen*()ed object IDs) is that the keys are unique
* contiguous integers starting from 1. Because of that, we just use the key
* as the hash value, to minimize the cost of the hash function. If objects
* are never deleted, we will never see a collision in the table, because the
* table resizes itself when it approaches full, and thus key % table_size ==
* key.
*
* The case where we could have collisions for genned objects would be
* something like: glGenBuffers(&a, 100); glDeleteBuffers(&a + 50, 50);
* glGenBuffers(&b, 100), because objects 1-50 and 101-200 are allocated at
* the end of that sequence, instead of 1-150. So far it doesn't appear to be
* a problem.
*/
static inline bool
uint_key_compare(const void *a, const void *b)
{
return a == b;
}
static inline uint32_t
uint_hash(GLuint id)
{
return id;
}
static inline uint32_t
uint_key_hash(const void *key)
{
return uint_hash((uintptr_t)key);
}
static inline void *
uint_key(GLuint id)
{
return (void *)(uintptr_t) id;
}
/** @} */
/**
* The hash table data structure.
*/
struct _mesa_HashTable {
struct hash_table *ht;
GLuint MaxKey; /**< highest key inserted so far */
mtx_t Mutex; /**< mutual exclusion lock */
GLboolean InDeleteAll; /**< Debug check */
/** Value that would be in the table for DELETED_KEY_VALUE. */
void *deleted_key_data;
};
extern struct _mesa_HashTable *_mesa_NewHashTable(void);
extern void _mesa_DeleteHashTable(struct _mesa_HashTable *table);
extern void *_mesa_HashLookup(struct _mesa_HashTable *table, GLuint key);
extern void _mesa_HashInsert(struct _mesa_HashTable *table, GLuint key, void *data);
extern void _mesa_HashRemove(struct _mesa_HashTable *table, GLuint key);
/**
* Lock the hash table mutex.
*
* This function should be used when multiple objects need
* to be looked up in the hash table, to avoid having to lock
* and unlock the mutex each time.
*
* \param table the hash table.
*/
static inline void
_mesa_HashLockMutex(struct _mesa_HashTable *table)
{
assert(table);
mtx_lock(&table->Mutex);
}
/**
* Unlock the hash table mutex.
*
* \param table the hash table.
*/
static inline void
_mesa_HashUnlockMutex(struct _mesa_HashTable *table)
{
assert(table);
mtx_unlock(&table->Mutex);
}
extern void *_mesa_HashLookupLocked(struct _mesa_HashTable *table, GLuint key);
extern void _mesa_HashInsertLocked(struct _mesa_HashTable *table,
GLuint key, void *data);
extern void _mesa_HashRemoveLocked(struct _mesa_HashTable *table, GLuint key);
extern void
_mesa_HashDeleteAll(struct _mesa_HashTable *table,
void (*callback)(GLuint key, void *data, void *userData),
void *userData);
extern void
_mesa_HashWalk(const struct _mesa_HashTable *table,
void (*callback)(GLuint key, void *data, void *userData),
void *userData);
extern void
_mesa_HashWalkLocked(const struct _mesa_HashTable *table,
void (*callback)(GLuint key, void *data, void *userData),
void *userData);
extern void _mesa_HashPrint(const struct _mesa_HashTable *table);
extern GLuint _mesa_HashFindFreeKeyBlock(struct _mesa_HashTable *table, GLuint numKeys);
extern GLuint
_mesa_HashNumEntries(const struct _mesa_HashTable *table);
extern void _mesa_test_hash_functions(void);
#endif
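
The uint_key()/DELETED_KEY_VALUE machinery above exists only because struct hash_table keys are pointers, with NULL and one sentinel value reserved. A std::unordered_map keyed directly by the integer name reserves nothing, so none of that mapping survives the switch; a minimal illustration:

#include <cassert>
#include <cstdint>
#include <unordered_map>

int main() {
    std::unordered_map<uint32_t, const char*> objects;
    objects[0] = "zero";   // legal: no NULL-pointer key restriction
    objects[1] = "one";    // legal: no deleted-key sentinel to dodge
    objects.erase(1);      // erasing needs no tombstone value either
    assert(objects.count(0) == 1 && objects.count(1) == 0);
    return 0;
}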

src/mesa/util/futex.h
@@ -1,108 +0,0 @@
/*
* Copyright © 2015 Intel
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UTIL_FUTEX_H
#define UTIL_FUTEX_H
#if defined(HAVE_LINUX_FUTEX_H)
#include <limits.h>
#include <stdint.h>
#include <unistd.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <sys/time.h>
static inline long sys_futex(void *addr1, int op, int val1, const struct timespec *timeout, void *addr2, int val3)
{
return syscall(SYS_futex, addr1, op, val1, timeout, addr2, val3);
}
static inline int futex_wake(uint32_t *addr, int count)
{
return sys_futex(addr, FUTEX_WAKE, count, NULL, NULL, 0);
}
static inline int futex_wait(uint32_t *addr, int32_t value, const struct timespec *timeout)
{
/* FUTEX_WAIT_BITSET with FUTEX_BITSET_MATCH_ANY is equivalent to
* FUTEX_WAIT, except that it treats the timeout as absolute. */
return sys_futex(addr, FUTEX_WAIT_BITSET, value, timeout, NULL,
FUTEX_BITSET_MATCH_ANY);
}
#elif defined(__FreeBSD__)
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/umtx.h>
#include <sys/time.h>
static inline int futex_wake(uint32_t *addr, int count)
{
assert(count == (int)(uint32_t)count); /* Check that bits weren't discarded */
return _umtx_op(addr, UMTX_OP_WAKE, (uint32_t)count, NULL, NULL) == -1 ? errno : 0;
}
static inline int futex_wait(uint32_t *addr, int32_t value, struct timespec *timeout)
{
void *uaddr = NULL, *uaddr2 = NULL;
struct _umtx_time tmo = {
._flags = UMTX_ABSTIME,
._clockid = CLOCK_MONOTONIC
};
assert(value == (int)(uint32_t)value); /* Check that bits weren't discarded */
if (timeout != NULL) {
tmo._timeout = *timeout;
uaddr = (void *)(uintptr_t)sizeof(tmo);
uaddr2 = (void *)&tmo;
}
return _umtx_op(addr, UMTX_OP_WAIT_UINT, (uint32_t)value, uaddr, uaddr2) == -1 ? errno : 0;
}
#elif defined(__OpenBSD__)
#include <sys/time.h>
#include <sys/futex.h>
static inline int futex_wake(uint32_t *addr, int count)
{
return futex(addr, FUTEX_WAKE, count, NULL, NULL);
}
static inline int futex_wait(uint32_t *addr, int32_t value, const struct timespec *timeout)
{
struct timespec tsrel, tsnow;
clock_gettime(CLOCK_MONOTONIC, &tsnow);
timespecsub(timeout, &tsnow, &tsrel); /* convert absolute timeout to relative */
return futex(addr, FUTEX_WAIT, value, &tsrel, NULL);
}
#endif
#endif /* UTIL_FUTEX_H */
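
For context, a sketch of how helpers like these are typically used, here a one-shot event on the Linux variant above (illustrative only; punning std::atomic<uint32_t> to uint32_t* is common practice for futexes but not formally guaranteed by the C++ standard):

#include <atomic>
#include <climits>
#include <cstdint>

struct one_shot_event {
    std::atomic<uint32_t> state{0};

    void signal() {
        state.store(1, std::memory_order_release);
        // wake every waiter blocked on this address
        futex_wake(reinterpret_cast<uint32_t*>(&state), INT_MAX);
    }

    void wait() {
        // futex_wait returns immediately if *addr != 0, so just re-check in a loop
        while (state.load(std::memory_order_acquire) == 0)
            futex_wait(reinterpret_cast<uint32_t*>(&state), 0, nullptr);
    }
};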

src/mesa/util/hash_table.c
@@ -1,802 +0,0 @@
/*
* Copyright © 2009,2012 Intel Corporation
* Copyright © 1988-2004 Keith Packard and Bart Massey.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Except as contained in this notice, the names of the authors
* or their institutions shall not be used in advertising or
* otherwise to promote the sale, use or other dealings in this
* Software without prior written authorization from the
* authors.
*
* Authors:
* Eric Anholt <eric@anholt.net>
* Keith Packard <keithp@keithp.com>
*/
/**
* Implements an open-addressing, linear-reprobing hash table.
*
* For more information, see:
*
* http://cgit.freedesktop.org/~anholt/hash_table/tree/README
*/
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "hash_table.h"
#include "ralloc.h"
#include "macros.h"
#include "../main/hash.h"
#include "../fast_urem_by_const.h"
static const uint32_t deleted_key_value;
/**
* From Knuth -- a good choice for hash/rehash values is p, p-2 where
* p and p-2 are both prime. These tables are sized to have an extra 10%
* free to avoid exponential performance degradation as the hash table fills
*/
static const struct {
uint32_t max_entries, size, rehash;
uint64_t size_magic, rehash_magic;
} hash_sizes[] = {
#define ENTRY(max_entries, size, rehash) \
{ max_entries, size, rehash, \
REMAINDER_MAGIC(size), REMAINDER_MAGIC(rehash) }
ENTRY(2, 5, 3 ),
ENTRY(4, 7, 5 ),
ENTRY(8, 13, 11 ),
ENTRY(16, 19, 17 ),
ENTRY(32, 43, 41 ),
ENTRY(64, 73, 71 ),
ENTRY(128, 151, 149 ),
ENTRY(256, 283, 281 ),
ENTRY(512, 571, 569 ),
ENTRY(1024, 1153, 1151 ),
ENTRY(2048, 2269, 2267 ),
ENTRY(4096, 4519, 4517 ),
ENTRY(8192, 9013, 9011 ),
ENTRY(16384, 18043, 18041 ),
ENTRY(32768, 36109, 36107 ),
ENTRY(65536, 72091, 72089 ),
ENTRY(131072, 144409, 144407 ),
ENTRY(262144, 288361, 288359 ),
ENTRY(524288, 576883, 576881 ),
ENTRY(1048576, 1153459, 1153457 ),
ENTRY(2097152, 2307163, 2307161 ),
ENTRY(4194304, 4613893, 4613891 ),
ENTRY(8388608, 9227641, 9227639 ),
ENTRY(16777216, 18455029, 18455027 ),
ENTRY(33554432, 36911011, 36911009 ),
ENTRY(67108864, 73819861, 73819859 ),
ENTRY(134217728, 147639589, 147639587 ),
ENTRY(268435456, 295279081, 295279079 ),
ENTRY(536870912, 590559793, 590559791 ),
ENTRY(1073741824, 1181116273, 1181116271 ),
ENTRY(2147483648ul, 2362232233ul, 2362232231ul )
};
static inline bool
key_pointer_is_reserved(const struct hash_table *ht, const void *key)
{
return key == NULL || key == ht->deleted_key;
}
static int
entry_is_free(const struct hash_entry *entry)
{
return entry->key == NULL;
}
static int
entry_is_deleted(const struct hash_table *ht, struct hash_entry *entry)
{
return entry->key == ht->deleted_key;
}
static int
entry_is_present(const struct hash_table *ht, struct hash_entry *entry)
{
return entry->key != NULL && entry->key != ht->deleted_key;
}
bool
_mesa_hash_table_init(struct hash_table *ht,
void *mem_ctx,
uint32_t (*key_hash_function)(const void *key),
bool (*key_equals_function)(const void *a,
const void *b))
{
ht->size_index = 0;
ht->size = hash_sizes[ht->size_index].size;
ht->rehash = hash_sizes[ht->size_index].rehash;
ht->size_magic = hash_sizes[ht->size_index].size_magic;
ht->rehash_magic = hash_sizes[ht->size_index].rehash_magic;
ht->max_entries = hash_sizes[ht->size_index].max_entries;
ht->key_hash_function = key_hash_function;
ht->key_equals_function = key_equals_function;
ht->table = rzalloc_array(mem_ctx, struct hash_entry, ht->size);
ht->entries = 0;
ht->deleted_entries = 0;
ht->deleted_key = &deleted_key_value;
return ht->table != NULL;
}
struct hash_table *
_mesa_hash_table_create(void *mem_ctx,
uint32_t (*key_hash_function)(const void *key),
bool (*key_equals_function)(const void *a,
const void *b))
{
struct hash_table *ht;
/* mem_ctx is used to allocate the hash table, but the hash table is used
* to allocate all of the suballocations.
*/
ht = ralloc(mem_ctx, struct hash_table);
if (ht == NULL)
return NULL;
if (!_mesa_hash_table_init(ht, ht, key_hash_function, key_equals_function)) {
ralloc_free(ht);
return NULL;
}
return ht;
}
struct hash_table *
_mesa_hash_table_clone(struct hash_table *src, void *dst_mem_ctx)
{
struct hash_table *ht;
ht = ralloc(dst_mem_ctx, struct hash_table);
if (ht == NULL)
return NULL;
memcpy(ht, src, sizeof(struct hash_table));
ht->table = ralloc_array(ht, struct hash_entry, ht->size);
if (ht->table == NULL) {
ralloc_free(ht);
return NULL;
}
memcpy(ht->table, src->table, ht->size * sizeof(struct hash_entry));
return ht;
}
/**
* Frees the given hash table.
*
* If delete_function is passed, it gets called on each entry present before
* freeing.
*/
void
_mesa_hash_table_destroy(struct hash_table *ht,
void (*delete_function)(struct hash_entry *entry))
{
if (!ht)
return;
if (delete_function) {
hash_table_foreach(ht, entry) {
delete_function(entry);
}
}
ralloc_free(ht);
}
/**
* Deletes all entries of the given hash table without deleting the table
* itself or changing its structure.
*
* If delete_function is passed, it gets called on each entry present.
*/
void
_mesa_hash_table_clear(struct hash_table *ht,
void (*delete_function)(struct hash_entry *entry))
{
struct hash_entry *entry;
for (entry = ht->table; entry != ht->table + ht->size; entry++) {
if (entry->key == NULL)
continue;
if (delete_function != NULL && entry->key != ht->deleted_key)
delete_function(entry);
entry->key = NULL;
}
ht->entries = 0;
ht->deleted_entries = 0;
}
/** Sets the value of the key pointer used for deleted entries in the table.
*
* The assumption is that usually keys are actual pointers, so we use a
* default value of a pointer to an arbitrary piece of storage in the library.
* But in some cases a consumer wants to store some other sort of value in the
* table, like a uint32_t, in which case that pointer may conflict with one of
* their valid keys. This lets that user select a safe value.
*
* This must be called before any keys are actually deleted from the table.
*/
void
_mesa_hash_table_set_deleted_key(struct hash_table *ht, const void *deleted_key)
{
ht->deleted_key = deleted_key;
}
static struct hash_entry *
hash_table_search(struct hash_table *ht, uint32_t hash, const void *key)
{
assert(!key_pointer_is_reserved(ht, key));
uint32_t size = ht->size;
uint32_t start_hash_address = util_fast_urem32(hash, size, ht->size_magic);
uint32_t double_hash = 1 + util_fast_urem32(hash, ht->rehash,
ht->rehash_magic);
uint32_t hash_address = start_hash_address;
do {
struct hash_entry *entry = ht->table + hash_address;
if (entry_is_free(entry)) {
return NULL;
} else if (entry_is_present(ht, entry) && entry->hash == hash) {
if (ht->key_equals_function(key, entry->key)) {
return entry;
}
}
hash_address += double_hash;
if (hash_address >= size)
hash_address -= size;
} while (hash_address != start_hash_address);
return NULL;
}
/**
* Finds a hash table entry with the given key and hash of that key.
*
* Returns NULL if no entry is found. Note that the data pointer may be
* modified by the user.
*/
struct hash_entry *
_mesa_hash_table_search(struct hash_table *ht, const void *key)
{
assert(ht->key_hash_function);
return hash_table_search(ht, ht->key_hash_function(key), key);
}
struct hash_entry *
_mesa_hash_table_search_pre_hashed(struct hash_table *ht, uint32_t hash,
const void *key)
{
assert(ht->key_hash_function == NULL || hash == ht->key_hash_function(key));
return hash_table_search(ht, hash, key);
}
static struct hash_entry *
hash_table_insert(struct hash_table *ht, uint32_t hash,
const void *key, void *data);
static void
hash_table_insert_rehash(struct hash_table *ht, uint32_t hash,
const void *key, void *data)
{
uint32_t size = ht->size;
uint32_t start_hash_address = util_fast_urem32(hash, size, ht->size_magic);
uint32_t double_hash = 1 + util_fast_urem32(hash, ht->rehash,
ht->rehash_magic);
uint32_t hash_address = start_hash_address;
do {
struct hash_entry *entry = ht->table + hash_address;
if (likely(entry->key == NULL)) {
entry->hash = hash;
entry->key = key;
entry->data = data;
return;
}
hash_address += double_hash;
if (hash_address >= size)
hash_address -= size;
} while (true);
}
static void
_mesa_hash_table_rehash(struct hash_table *ht, unsigned new_size_index)
{
struct hash_table old_ht;
struct hash_entry *table;
if (new_size_index >= ARRAY_SIZE(hash_sizes))
return;
table = rzalloc_array(ralloc_parent(ht->table), struct hash_entry,
hash_sizes[new_size_index].size);
if (table == NULL)
return;
old_ht = *ht;
ht->table = table;
ht->size_index = new_size_index;
ht->size = hash_sizes[ht->size_index].size;
ht->rehash = hash_sizes[ht->size_index].rehash;
ht->size_magic = hash_sizes[ht->size_index].size_magic;
ht->rehash_magic = hash_sizes[ht->size_index].rehash_magic;
ht->max_entries = hash_sizes[ht->size_index].max_entries;
ht->entries = 0;
ht->deleted_entries = 0;
hash_table_foreach(&old_ht, entry) {
hash_table_insert_rehash(ht, entry->hash, entry->key, entry->data);
}
ht->entries = old_ht.entries;
ralloc_free(old_ht.table);
}
static struct hash_entry *
hash_table_insert(struct hash_table *ht, uint32_t hash,
const void *key, void *data)
{
struct hash_entry *available_entry = NULL;
assert(!key_pointer_is_reserved(ht, key));
if (ht->entries >= ht->max_entries) {
_mesa_hash_table_rehash(ht, ht->size_index + 1);
} else if (ht->deleted_entries + ht->entries >= ht->max_entries) {
_mesa_hash_table_rehash(ht, ht->size_index);
}
uint32_t size = ht->size;
uint32_t start_hash_address = util_fast_urem32(hash, size, ht->size_magic);
uint32_t double_hash = 1 + util_fast_urem32(hash, ht->rehash,
ht->rehash_magic);
uint32_t hash_address = start_hash_address;
do {
struct hash_entry *entry = ht->table + hash_address;
if (!entry_is_present(ht, entry)) {
/* Stash the first available entry we find */
if (available_entry == NULL)
available_entry = entry;
if (entry_is_free(entry))
break;
}
/* Implement replacement when another insert happens
* with a matching key. This is a relatively common
* feature of hash tables, with the alternative
* generally being "insert the new value as well, and
* return it first when the key is searched for".
*
* Note that the hash table doesn't have a delete
* callback. If freeing of old data pointers is
* required to avoid memory leaks, perform a search
* before inserting.
*/
if (!entry_is_deleted(ht, entry) &&
entry->hash == hash &&
ht->key_equals_function(key, entry->key)) {
entry->key = key;
entry->data = data;
return entry;
}
hash_address += double_hash;
if (hash_address >= size)
hash_address -= size;
} while (hash_address != start_hash_address);
if (available_entry) {
if (entry_is_deleted(ht, available_entry))
ht->deleted_entries--;
available_entry->hash = hash;
available_entry->key = key;
available_entry->data = data;
ht->entries++;
return available_entry;
}
/* We can only get here if a required resize failed. An application that
* does not check allocation results could ignore this NULL return.
*/
return NULL;
}
/**
* Inserts the key with the given hash into the table.
*
* Note that insertion may rearrange the table on a resize or rehash,
* so previously found hash_entries are no longer valid after this function.
*/
struct hash_entry *
_mesa_hash_table_insert(struct hash_table *ht, const void *key, void *data)
{
assert(ht->key_hash_function);
return hash_table_insert(ht, ht->key_hash_function(key), key, data);
}
struct hash_entry *
_mesa_hash_table_insert_pre_hashed(struct hash_table *ht, uint32_t hash,
const void *key, void *data)
{
assert(ht->key_hash_function == NULL || hash == ht->key_hash_function(key));
return hash_table_insert(ht, hash, key, data);
}
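/* Sketch (illustrative, not in the original source): when the same key is
 * looked up and then inserted, the hash can be computed once and reused by
 * the *_pre_hashed variants, avoiding a second key_hash_function call.
 */
static inline void example_insert_once(struct hash_table *ht,
                                       const void *key, void *data)
{
   uint32_t hash = ht->key_hash_function(key);
   if (!_mesa_hash_table_search_pre_hashed(ht, hash, key))
      _mesa_hash_table_insert_pre_hashed(ht, hash, key, data);
}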
/**
* This function deletes the given hash table entry.
*
* Note that deletion doesn't otherwise modify the table, so an iteration over
* the table deleting entries is safe.
*/
void
_mesa_hash_table_remove(struct hash_table *ht,
struct hash_entry *entry)
{
if (!entry)
return;
entry->key = ht->deleted_key;
ht->entries--;
ht->deleted_entries++;
}
/**
* Removes the entry with the corresponding key, if exists.
*/
void _mesa_hash_table_remove_key(struct hash_table *ht,
const void *key)
{
_mesa_hash_table_remove(ht, _mesa_hash_table_search(ht, key));
}
/**
* This function is an iterator over the hash table.
*
* Pass in NULL for the first entry, as in the start of a for loop. Note that
* an iteration over the table is O(table_size) not O(entries).
*/
struct hash_entry *
_mesa_hash_table_next_entry(struct hash_table *ht,
struct hash_entry *entry)
{
if (entry == NULL)
entry = ht->table;
else
entry = entry + 1;
for (; entry != ht->table + ht->size; entry++) {
if (entry_is_present(ht, entry)) {
return entry;
}
}
return NULL;
}
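/* Sketch: manual iteration with _mesa_hash_table_next_entry; this is
 * exactly what the hash_table_foreach macro in hash_table.h expands to.
 */
static inline unsigned example_count_entries(struct hash_table *ht)
{
   unsigned n = 0;
   for (struct hash_entry *e = _mesa_hash_table_next_entry(ht, NULL);
        e != NULL;
        e = _mesa_hash_table_next_entry(ht, e))
      n++;
   return n; /* equals ht->entries, but the walk visits all ht->size slots */
}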
/**
* Returns a random entry from the hash table.
*
* This may be useful in implementing random replacement (as opposed
* to just removing everything) in caches based on this hash table
* implementation. @predicate may be used to filter entries, or may
* be set to NULL for no filtering.
*/
struct hash_entry *
_mesa_hash_table_random_entry(struct hash_table *ht,
bool (*predicate)(struct hash_entry *entry))
{
struct hash_entry *entry;
uint32_t i = rand() % ht->size;
if (ht->entries == 0)
return NULL;
for (entry = ht->table + i; entry != ht->table + ht->size; entry++) {
if (entry_is_present(ht, entry) &&
(!predicate || predicate(entry))) {
return entry;
}
}
for (entry = ht->table; entry != ht->table + i; entry++) {
if (entry_is_present(ht, entry) &&
(!predicate || predicate(entry))) {
return entry;
}
}
return NULL;
}
/**
* Quick FNV-1a hash implementation based on:
* http://www.isthe.com/chongo/tech/comp/fnv/
*
* FNV-1a may not be the best hash out there -- Jenkins's lookup3 is supposed
* to be quite good, and it probably beats FNV. But FNV has the advantage
* that it involves almost no code. For an improvement on both, see Paul
* Hsieh's http://www.azillionmonkeys.com/qed/hash.html
*/
uint32_t
_mesa_hash_data(const void *data, size_t size)
{
return _mesa_fnv32_1a_accumulate_block(_mesa_fnv32_1a_offset_bias,
data, size);
}
/** FNV-1a string hash implementation */
uint32_t
_mesa_hash_string(const void *_key)
{
uint32_t hash = _mesa_fnv32_1a_offset_bias;
const char *key = _key;
while (*key != 0) {
hash = _mesa_fnv32_1a_accumulate(hash, *key);
key++;
}
return hash;
}
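/* Worked check (illustrative): FNV-1a XORs each byte into the hash, then
 * multiplies by the 32-bit FNV prime 0x01000193. For the one-byte string
 * "a": (2166136261 ^ 0x61) * 0x01000193 == 0xe40c292c, the published
 * FNV-1a test vector, so _mesa_hash_string("a") returns 0xe40c292c.
 */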
/**
* String compare function for use as the comparison callback in
* _mesa_hash_table_create().
*/
bool
_mesa_key_string_equal(const void *a, const void *b)
{
return strcmp(a, b) == 0;
}
bool
_mesa_key_pointer_equal(const void *a, const void *b)
{
return a == b;
}
/**
* Helper to create a hash table with pointer keys.
*/
struct hash_table *
_mesa_pointer_hash_table_create(void *mem_ctx)
{
return _mesa_hash_table_create(mem_ctx, _mesa_hash_pointer,
_mesa_key_pointer_equal);
}
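/* Sketch: pointer-keyed tables are the common case, e.g. mapping an API
 * handle to driver-private data (names below are hypothetical):
 *
 *    struct hash_table *objects = _mesa_pointer_hash_table_create(NULL);
 *    _mesa_hash_table_insert(objects, device_handle, device_data);
 *    struct hash_entry *e = _mesa_hash_table_search(objects, device_handle);
 */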
/**
* Hash table wrapper which supports 64-bit keys.
*
* TODO: unify all hash table implementations.
*/
struct hash_key_u64 {
uint64_t value;
};
static uint32_t
key_u64_hash(const void *key)
{
return _mesa_hash_data(key, sizeof(struct hash_key_u64));
}
static bool
key_u64_equals(const void *a, const void *b)
{
const struct hash_key_u64 *aa = a;
const struct hash_key_u64 *bb = b;
return aa->value == bb->value;
}
#define FREED_KEY_VALUE 0
struct hash_table_u64 *
_mesa_hash_table_u64_create(void *mem_ctx)
{
STATIC_ASSERT(FREED_KEY_VALUE != DELETED_KEY_VALUE);
struct hash_table_u64 *ht;
ht = CALLOC_STRUCT(hash_table_u64);
if (!ht)
return NULL;
if (sizeof(void *) == 8) {
ht->table = _mesa_hash_table_create(mem_ctx, _mesa_hash_pointer,
_mesa_key_pointer_equal);
} else {
ht->table = _mesa_hash_table_create(mem_ctx, key_u64_hash,
key_u64_equals);
}
if (ht->table)
_mesa_hash_table_set_deleted_key(ht->table, uint_key(DELETED_KEY_VALUE));
return ht;
}
void
_mesa_hash_table_u64_clear(struct hash_table_u64 *ht,
void (*delete_function)(struct hash_entry *entry))
{
if (!ht)
return;
if (ht->deleted_key_data) {
if (delete_function) {
struct hash_table *table = ht->table;
struct hash_entry entry;
/* Create a fake entry for the delete function. */
if (sizeof(void *) == 8) {
entry.hash = table->key_hash_function(table->deleted_key);
} else {
struct hash_key_u64 _key = { .value = (uintptr_t)table->deleted_key };
entry.hash = table->key_hash_function(&_key);
}
entry.key = table->deleted_key;
entry.data = ht->deleted_key_data;
delete_function(&entry);
}
ht->deleted_key_data = NULL;
}
if (ht->freed_key_data) {
if (delete_function) {
struct hash_table *table = ht->table;
struct hash_entry entry;
/* Create a fake entry for the delete function. */
if (sizeof(void *) == 8) {
entry.hash = table->key_hash_function(uint_key(FREED_KEY_VALUE));
} else {
struct hash_key_u64 _key = { .value = (uintptr_t)FREED_KEY_VALUE };
entry.hash = table->key_hash_function(&_key);
}
entry.key = uint_key(FREED_KEY_VALUE);
entry.data = ht->freed_key_data;
delete_function(&entry);
}
ht->freed_key_data = NULL;
}
_mesa_hash_table_clear(ht->table, delete_function);
}
void
_mesa_hash_table_u64_destroy(struct hash_table_u64 *ht,
void (*delete_function)(struct hash_entry *entry))
{
if (!ht)
return;
_mesa_hash_table_u64_clear(ht, delete_function);
_mesa_hash_table_destroy(ht->table, delete_function);
free(ht);
}
void
_mesa_hash_table_u64_insert(struct hash_table_u64 *ht, uint64_t key,
void *data)
{
if (key == FREED_KEY_VALUE) {
ht->freed_key_data = data;
return;
}
if (key == DELETED_KEY_VALUE) {
ht->deleted_key_data = data;
return;
}
if (sizeof(void *) == 8) {
_mesa_hash_table_insert(ht->table, (void *)(uintptr_t)key, data);
} else {
struct hash_key_u64 *_key = CALLOC_STRUCT(hash_key_u64);
if (!_key)
return;
_key->value = key;
_mesa_hash_table_insert(ht->table, _key, data);
}
}
static struct hash_entry *
hash_table_u64_search(struct hash_table_u64 *ht, uint64_t key)
{
if (sizeof(void *) == 8) {
return _mesa_hash_table_search(ht->table, (void *)(uintptr_t)key);
} else {
struct hash_key_u64 _key = { .value = key };
return _mesa_hash_table_search(ht->table, &_key);
}
}
void *
_mesa_hash_table_u64_search(struct hash_table_u64 *ht, uint64_t key)
{
struct hash_entry *entry;
if (key == FREED_KEY_VALUE)
return ht->freed_key_data;
if (key == DELETED_KEY_VALUE)
return ht->deleted_key_data;
entry = hash_table_u64_search(ht, key);
if (!entry)
return NULL;
return entry->data;
}
void
_mesa_hash_table_u64_remove(struct hash_table_u64 *ht, uint64_t key)
{
struct hash_entry *entry;
if (key == FREED_KEY_VALUE) {
ht->freed_key_data = NULL;
return;
}
if (key == DELETED_KEY_VALUE) {
ht->deleted_key_data = NULL;
return;
}
entry = hash_table_u64_search(ht, key);
if (!entry)
return;
if (sizeof(void *) == 8) {
_mesa_hash_table_remove(ht->table, entry);
} else {
struct hash_key_u64 *_key = (struct hash_key_u64 *)entry->key;
_mesa_hash_table_remove(ht->table, entry);
free(_key);
}
}
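/* Sketch (illustrative): the u64 wrapper hides the 32-bit/64-bit pointer
 * difference, so callers can key on plain integers. The key below is
 * arbitrary (any value other than the FREED/DELETED sentinels is stored
 * in the underlying table).
 */
static inline void example_u64_roundtrip(void)
{
   struct hash_table_u64 *ht = _mesa_hash_table_u64_create(NULL);
   if (!ht)
      return;
   _mesa_hash_table_u64_insert(ht, 0x1234abcdULL, (void *)"payload");
   assert(_mesa_hash_table_u64_search(ht, 0x1234abcdULL) != NULL);
   _mesa_hash_table_u64_remove(ht, 0x1234abcdULL);
   _mesa_hash_table_u64_destroy(ht, NULL);
}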

@ -1,205 +0,0 @@
/*
* Copyright © 2009,2012 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
#ifndef _HASH_TABLE_H
#define _HASH_TABLE_H
#include <stdlib.h>
#include <inttypes.h>
#include <stdbool.h>
#include "../c99_compat.h"
#include "macros.h"
#ifdef __cplusplus
extern "C" {
#endif
struct hash_entry {
uint32_t hash;
const void *key;
void *data;
};
struct hash_table {
struct hash_entry *table;
uint32_t (*key_hash_function)(const void *key);
bool (*key_equals_function)(const void *a, const void *b);
const void *deleted_key;
uint32_t size;
uint32_t rehash;
uint64_t size_magic;
uint64_t rehash_magic;
uint32_t max_entries;
uint32_t size_index;
uint32_t entries;
uint32_t deleted_entries;
};
struct hash_table *
_mesa_hash_table_create(void *mem_ctx,
uint32_t (*key_hash_function)(const void *key),
bool (*key_equals_function)(const void *a,
const void *b));
bool
_mesa_hash_table_init(struct hash_table *ht,
void *mem_ctx,
uint32_t (*key_hash_function)(const void *key),
bool (*key_equals_function)(const void *a,
const void *b));
struct hash_table *
_mesa_hash_table_clone(struct hash_table *src, void *dst_mem_ctx);
void _mesa_hash_table_destroy(struct hash_table *ht,
void (*delete_function)(struct hash_entry *entry));
void _mesa_hash_table_clear(struct hash_table *ht,
void (*delete_function)(struct hash_entry *entry));
void _mesa_hash_table_set_deleted_key(struct hash_table *ht,
const void *deleted_key);
static inline uint32_t _mesa_hash_table_num_entries(struct hash_table *ht)
{
return ht->entries;
}
struct hash_entry *
_mesa_hash_table_insert(struct hash_table *ht, const void *key, void *data);
struct hash_entry *
_mesa_hash_table_insert_pre_hashed(struct hash_table *ht, uint32_t hash,
const void *key, void *data);
struct hash_entry *
_mesa_hash_table_search(struct hash_table *ht, const void *key);
struct hash_entry *
_mesa_hash_table_search_pre_hashed(struct hash_table *ht, uint32_t hash,
const void *key);
void _mesa_hash_table_remove(struct hash_table *ht,
struct hash_entry *entry);
void _mesa_hash_table_remove_key(struct hash_table *ht,
const void *key);
struct hash_entry *_mesa_hash_table_next_entry(struct hash_table *ht,
struct hash_entry *entry);
struct hash_entry *
_mesa_hash_table_random_entry(struct hash_table *ht,
bool (*predicate)(struct hash_entry *entry));
uint32_t _mesa_hash_data(const void *data, size_t size);
uint32_t _mesa_hash_string(const void *key);
bool _mesa_key_string_equal(const void *a, const void *b);
bool _mesa_key_pointer_equal(const void *a, const void *b);
static inline uint32_t _mesa_key_hash_string(const void *key)
{
return _mesa_hash_string((const char *)key);
}
static inline uint32_t _mesa_hash_pointer(const void *pointer)
{
uintptr_t num = (uintptr_t) pointer;
return (uint32_t) ((num >> 2) ^ (num >> 6) ^ (num >> 10) ^ (num >> 14));
}
struct hash_table *
_mesa_pointer_hash_table_create(void *mem_ctx);
enum {
_mesa_fnv32_1a_offset_bias = 2166136261u,
};
static inline uint32_t
_mesa_fnv32_1a_accumulate_block(uint32_t hash, const void *data, size_t size)
{
const uint8_t *bytes = (const uint8_t *)data;
while (size-- != 0) {
hash ^= *bytes;
hash = hash * 0x01000193;
bytes++;
}
return hash;
}
#define _mesa_fnv32_1a_accumulate(hash, expr) \
_mesa_fnv32_1a_accumulate_block(hash, &(expr), sizeof(expr))
/**
* This foreach function is safe against deletion (which just replaces
* an entry's data with the deleted marker), but not against insertion
* (which may rehash the table, making entry a dangling pointer).
*/
#define hash_table_foreach(ht, entry) \
for (struct hash_entry *entry = _mesa_hash_table_next_entry(ht, NULL); \
entry != NULL; \
entry = _mesa_hash_table_next_entry(ht, entry))
static inline void
hash_table_call_foreach(struct hash_table *ht,
void (*callback)(const void *key,
void *data,
void *closure),
void *closure)
{
hash_table_foreach(ht, entry)
callback(entry->key, entry->data, closure);
}
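/* Sketch: a closure-style walk over all live entries; the names below are
 * illustrative only.
 *
 *    static void dump_entry(const void *key, void *data, void *closure)
 *    {
 *       fprintf((FILE *)closure, "%p -> %p\n", key, data);
 *    }
 *    ...
 *    hash_table_call_foreach(ht, dump_entry, stderr);
 */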
/**
* Hash table wrapper which supports 64-bit keys.
*/
struct hash_table_u64 {
struct hash_table *table;
void *freed_key_data;
void *deleted_key_data;
};
struct hash_table_u64 *
_mesa_hash_table_u64_create(void *mem_ctx);
void
_mesa_hash_table_u64_destroy(struct hash_table_u64 *ht,
void (*delete_function)(struct hash_entry *entry));
void
_mesa_hash_table_u64_insert(struct hash_table_u64 *ht, uint64_t key,
void *data);
void *
_mesa_hash_table_u64_search(struct hash_table_u64 *ht, uint64_t key);
void
_mesa_hash_table_u64_remove(struct hash_table_u64 *ht, uint64_t key);
void
_mesa_hash_table_u64_clear(struct hash_table_u64 *ht,
void (*delete_function)(struct hash_entry *entry));
#ifdef __cplusplus
} /* extern C */
#endif
#endif /* _HASH_TABLE_H */

@ -1,249 +0,0 @@
/**************************************************************************
*
* Copyright 2006 VMware, Inc., Bismarck, ND. USA.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
**************************************************************************/
/**
* \file
* List macros heavily inspired by the Linux kernel
* list handling.
*
* Not thread-safe: concurrent operations must be
* protected by an external mutex.
*/
#ifndef _UTIL_LIST_H_
#define _UTIL_LIST_H_
#include <stdbool.h>
#include <stddef.h>
#include <assert.h>
#include "../c99_compat.h"
#ifdef DEBUG
# define list_assert(cond, msg) assert(cond && msg)
#else
# define list_assert(cond, msg) (void)(0 && (cond))
#endif
struct list_head
{
struct list_head *prev;
struct list_head *next;
};
static inline void list_inithead(struct list_head *item)
{
item->prev = item;
item->next = item;
}
static inline void list_add(struct list_head *item, struct list_head *list)
{
item->prev = list;
item->next = list->next;
list->next->prev = item;
list->next = item;
}
static inline void list_addtail(struct list_head *item, struct list_head *list)
{
item->next = list;
item->prev = list->prev;
list->prev->next = item;
list->prev = item;
}
static inline bool list_is_empty(const struct list_head *list);
static inline void list_replace(struct list_head *from, struct list_head *to)
{
if (list_is_empty(from)) {
list_inithead(to);
} else {
to->prev = from->prev;
to->next = from->next;
from->next->prev = to;
from->prev->next = to;
}
}
static inline void list_del(struct list_head *item)
{
item->prev->next = item->next;
item->next->prev = item->prev;
item->prev = item->next = NULL;
}
static inline void list_delinit(struct list_head *item)
{
item->prev->next = item->next;
item->next->prev = item->prev;
item->next = item;
item->prev = item;
}
static inline bool list_is_empty(const struct list_head *list)
{
return list->next == list;
}
/**
* Returns whether the list has exactly one element.
*/
static inline bool list_is_singular(const struct list_head *list)
{
return list->next != NULL && list->next != list && list->next->next == list;
}
static inline unsigned list_length(const struct list_head *list)
{
struct list_head *node;
unsigned length = 0;
for (node = list->next; node != list; node = node->next)
length++;
return length;
}
static inline void list_splice(struct list_head *src, struct list_head *dst)
{
if (list_is_empty(src))
return;
src->next->prev = dst;
src->prev->next = dst->next;
dst->next->prev = src->prev;
dst->next = src->next;
}
static inline void list_splicetail(struct list_head *src, struct list_head *dst)
{
if (list_is_empty(src))
return;
src->prev->next = dst;
src->next->prev = dst->prev;
dst->prev->next = src->next;
dst->prev = src->prev;
}
static inline void list_validate(const struct list_head *list)
{
struct list_head *node;
assert(list->next->prev == list && list->prev->next == list);
for (node = list->next; node != list; node = node->next)
assert(node->next->prev == node && node->prev->next == node);
}
#define LIST_ENTRY(__type, __item, __field) \
((__type *)(((char *)(__item)) - offsetof(__type, __field)))
/**
* Cast from a pointer to a member of a struct back to the containing struct.
*
* 'sample' MUST be initialized, or else the result is undefined!
*/
#ifndef container_of
#define container_of(ptr, sample, member) \
(void *)((char *)(ptr) \
- ((char *)&(sample)->member - (char *)(sample)))
#endif
#define list_first_entry(ptr, type, member) \
LIST_ENTRY(type, (ptr)->next, member)
#define list_last_entry(ptr, type, member) \
LIST_ENTRY(type, (ptr)->prev, member)
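/* Sketch (illustrative): the list is intrusive, so the link lives inside
 * the element and LIST_ENTRY/container_of recover the element from it.
 *
 *    struct frame {
 *       struct list_head link;
 *       uint64_t timestamp;
 *    };
 *
 *    struct list_head frames;
 *    list_inithead(&frames);
 *    list_addtail(&f->link, &frames);
 *    struct frame *first = list_first_entry(&frames, struct frame, link);
 */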
#define LIST_FOR_EACH_ENTRY(pos, head, member) \
for (pos = NULL, pos = container_of((head)->next, pos, member); \
&pos->member != (head); \
pos = container_of(pos->member.next, pos, member))
#define LIST_FOR_EACH_ENTRY_SAFE(pos, storage, head, member) \
for (pos = NULL, pos = container_of((head)->next, pos, member), \
storage = container_of(pos->member.next, pos, member); \
&pos->member != (head); \
pos = storage, storage = container_of(storage->member.next, storage, member))
#define LIST_FOR_EACH_ENTRY_SAFE_REV(pos, storage, head, member) \
for (pos = NULL, pos = container_of((head)->prev, pos, member), \
storage = container_of(pos->member.prev, pos, member); \
&pos->member != (head); \
pos = storage, storage = container_of(storage->member.prev, storage, member))
#define LIST_FOR_EACH_ENTRY_FROM(pos, start, head, member) \
for (pos = NULL, pos = container_of((start), pos, member); \
&pos->member != (head); \
pos = container_of(pos->member.next, pos, member))
#define LIST_FOR_EACH_ENTRY_FROM_REV(pos, start, head, member) \
for (pos = NULL, pos = container_of((start), pos, member); \
&pos->member != (head); \
pos = container_of(pos->member.prev, pos, member))
#define list_for_each_entry(type, pos, head, member) \
for (type *pos = LIST_ENTRY(type, (head)->next, member), \
*__next = LIST_ENTRY(type, pos->member.next, member); \
&pos->member != (head); \
pos = LIST_ENTRY(type, pos->member.next, member), \
list_assert(pos == __next, "use _safe iterator"), \
__next = LIST_ENTRY(type, __next->member.next, member))
#define list_for_each_entry_safe(type, pos, head, member) \
for (type *pos = LIST_ENTRY(type, (head)->next, member), \
*__next = LIST_ENTRY(type, pos->member.next, member); \
&pos->member != (head); \
pos = __next, \
__next = LIST_ENTRY(type, __next->member.next, member))
#define list_for_each_entry_rev(type, pos, head, member) \
for (type *pos = LIST_ENTRY(type, (head)->prev, member), \
*__prev = LIST_ENTRY(type, pos->member.prev, member); \
&pos->member != (head); \
pos = LIST_ENTRY(type, pos->member.prev, member), \
list_assert(pos == __prev, "use _safe iterator"), \
__prev = LIST_ENTRY(type, __prev->member.prev, member))
#define list_for_each_entry_safe_rev(type, pos, head, member) \
for (type *pos = LIST_ENTRY(type, (head)->prev, member), \
*__prev = LIST_ENTRY(type, pos->member.prev, member); \
&pos->member != (head); \
pos = __prev, \
__prev = LIST_ENTRY(type, __prev->member.prev, member))
#define list_for_each_entry_from(type, pos, start, head, member) \
for (type *pos = LIST_ENTRY(type, (start), member); \
&pos->member != (head); \
pos = LIST_ENTRY(type, pos->member.next, member))
#define list_for_each_entry_from_rev(type, pos, start, head, member) \
for (type *pos = LIST_ENTRY(type, (start), member); \
&pos->member != (head); \
pos = LIST_ENTRY(type, pos->member.prev, member))
#endif /*_UTIL_LIST_H_*/

@ -1,920 +0,0 @@
/*
* Copyright © 2010 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <assert.h>
#include <stdlib.h>
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
/* Some versions of MinGW are missing _vscprintf's declaration, although they
* still provide the symbol in the import library. */
#ifdef __MINGW32__
_CRTIMP int _vscprintf(const char *format, va_list argptr);
#endif
#include "ralloc.h"
#ifndef va_copy
#ifdef __va_copy
#define va_copy(dest, src) __va_copy((dest), (src))
#else
#define va_copy(dest, src) (dest) = (src)
#endif
#endif
#define CANARY 0x5A1106
/* Align the header's size so that ralloc() allocations will return with the
* same alignment as a libc malloc would have (8 on 32-bit GLIBC, 16 on
* 64-bit), avoiding performance penalties on x86 and alignment faults on
* ARM.
*/
struct
#ifdef _MSC_VER
__declspec(align(8))
#elif defined(__LP64__)
__attribute__((aligned(16)))
#else
__attribute__((aligned(8)))
#endif
ralloc_header
{
#ifndef NDEBUG
/* A canary value used to determine whether a pointer is ralloc'd. */
unsigned canary;
#endif
struct ralloc_header *parent;
/* The first child (head of a linked list) */
struct ralloc_header *child;
/* Linked list of siblings */
struct ralloc_header *prev;
struct ralloc_header *next;
void (*destructor)(void *);
};
typedef struct ralloc_header ralloc_header;
static void unlink_block(ralloc_header *info);
static void unsafe_free(ralloc_header *info);
static ralloc_header *
get_header(const void *ptr)
{
ralloc_header *info = (ralloc_header *) (((char *) ptr) -
sizeof(ralloc_header));
assert(info->canary == CANARY);
return info;
}
#define PTR_FROM_HEADER(info) (((char *) info) + sizeof(ralloc_header))
static void
add_child(ralloc_header *parent, ralloc_header *info)
{
if (parent != NULL) {
info->parent = parent;
info->next = parent->child;
parent->child = info;
if (info->next != NULL)
info->next->prev = info;
}
}
void *
ralloc_context(const void *ctx)
{
return ralloc_size(ctx, 0);
}
void *
ralloc_size(const void *ctx, size_t size)
{
void *block = malloc(size + sizeof(ralloc_header));
ralloc_header *info;
ralloc_header *parent;
if (unlikely(block == NULL))
return NULL;
info = (ralloc_header *) block;
/* measurements have shown that calloc is slower (because of
* the multiplication overflow checking?), so clear things
* manually
*/
info->parent = NULL;
info->child = NULL;
info->prev = NULL;
info->next = NULL;
info->destructor = NULL;
parent = ctx != NULL ? get_header(ctx) : NULL;
add_child(parent, info);
#ifndef NDEBUG
info->canary = CANARY;
#endif
return PTR_FROM_HEADER(info);
}
void *
rzalloc_size(const void *ctx, size_t size)
{
void *ptr = ralloc_size(ctx, size);
if (likely(ptr))
memset(ptr, 0, size);
return ptr;
}
/* helper function - assumes ptr != NULL */
static void *
resize(void *ptr, size_t size)
{
ralloc_header *child, *old, *info;
old = get_header(ptr);
info = realloc(old, size + sizeof(ralloc_header));
if (info == NULL)
return NULL;
/* Update parent and sibling's links to the reallocated node. */
if (info != old && info->parent != NULL) {
if (info->parent->child == old)
info->parent->child = info;
if (info->prev != NULL)
info->prev->next = info;
if (info->next != NULL)
info->next->prev = info;
}
/* Update child->parent links for all children */
for (child = info->child; child != NULL; child = child->next)
child->parent = info;
return PTR_FROM_HEADER(info);
}
void *
reralloc_size(const void *ctx, void *ptr, size_t size)
{
if (unlikely(ptr == NULL))
return ralloc_size(ctx, size);
assert(ralloc_parent(ptr) == ctx);
return resize(ptr, size);
}
void *
rerzalloc_size(const void *ctx, void *ptr, size_t old_size, size_t new_size)
{
if (unlikely(ptr == NULL))
return rzalloc_size(ctx, new_size);
assert(ralloc_parent(ptr) == ctx);
ptr = resize(ptr, new_size);
if (new_size > old_size)
memset((char *)ptr + old_size, 0, new_size - old_size);
return ptr;
}
void *
ralloc_array_size(const void *ctx, size_t size, unsigned count)
{
if (count > SIZE_MAX/size)
return NULL;
return ralloc_size(ctx, size * count);
}
void *
rzalloc_array_size(const void *ctx, size_t size, unsigned count)
{
if (count > SIZE_MAX/size)
return NULL;
return rzalloc_size(ctx, size * count);
}
void *
reralloc_array_size(const void *ctx, void *ptr, size_t size, unsigned count)
{
if (count > SIZE_MAX/size)
return NULL;
return reralloc_size(ctx, ptr, size * count);
}
void *
rerzalloc_array_size(const void *ctx, void *ptr, size_t size,
unsigned old_count, unsigned new_count)
{
if (new_count > SIZE_MAX/size)
return NULL;
return rerzalloc_size(ctx, ptr, size * old_count, size * new_count);
}
void
ralloc_free(void *ptr)
{
ralloc_header *info;
if (ptr == NULL)
return;
info = get_header(ptr);
unlink_block(info);
unsafe_free(info);
}
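/* Sketch (illustrative): a typical ralloc hierarchy. Freeing the root
 * context releases every descendant in a single call.
 */
static inline void example_ralloc_tree(void)
{
   void *ctx = ralloc_context(NULL);
   char *name = ralloc_strdup(ctx, "swapchain");
   int *counters = ralloc_array(ctx, int, 16);
   (void)name;
   (void)counters;
   ralloc_free(ctx); /* frees ctx, name and counters together */
}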
static void
unlink_block(ralloc_header *info)
{
/* Unlink from parent & siblings */
if (info->parent != NULL) {
if (info->parent->child == info)
info->parent->child = info->next;
if (info->prev != NULL)
info->prev->next = info->next;
if (info->next != NULL)
info->next->prev = info->prev;
}
info->parent = NULL;
info->prev = NULL;
info->next = NULL;
}
static void
unsafe_free(ralloc_header *info)
{
/* Recursively free any children...don't waste time unlinking them. */
ralloc_header *temp;
while (info->child != NULL) {
temp = info->child;
info->child = temp->next;
unsafe_free(temp);
}
/* Free the block itself. Call the destructor first, if any. */
if (info->destructor != NULL)
info->destructor(PTR_FROM_HEADER(info));
free(info);
}
void
ralloc_steal(const void *new_ctx, void *ptr)
{
ralloc_header *info, *parent;
if (unlikely(ptr == NULL))
return;
info = get_header(ptr);
parent = new_ctx ? get_header(new_ctx) : NULL;
unlink_block(info);
add_child(parent, info);
}
void
ralloc_adopt(const void *new_ctx, void *old_ctx)
{
ralloc_header *new_info, *old_info, *child;
if (unlikely(old_ctx == NULL))
return;
old_info = get_header(old_ctx);
new_info = get_header(new_ctx);
/* If there are no children, bail. */
if (unlikely(old_info->child == NULL))
return;
/* Set all the children's parent to new_ctx; get a pointer to the last child. */
for (child = old_info->child; child->next != NULL; child = child->next) {
child->parent = new_info;
}
child->parent = new_info;
/* Connect the two lists together; parent them to new_ctx; make old_ctx empty. */
child->next = new_info->child;
if (child->next)
child->next->prev = child;
new_info->child = old_info->child;
old_info->child = NULL;
}
void *
ralloc_parent(const void *ptr)
{
ralloc_header *info;
if (unlikely(ptr == NULL))
return NULL;
info = get_header(ptr);
return info->parent ? PTR_FROM_HEADER(info->parent) : NULL;
}
void
ralloc_set_destructor(const void *ptr, void(*destructor)(void *))
{
ralloc_header *info = get_header(ptr);
info->destructor = destructor;
}
char *
ralloc_strdup(const void *ctx, const char *str)
{
size_t n;
char *ptr;
if (unlikely(str == NULL))
return NULL;
n = strlen(str);
ptr = ralloc_array(ctx, char, n + 1);
memcpy(ptr, str, n);
ptr[n] = '\0';
return ptr;
}
char *
ralloc_strndup(const void *ctx, const char *str, size_t max)
{
size_t n;
char *ptr;
if (unlikely(str == NULL))
return NULL;
n = strnlen(str, max);
ptr = ralloc_array(ctx, char, n + 1);
memcpy(ptr, str, n);
ptr[n] = '\0';
return ptr;
}
/* helper routine for strcat/strncat - n is the exact amount to copy */
static bool
cat(char **dest, const char *str, size_t n)
{
char *both;
size_t existing_length;
assert(dest != NULL && *dest != NULL);
existing_length = strlen(*dest);
both = resize(*dest, existing_length + n + 1);
if (unlikely(both == NULL))
return false;
memcpy(both + existing_length, str, n);
both[existing_length + n] = '\0';
*dest = both;
return true;
}
bool
ralloc_strcat(char **dest, const char *str)
{
return cat(dest, str, strlen(str));
}
bool
ralloc_strncat(char **dest, const char *str, size_t n)
{
return cat(dest, str, strnlen(str, n));
}
bool
ralloc_str_append(char **dest, const char *str,
size_t existing_length, size_t str_size)
{
char *both;
assert(dest != NULL && *dest != NULL);
both = resize(*dest, existing_length + str_size + 1);
if (unlikely(both == NULL))
return false;
memcpy(both + existing_length, str, str_size);
both[existing_length + str_size] = '\0';
*dest = both;
return true;
}
char *
ralloc_asprintf(const void *ctx, const char *fmt, ...)
{
char *ptr;
va_list args;
va_start(args, fmt);
ptr = ralloc_vasprintf(ctx, fmt, args);
va_end(args);
return ptr;
}
/* Return the length of the string that would be generated by a printf-style
* format and argument list, not including the \0 byte.
*/
static size_t
printf_length(const char *fmt, va_list untouched_args)
{
int size;
char junk;
/* Make a copy of the va_list so the original caller can still use it */
va_list args;
va_copy(args, untouched_args);
#ifdef _WIN32
/* We need to use _vscprintf to calculate the size, as vsnprintf returns -1
* if the number of characters to write is greater than count.
*/
size = _vscprintf(fmt, args);
(void)junk;
#else
size = vsnprintf(&junk, 1, fmt, args);
#endif
assert(size >= 0);
va_end(args);
return size;
}
char *
ralloc_vasprintf(const void *ctx, const char *fmt, va_list args)
{
size_t size = printf_length(fmt, args) + 1;
char *ptr = ralloc_size(ctx, size);
if (ptr != NULL)
vsnprintf(ptr, size, fmt, args);
return ptr;
}
bool
ralloc_asprintf_append(char **str, const char *fmt, ...)
{
bool success;
va_list args;
va_start(args, fmt);
success = ralloc_vasprintf_append(str, fmt, args);
va_end(args);
return success;
}
bool
ralloc_vasprintf_append(char **str, const char *fmt, va_list args)
{
size_t existing_length;
assert(str != NULL);
existing_length = *str ? strlen(*str) : 0;
return ralloc_vasprintf_rewrite_tail(str, &existing_length, fmt, args);
}
bool
ralloc_asprintf_rewrite_tail(char **str, size_t *start, const char *fmt, ...)
{
bool success;
va_list args;
va_start(args, fmt);
success = ralloc_vasprintf_rewrite_tail(str, start, fmt, args);
va_end(args);
return success;
}
bool
ralloc_vasprintf_rewrite_tail(char **str, size_t *start, const char *fmt,
va_list args)
{
size_t new_length;
char *ptr;
assert(str != NULL);
if (unlikely(*str == NULL)) {
// Assuming a NULL context is probably bad, but it's expected behavior.
*str = ralloc_vasprintf(NULL, fmt, args);
*start = strlen(*str);
return true;
}
new_length = printf_length(fmt, args);
ptr = resize(*str, *start + new_length + 1);
if (unlikely(ptr == NULL))
return false;
vsnprintf(ptr + *start, new_length + 1, fmt, args);
*str = ptr;
*start += new_length;
return true;
}
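/* Sketch (illustrative): appending with a tracked offset avoids the
 * strlen() that ralloc_asprintf_append pays on every call.
 */
static inline char *example_build_string(void *ctx)
{
   char *s = ralloc_strdup(ctx, "");
   size_t len = 0;
   for (int i = 0; i < 4; i++)
      ralloc_asprintf_rewrite_tail(&s, &len, "%d,", i);
   return s; /* "0,1,2,3," with len == 8 */
}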
/***************************************************************************
* Linear allocator for short-lived allocations.
***************************************************************************
*
* The allocator consists of a parent node (2K buffer), which requires
* a ralloc parent, and child nodes (allocations). Child nodes can't be freed
* directly, because the parent doesn't track them. You have to release
* the parent node in order to release all its children.
*
* The allocator uses a fixed-sized buffer with a monotonically increasing
* offset after each allocation. If the buffer is all used, another buffer
* is allocated, sharing the same ralloc parent, so all buffers are at
* the same level in the ralloc hierarchy.
*
* The linear parent node is always the first buffer and keeps track of all
* other buffers.
*/
#define MIN_LINEAR_BUFSIZE 2048
#define SUBALLOC_ALIGNMENT 8
#define LMAGIC 0x87b9c7d3
struct
#ifdef _MSC_VER
__declspec(align(8))
#elif defined(__LP64__)
__attribute__((aligned(16)))
#else
__attribute__((aligned(8)))
#endif
linear_header {
#ifndef NDEBUG
unsigned magic; /* for debugging */
#endif
unsigned offset; /* points to the first unused byte in the buffer */
unsigned size; /* size of the buffer */
void *ralloc_parent; /* new buffers will use this */
struct linear_header *next; /* next buffer if we have more */
struct linear_header *latest; /* the only buffer that has free space */
/* After this structure, the buffer begins.
* Each suballocation consists of linear_size_chunk as its header followed
* by the suballocation, so it goes:
*
* - linear_size_chunk
* - allocated space
* - linear_size_chunk
* - allocated space
* etc.
*
* linear_size_chunk is only needed by linear_realloc.
*/
};
struct linear_size_chunk {
unsigned size; /* for realloc */
unsigned _padding;
};
typedef struct linear_header linear_header;
typedef struct linear_size_chunk linear_size_chunk;
#define LINEAR_PARENT_TO_HEADER(parent) \
(linear_header*) \
((char*)(parent) - sizeof(linear_size_chunk) - sizeof(linear_header))
/* Allocate the linear buffer with its header. */
static linear_header *
create_linear_node(void *ralloc_ctx, unsigned min_size)
{
linear_header *node;
min_size += sizeof(linear_size_chunk);
if (likely(min_size < MIN_LINEAR_BUFSIZE))
min_size = MIN_LINEAR_BUFSIZE;
node = ralloc_size(ralloc_ctx, sizeof(linear_header) + min_size);
if (unlikely(!node))
return NULL;
#ifndef NDEBUG
node->magic = LMAGIC;
#endif
node->offset = 0;
node->size = min_size;
node->ralloc_parent = ralloc_ctx;
node->next = NULL;
node->latest = node;
return node;
}
void *
linear_alloc_child(void *parent, unsigned size)
{
linear_header *first = LINEAR_PARENT_TO_HEADER(parent);
linear_header *latest = first->latest;
linear_header *new_node;
linear_size_chunk *ptr;
unsigned full_size;
assert(first->magic == LMAGIC);
assert(!latest->next);
size = ALIGN_POT(size, SUBALLOC_ALIGNMENT);
full_size = sizeof(linear_size_chunk) + size;
if (unlikely(latest->offset + full_size > latest->size)) {
/* allocate a new node */
new_node = create_linear_node(latest->ralloc_parent, size);
if (unlikely(!new_node))
return NULL;
first->latest = new_node;
latest->latest = new_node;
latest->next = new_node;
latest = new_node;
}
ptr = (linear_size_chunk *)((char*)&latest[1] + latest->offset);
ptr->size = size;
latest->offset += full_size;
assert((uintptr_t)&ptr[1] % SUBALLOC_ALIGNMENT == 0);
return &ptr[1];
}
void *
linear_alloc_parent(void *ralloc_ctx, unsigned size)
{
linear_header *node;
if (unlikely(!ralloc_ctx))
return NULL;
size = ALIGN_POT(size, SUBALLOC_ALIGNMENT);
node = create_linear_node(ralloc_ctx, size);
if (unlikely(!node))
return NULL;
return linear_alloc_child((char*)node +
sizeof(linear_header) +
sizeof(linear_size_chunk), size);
}
void *
linear_zalloc_child(void *parent, unsigned size)
{
void *ptr = linear_alloc_child(parent, size);
if (likely(ptr))
memset(ptr, 0, size);
return ptr;
}
void *
linear_zalloc_parent(void *parent, unsigned size)
{
void *ptr = linear_alloc_parent(parent, size);
if (likely(ptr))
memset(ptr, 0, size);
return ptr;
}
void
linear_free_parent(void *ptr)
{
linear_header *node;
if (unlikely(!ptr))
return;
node = LINEAR_PARENT_TO_HEADER(ptr);
assert(node->magic == LMAGIC);
while (node) {
void *ptr = node;
node = node->next;
ralloc_free(ptr);
}
}
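/* Sketch (illustrative): the linear allocator trades individual frees for
 * cheap bump-pointer allocation; everything is released with the parent.
 */
static inline void example_linear_usage(void *ralloc_ctx)
{
   void *lin = linear_alloc_parent(ralloc_ctx, 0);
   if (!lin)
      return;
   char *a = linear_alloc_child(lin, 32);
   char *b = linear_zalloc_child(lin, 64);
   (void)a;
   (void)b;
   linear_free_parent(lin); /* releases lin, a and b at once */
}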
void
ralloc_steal_linear_parent(void *new_ralloc_ctx, void *ptr)
{
linear_header *node;
if (unlikely(!ptr))
return;
node = LINEAR_PARENT_TO_HEADER(ptr);
assert(node->magic == LMAGIC);
while (node) {
ralloc_steal(new_ralloc_ctx, node);
node->ralloc_parent = new_ralloc_ctx;
node = node->next;
}
}
void *
ralloc_parent_of_linear_parent(void *ptr)
{
linear_header *node = LINEAR_PARENT_TO_HEADER(ptr);
assert(node->magic == LMAGIC);
return node->ralloc_parent;
}
void *
linear_realloc(void *parent, void *old, unsigned new_size)
{
unsigned old_size = 0;
ralloc_header *new_ptr;
new_ptr = linear_alloc_child(parent, new_size);
if (unlikely(!old))
return new_ptr;
old_size = ((linear_size_chunk*)old)[-1].size;
if (likely(new_ptr && old_size))
memcpy(new_ptr, old, MIN2(old_size, new_size));
return new_ptr;
}
/* All code below is pretty much copied from ralloc and only the alloc
* calls are different.
*/
char *
linear_strdup(void *parent, const char *str)
{
unsigned n;
char *ptr;
if (unlikely(!str))
return NULL;
n = strlen(str);
ptr = linear_alloc_child(parent, n + 1);
if (unlikely(!ptr))
return NULL;
memcpy(ptr, str, n);
ptr[n] = '\0';
return ptr;
}
char *
linear_asprintf(void *parent, const char *fmt, ...)
{
char *ptr;
va_list args;
va_start(args, fmt);
ptr = linear_vasprintf(parent, fmt, args);
va_end(args);
return ptr;
}
char *
linear_vasprintf(void *parent, const char *fmt, va_list args)
{
unsigned size = printf_length(fmt, args) + 1;
char *ptr = linear_alloc_child(parent, size);
if (ptr != NULL)
vsnprintf(ptr, size, fmt, args);
return ptr;
}
bool
linear_asprintf_append(void *parent, char **str, const char *fmt, ...)
{
bool success;
va_list args;
va_start(args, fmt);
success = linear_vasprintf_append(parent, str, fmt, args);
va_end(args);
return success;
}
bool
linear_vasprintf_append(void *parent, char **str, const char *fmt, va_list args)
{
size_t existing_length;
assert(str != NULL);
existing_length = *str ? strlen(*str) : 0;
return linear_vasprintf_rewrite_tail(parent, str, &existing_length, fmt, args);
}
bool
linear_asprintf_rewrite_tail(void *parent, char **str, size_t *start,
const char *fmt, ...)
{
bool success;
va_list args;
va_start(args, fmt);
success = linear_vasprintf_rewrite_tail(parent, str, start, fmt, args);
va_end(args);
return success;
}
bool
linear_vasprintf_rewrite_tail(void *parent, char **str, size_t *start,
const char *fmt, va_list args)
{
size_t new_length;
char *ptr;
assert(str != NULL);
if (unlikely(*str == NULL)) {
*str = linear_vasprintf(parent, fmt, args);
*start = strlen(*str);
return true;
}
new_length = printf_length(fmt, args);
ptr = linear_realloc(parent, *str, *start + new_length + 1);
if (unlikely(ptr == NULL))
return false;
vsnprintf(ptr + *start, new_length + 1, fmt, args);
*str = ptr;
*start += new_length;
return true;
}
/* helper routine for strcat/strncat - n is the exact amount to copy */
static bool
linear_cat(void *parent, char **dest, const char *str, unsigned n)
{
char *both;
unsigned existing_length;
assert(dest != NULL && *dest != NULL);
existing_length = strlen(*dest);
both = linear_realloc(parent, *dest, existing_length + n + 1);
if (unlikely(both == NULL))
return false;
memcpy(both + existing_length, str, n);
both[existing_length + n] = '\0';
*dest = both;
return true;
}
bool
linear_strcat(void *parent, char **dest, const char *str)
{
return linear_cat(parent, dest, str, strlen(str));
}

@ -1,604 +0,0 @@
/*
* Copyright © 2010 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/**
* \file ralloc.h
*
* ralloc: a recursive memory allocator
*
* The ralloc memory allocator creates a hierarchy of allocated
* objects. Every allocation is in reference to some parent, and
* every allocated object can in turn be used as the parent of a
* subsequent allocation. This allows for extremely convenient
* discarding of an entire tree/sub-tree of allocations by calling
* ralloc_free on any particular object to free it and all of its
* children.
*
* The conceptual working of ralloc was directly inspired by Andrew
* Tridgell's talloc, but ralloc is an independent implementation
* released under the MIT license and tuned for Mesa.
*
* talloc is more sophisticated than ralloc in that it includes reference
* counting and useful debugging features. However, it is released under
* a non-permissive open source license.
*/
#ifndef RALLOC_H
#define RALLOC_H
#include <stddef.h>
#include <stdarg.h>
#include <stdbool.h>
#include "macros.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* \def ralloc(ctx, type)
* Allocate a new object chained off of the given context.
*
* This is equivalent to:
* \code
* ((type *) ralloc_size(ctx, sizeof(type)))
* \endcode
*/
#define ralloc(ctx, type) ((type *) ralloc_size(ctx, sizeof(type)))
/**
* \def rzalloc(ctx, type)
* Allocate a new object out of the given context and initialize it to zero.
*
* This is equivalent to:
* \code
* ((type *) rzalloc_size(ctx, sizeof(type)))
* \endcode
*/
#define rzalloc(ctx, type) ((type *) rzalloc_size(ctx, sizeof(type)))
/**
* Allocate a new ralloc context.
*
* While any ralloc'd pointer can be used as a context, sometimes it is useful
* to simply allocate a context with no associated memory.
*
* It is equivalent to:
* \code
* ralloc_size(ctx, 0)
* \endcode
*/
void *ralloc_context(const void *ctx);
/**
* Allocate memory chained off of the given context.
*
* This is the core allocation routine which is used by all others. It
* simply allocates storage for \p size bytes and returns the pointer,
* similar to \c malloc.
*/
void *ralloc_size(const void *ctx, size_t size) MALLOCLIKE;
/**
* Allocate zero-initialized memory chained off of the given context.
*
* This is similar to \c calloc with a size of 1.
*/
void *rzalloc_size(const void *ctx, size_t size) MALLOCLIKE;
/**
* Resize a piece of ralloc-managed memory, preserving data.
*
* Similar to \c realloc. Unlike C89, passing 0 for \p size does not free the
* memory. Instead, it resizes it to a 0-byte ralloc context, just like
* calling ralloc_size(ctx, 0). This is different from talloc.
*
* \param ctx The context to use for new allocation. If \p ptr != NULL,
* it must be the same as ralloc_parent(\p ptr).
* \param ptr Pointer to the memory to be resized. May be NULL.
* \param size The amount of memory to allocate, in bytes.
*/
void *reralloc_size(const void *ctx, void *ptr, size_t size);
/**
* Resize a ralloc-managed array, preserving data and initializing any newly
* allocated data to zero.
*
* Similar to \c realloc. Unlike C89, passing 0 for \p size does not free the
* memory. Instead, it resizes it to a 0-byte ralloc context, just like
* calling ralloc_size(ctx, 0). This is different from talloc.
*
* \param ctx The context to use for new allocation. If \p ptr != NULL,
* it must be the same as ralloc_parent(\p ptr).
* \param ptr Pointer to the memory to be resized. May be NULL.
* \param old_size The amount of memory in the previous allocation, in bytes.
* \param new_size The amount of memory to allocate, in bytes.
*/
void *rerzalloc_size(const void *ctx, void *ptr,
size_t old_size, size_t new_size);
/// \defgroup array Array Allocators @{
/**
* \def ralloc_array(ctx, type, count)
* Allocate an array of objects chained off the given context.
*
* Similar to \c calloc, but does not initialize the memory to zero.
*
* More than a convenience function, this also checks for integer overflow when
* multiplying \c sizeof(type) and \p count. This is necessary for security.
*
* This is equivalent to:
* \code
* ((type *) ralloc_array_size(ctx, sizeof(type), count))
* \endcode
*/
#define ralloc_array(ctx, type, count) \
((type *) ralloc_array_size(ctx, sizeof(type), count))
/**
* \def rzalloc_array(ctx, type, count)
* Allocate a zero-initialized array chained off the given context.
*
* Similar to \c calloc.
*
* More than a convenience function, this also checks for integer overflow when
* multiplying \c sizeof(type) and \p count. This is necessary for security.
*
* This is equivalent to:
* \code
* ((type *) rzalloc_array_size(ctx, sizeof(type), count))
* \endcode
*/
#define rzalloc_array(ctx, type, count) \
((type *) rzalloc_array_size(ctx, sizeof(type), count))
/**
* \def reralloc(ctx, ptr, type, count)
* Resize a ralloc-managed array, preserving data.
*
* Similar to \c realloc. Unlike C89, passing 0 for \p size does not free the
* memory. Instead, it resizes it to a 0-byte ralloc context, just like
* calling ralloc_size(ctx, 0). This is different from talloc.
*
* More than a convenience function, this also checks for integer overflow when
* multiplying \c sizeof(type) and \p count. This is necessary for security.
*
* \param ctx The context to use for new allocation. If \p ptr != NULL,
* it must be the same as ralloc_parent(\p ptr).
* \param ptr Pointer to the array to be resized. May be NULL.
* \param type The element type.
* \param count The number of elements to allocate.
*/
#define reralloc(ctx, ptr, type, count) \
((type *) reralloc_array_size(ctx, ptr, sizeof(type), count))
/**
* \def rerzalloc(ctx, ptr, type, count)
* Resize a ralloc-managed array, preserving data and initializing any newly
* allocated data to zero.
*
* Similar to \c realloc. Unlike C89, passing 0 for \p size does not free the
* memory. Instead, it resizes it to a 0-byte ralloc context, just like
* calling ralloc_size(ctx, 0). This is different from talloc.
*
* More than a convenience function, this also checks for integer overflow when
* multiplying \c sizeof(type) and \p count. This is necessary for security.
*
* \param ctx The context to use for new allocation. If \p ptr != NULL,
* it must be the same as ralloc_parent(\p ptr).
* \param ptr Pointer to the array to be resized. May be NULL.
* \param type The element type.
* \param old_count The number of elements in the previous allocation.
* \param new_count The number of elements to allocate.
*/
#define rerzalloc(ctx, ptr, type, old_count, new_count) \
((type *) rerzalloc_array_size(ctx, ptr, sizeof(type), old_count, new_count))
/**
* Allocate memory for an array chained off the given context.
*
* Similar to \c calloc, but does not initialize the memory to zero.
*
* More than a convenience function, this also checks for integer overflow when
* multiplying \p size and \p count. This is necessary for security.
*/
void *ralloc_array_size(const void *ctx, size_t size, unsigned count) MALLOCLIKE;
/**
* Allocate a zero-initialized array chained off the given context.
*
* Similar to \c calloc.
*
* More than a convenience function, this also checks for integer overflow when
* multiplying \p size and \p count. This is necessary for security.
*/
void *rzalloc_array_size(const void *ctx, size_t size, unsigned count) MALLOCLIKE;
/**
* Resize a ralloc-managed array, preserving data.
*
* Similar to \c realloc. Unlike C89, passing 0 for \p size does not free the
* memory. Instead, it resizes it to a 0-byte ralloc context, just like
* calling ralloc_size(ctx, 0). This is different from talloc.
*
* More than a convenience function, this also checks for integer overflow when
* multiplying \c sizeof(type) and \p count. This is necessary for security.
*
* \param ctx The context to use for new allocation. If \p ptr != NULL,
* it must be the same as ralloc_parent(\p ptr).
* \param ptr Pointer to the array to be resized. May be NULL.
* \param size The size of an individual element.
* \param count The number of elements to allocate.
*
* \return The resized pointer, or NULL if allocation failed.
*/
void *reralloc_array_size(const void *ctx, void *ptr, size_t size,
unsigned count);
/**
* Resize a ralloc-managed array, preserving data and initializing any newly
* allocated data to zero.
*
* Similar to \c realloc. Unlike C89, passing 0 for \p size does not free the
* memory. Instead, it resizes it to a 0-byte ralloc context, just like
* calling ralloc_size(ctx, 0). This is different from talloc.
*
* More than a convenience function, this also checks for integer overflow when
* multiplying \c sizeof(type) and \p count. This is necessary for security.
*
* \param ctx The context to use for new allocation. If \p ptr != NULL,
* it must be the same as ralloc_parent(\p ptr).
* \param ptr Pointer to the array to be resized. May be NULL.
* \param size The size of an individual element.
* \param old_count The number of elements in the previous allocation.
* \param new_count The number of elements to allocate.
*
* \return The resized pointer, or NULL if allocation failed.
*/
void *rerzalloc_array_size(const void *ctx, void *ptr, size_t size,
unsigned old_count, unsigned new_count);
/// @}
/**
* Free a piece of ralloc-managed memory.
*
* This will also free the memory of any children allocated to this context.
*/
void ralloc_free(void *ptr);
/**
* "Steal" memory from one context, changing it to another.
*
* This changes \p ptr's context to \p new_ctx. This is quite useful if
* memory is allocated out of a temporary context.
*/
void ralloc_steal(const void *new_ctx, void *ptr);
/**
* Reparent all children from one context to another.
*
* This effectively calls ralloc_steal(new_ctx, child) for all children of \p old_ctx.
*/
void ralloc_adopt(const void *new_ctx, void *old_ctx);
/**
* Return the given pointer's ralloc context.
*/
void *ralloc_parent(const void *ptr);
/**
* Set a callback to occur just before an object is freed.
*/
void ralloc_set_destructor(const void *ptr, void(*destructor)(void *));
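/*
 * Sketch (illustrative): a destructor runs just before the block's memory
 * is released, which is useful for non-ralloc resources (hypothetical
 * names):
 *
 *    static void close_log(void *ptr) { fclose(((struct logger *)ptr)->fp); }
 *    ...
 *    struct logger *log = ralloc(ctx, struct logger);
 *    ralloc_set_destructor(log, close_log);
 */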
/// \defgroup string String Functions @{
/**
* Duplicate a string, allocating the memory from the given context.
*/
char *ralloc_strdup(const void *ctx, const char *str) MALLOCLIKE;
/**
* Duplicate a string, allocating the memory from the given context.
*
* Like \c strndup, at most \p n characters are copied. If \p str is longer
* than \p n characters, \p n are copied, and a terminating \c '\0' byte is added.
*/
char *ralloc_strndup(const void *ctx, const char *str, size_t n) MALLOCLIKE;
/**
* Concatenate two strings, allocating the necessary space.
*
* This appends \p str to \p *dest, similar to \c strcat, using ralloc_resize
* to expand \p *dest to the appropriate size. \p dest will be updated to the
* new pointer unless allocation fails.
*
* The result will always be null-terminated.
*
* \return True unless allocation failed.
*/
bool ralloc_strcat(char **dest, const char *str);
/**
* Concatenate two strings, allocating the necessary space.
*
* This appends at most \p n bytes of \p str to \p *dest, using ralloc_resize
* to expand \p *dest to the appropriate size. \p dest will be updated to the
* new pointer unless allocation fails.
*
* The result will always be null-terminated; \p str does not need to be null
* terminated if it is longer than \p n.
*
* \return True unless allocation failed.
*/
bool ralloc_strncat(char **dest, const char *str, size_t n);
/**
* Concatenate two strings, allocating the necessary space.
*
* This appends \p n bytes of \p str to \p *dest, using ralloc_resize
* to expand \p *dest to the appropriate size. \p dest will be updated to the
* new pointer unless allocation fails.
*
* The result will always be null-terminated.
*
* This function differs from ralloc_strcat() and ralloc_strncat() in that it
* does not do any strlen() calls which can become costly on large strings.
*
* \return True unless allocation failed.
*/
bool
ralloc_str_append(char **dest, const char *str,
size_t existing_length, size_t str_size);
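/* Usage sketch: the caller tracks the length itself to skip strlen():
*
*    size_t len = 0;
*    char *s = ralloc_strdup(ctx, "");
*    ralloc_str_append(&s, "abc", len, 3); len += 3;
*    ralloc_str_append(&s, "de", len, 2);  len += 2;
*    // s == "abcde"
*/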
/**
* Print to a string.
*
* This is analogous to \c sprintf, but allocates enough space (using \p ctx
* as the context) for the resulting string.
*
* \return The newly allocated string.
*/
char *ralloc_asprintf (const void *ctx, const char *fmt, ...) PRINTFLIKE(2, 3) MALLOCLIKE;
/**
* Print to a string, given a va_list.
*
* This is analogous to \c vsprintf, but allocates enough space (using \p ctx
* as the context) for the resulting string.
*
* \return The newly allocated string.
*/
char *ralloc_vasprintf(const void *ctx, const char *fmt, va_list args) MALLOCLIKE;
/**
* Rewrite the tail of an existing string, starting at a given index.
*
* Overwrites the contents of *str starting at \p start with newly formatted
* text, including a new null-terminator. Allocates more memory as necessary.
*
* This can be used to append formatted text when the length of the existing
* string is already known, saving a strlen() call.
*
* \sa ralloc_asprintf_append
*
* \param str The string to be updated.
* \param start The index to start appending new data at.
* \param fmt A printf-style formatting string
*
* \p str will be updated to the new pointer unless allocation fails.
* \p start will be increased by the length of the newly formatted text.
*
* \return True unless allocation failed.
*/
bool ralloc_asprintf_rewrite_tail(char **str, size_t *start,
const char *fmt, ...)
PRINTFLIKE(3, 4);
/**
* Rewrite the tail of an existing string, starting at a given index.
*
* Overwrites the contents of *str starting at \p start with newly formatted
* text, including a new null-terminator. Allocates more memory as necessary.
*
* This can be used to append formatted text when the length of the existing
* string is already known, saving a strlen() call.
*
* \sa ralloc_vasprintf_append
*
* \param str The string to be updated.
* \param start The index to start appending new data at.
* \param fmt A printf-style formatting string
* \param args A va_list containing the data to be formatted
*
* \p str will be updated to the new pointer unless allocation fails.
* \p start will be increased by the length of the newly formatted text.
*
* \return True unless allocation failed.
*/
bool ralloc_vasprintf_rewrite_tail(char **str, size_t *start, const char *fmt,
va_list args);
/**
* Append formatted text to the supplied string.
*
* This is equivalent to
* \code
* ralloc_asprintf_rewrite_tail(str, strlen(*str), fmt, ...)
* \endcode
*
* \sa ralloc_asprintf
* \sa ralloc_asprintf_rewrite_tail
* \sa ralloc_strcat
*
* \p str will be updated to the new pointer unless allocation fails.
*
* \return True unless allocation failed.
*/
bool ralloc_asprintf_append (char **str, const char *fmt, ...)
PRINTFLIKE(2, 3);
/**
* Append formatted text to the supplied string, given a va_list.
*
* This is equivalent to
* \code
* ralloc_vasprintf_rewrite_tail(str, strlen(*str), fmt, args)
* \endcode
*
* \sa ralloc_vasprintf
* \sa ralloc_vasprintf_rewrite_tail
* \sa ralloc_strcat
*
* \p str will be updated to the new pointer unless allocation fails.
*
* \return True unless allocation failed.
*/
bool ralloc_vasprintf_append(char **str, const char *fmt, va_list args);
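/* Usage sketch ('ctx', 'n_errors', 'n_warnings' are placeholders):
*
*    char *s = ralloc_asprintf(ctx, "%d errors", n_errors);
*    ralloc_asprintf_append(&s, ", %d warnings", n_warnings);
*    // On failure the append returns false and *s keeps its old pointer.
*/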
/// @}
/**
* Declare C++ new and delete operators which use ralloc.
*
* Placing this macro in the body of a class makes it possible to do:
*
* TYPE *var = new(mem_ctx) TYPE(...);
* delete var;
*
* which is more idiomatic in C++ than calling ralloc.
*/
#define DECLARE_ALLOC_CXX_OPERATORS_TEMPLATE(TYPE, ALLOC_FUNC) \
private: \
static void _ralloc_destructor(void *p) \
{ \
reinterpret_cast<TYPE *>(p)->TYPE::~TYPE(); \
} \
public: \
static void* operator new(size_t size, void *mem_ctx) \
{ \
void *p = ALLOC_FUNC(mem_ctx, size); \
assert(p != NULL); \
if (!HAS_TRIVIAL_DESTRUCTOR(TYPE)) \
ralloc_set_destructor(p, _ralloc_destructor); \
return p; \
} \
\
static void operator delete(void *p) \
{ \
/* The object's destructor is guaranteed to have already been \
* called by the delete operator at this point -- Make sure it's \
* not called again. \
*/ \
if (!HAS_TRIVIAL_DESTRUCTOR(TYPE)) \
ralloc_set_destructor(p, NULL); \
ralloc_free(p); \
}
#define DECLARE_RALLOC_CXX_OPERATORS(type) \
DECLARE_ALLOC_CXX_OPERATORS_TEMPLATE(type, ralloc_size)
#define DECLARE_RZALLOC_CXX_OPERATORS(type) \
DECLARE_ALLOC_CXX_OPERATORS_TEMPLATE(type, rzalloc_size)
#define DECLARE_LINEAR_ALLOC_CXX_OPERATORS(type) \
DECLARE_ALLOC_CXX_OPERATORS_TEMPLATE(type, linear_alloc_child)
#define DECLARE_LINEAR_ZALLOC_CXX_OPERATORS(type) \
DECLARE_ALLOC_CXX_OPERATORS_TEMPLATE(type, linear_zalloc_child)
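/* Usage sketch (hypothetical class 'node'):
*
*    class node {
*    public:
*       DECLARE_RALLOC_CXX_OPERATORS(node)
*       int value;
*    };
*
*    node *n = new(mem_ctx) node();   // allocated out of mem_ctx
*    delete n;                        // or let ralloc_free(mem_ctx) do it
*/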
/**
* Do a fast allocation from the linear buffer, also known as the child node
* from the allocator's point of view. It can't be freed directly. You have
* to free the parent or the ralloc parent.
*
* \param parent parent node of the linear allocator
* \param size size to allocate (max 32 bits)
*/
void *linear_alloc_child(void *parent, unsigned size);
/**
* Allocate a parent node that will hold linear buffers. The returned
* allocation is actually the first child node, but it's also the handle
* of the parent node. Use it for all child node allocations.
*
* \param ralloc_ctx ralloc context, must not be NULL
* \param size size to allocate (max 32 bits)
*/
void *linear_alloc_parent(void *ralloc_ctx, unsigned size);
/**
* Same as linear_alloc_child, but also clears memory.
*/
void *linear_zalloc_child(void *parent, unsigned size);
/**
* Same as linear_alloc_parent, but also clears memory.
*/
void *linear_zalloc_parent(void *ralloc_ctx, unsigned size);
/**
* Free the linear parent node. This will free all child nodes too.
* Freeing the ralloc parent will also free this.
*/
void linear_free_parent(void *ptr);
/**
* Same as ralloc_steal, but steals the linear parent node.
*/
void ralloc_steal_linear_parent(void *new_ralloc_ctx, void *ptr);
/**
* Return the ralloc parent of the linear parent node.
*/
void *ralloc_parent_of_linear_parent(void *ptr);
/**
* Same as realloc, except that the linear allocator doesn't free child nodes,
* so it's reduced to memory duplication. It's used in places where
* reallocation is required. Don't use it often; it's much slower than
* realloc.
*/
void *linear_realloc(void *parent, void *old, unsigned new_size);
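/* Usage sketch: one parent per burst of small allocations, freed in one
* shot. The pointer returned by linear_alloc_parent() is both the first
* child allocation and the handle passed to the *_child() calls:
*
*    void *lin = linear_alloc_parent(ralloc_ctx, 16);
*    int *a = (int *) linear_alloc_child(lin, 4 * sizeof(int));
*    int *b = (int *) linear_zalloc_child(lin, 4 * sizeof(int));
*    linear_free_parent(lin);   // frees a and b as well
*/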
/* The functions below have the same semantics as their ralloc counterparts,
* except that they always allocate a linear child node.
*/
char *linear_strdup(void *parent, const char *str);
char *linear_asprintf(void *parent, const char *fmt, ...);
char *linear_vasprintf(void *parent, const char *fmt, va_list args);
bool linear_asprintf_append(void *parent, char **str, const char *fmt, ...);
bool linear_vasprintf_append(void *parent, char **str, const char *fmt,
va_list args);
bool linear_asprintf_rewrite_tail(void *parent, char **str, size_t *start,
const char *fmt, ...);
bool linear_vasprintf_rewrite_tail(void *parent, char **str, size_t *start,
const char *fmt, va_list args);
bool linear_strcat(void *parent, char **dest, const char *str);
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif

@ -1,147 +0,0 @@
/*
* Copyright © 2015 Intel
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef _SIMPLE_MTX_H
#define _SIMPLE_MTX_H
#include "../util/futex.h"
#include "../c11/threads.h"
#if defined(__GNUC__) && defined(HAVE_LINUX_FUTEX_H)
/* simple_mtx_t - Fast, simple mutex
*
* While modern pthread mutexes are very fast (implemented using futex), they
* still incur a call to an external DSO and the overhead of the generality
* and features of pthread mutexes. Most mutexes in mesa only need
* lock/unlock, and the idea here is that we can inline the atomic operation
* and make the fast case just two instructions. Mutexes are subtle and
* finicky to implement, so we carefully copy the implementation from Ulrich
* Drepper's well-written and well-reviewed paper:
*
* "Futexes Are Tricky"
* http://www.akkadia.org/drepper/futex.pdf
*
* We implement "mutex3", which gives us a mutex that has no syscalls on
* uncontended lock or unlock. Further, the uncontended lock boils down to a
* locked cmpxchg and an untaken branch, and the uncontended unlock is just a
* locked decrement and an untaken branch. We use __builtin_expect() to
* indicate that contention is unlikely so that gcc will put the contention
* code out of the main code flow.
*
* A fast mutex only supports lock/unlock; it can't be recursive or used with
* condition variables.
*/
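/* val follows Drepper's "mutex3" encoding:
*   0 - unlocked
*   1 - locked, no waiters
*   2 - locked, with (possible) waiters
*/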
typedef struct {
uint32_t val;
} simple_mtx_t;
#define _SIMPLE_MTX_INITIALIZER_NP { 0 }
#define _SIMPLE_MTX_INVALID_VALUE 0xd0d0d0d0
static inline void
simple_mtx_init(simple_mtx_t *mtx, ASSERTED int type)
{
assert(type == mtx_plain);
mtx->val = 0;
}
static inline void
simple_mtx_destroy(ASSERTED simple_mtx_t *mtx)
{
#ifndef NDEBUG
mtx->val = _SIMPLE_MTX_INVALID_VALUE;
#endif
}
static inline void
simple_mtx_lock(simple_mtx_t *mtx)
{
uint32_t c;
c = __sync_val_compare_and_swap(&mtx->val, 0, 1);
assert(c != _SIMPLE_MTX_INVALID_VALUE);
if (__builtin_expect(c != 0, 0)) {
if (c != 2)
c = __sync_lock_test_and_set(&mtx->val, 2);
while (c != 0) {
futex_wait(&mtx->val, 2, NULL);
c = __sync_lock_test_and_set(&mtx->val, 2);
}
}
}
static inline void
simple_mtx_unlock(simple_mtx_t *mtx)
{
uint32_t c;
c = __sync_fetch_and_sub(&mtx->val, 1);
assert(c != _SIMPLE_MTX_INVALID_VALUE);
if (__builtin_expect(c != 1, 0)) {
mtx->val = 0;
futex_wake(&mtx->val, 1);
}
}
#else
typedef mtx_t simple_mtx_t;
#define _SIMPLE_MTX_INITIALIZER_NP _MTX_INITIALIZER_NP
static inline void
simple_mtx_init(simple_mtx_t *mtx, int type)
{
mtx_init(mtx, type);
}
static inline void
simple_mtx_destroy(simple_mtx_t *mtx)
{
mtx_destroy(mtx);
}
static inline void
simple_mtx_lock(simple_mtx_t *mtx)
{
mtx_lock(mtx);
}
static inline void
simple_mtx_unlock(simple_mtx_t *mtx)
{
mtx_unlock(mtx);
}
#endif
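/* Usage is identical on both code paths:
*
*    static simple_mtx_t lock = _SIMPLE_MTX_INITIALIZER_NP;
*
*    simple_mtx_lock(&lock);
*    ...critical section: no recursion, no condition variables...
*    simple_mtx_unlock(&lock);
*/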
#endif

@ -27,6 +27,10 @@
#include <thread>
#include <chrono>
#include <unordered_map>
#include <mutex>
#include <vector>
#include <list>
#include <algorithm> // std::find, used in overlay_QueueSubmit below
#include <vulkan/vulkan.h>
#include <vulkan/vk_layer.h>
@ -37,12 +40,10 @@
#include "font_default.h"
// #include "util/debug.h"
#include "mesa/util/hash_table.h"
#include "mesa/util/list.h"
#include "mesa/util/ralloc.h"
#include <inttypes.h>
#include "mesa/util/macros.h"
#include "mesa/util/os_time.h"
#include "mesa/util/os_socket.h"
#include "mesa/util/simple_mtx.h"
#include "vk_enum_to_str.h"
#include <vulkan/vk_util.h>
@ -107,8 +108,7 @@ struct device_data {
struct queue_data *graphic_queue;
struct queue_data **queues;
uint32_t n_queues;
std::vector<struct queue_data *> queues;
/* For a single frame */
struct frame_stat frame_stats;
@ -116,6 +116,7 @@ struct device_data {
};
/* Mapped from VkCommandBuffer */
struct queue_data;
struct command_buffer_data {
struct device_data *device;
@ -127,7 +128,7 @@ struct command_buffer_data {
struct frame_stat stats;
struct list_head link; /* link into queue_data::running_command_buffer */
struct queue_data *queue_data;
};
/* Mapped from VkQueue */
@ -141,12 +142,10 @@ struct queue_data {
VkFence queries_fence;
struct list_head running_command_buffer;
std::list<command_buffer_data *> running_command_buffer;
};
struct overlay_draw {
struct list_head link;
VkCommandBuffer command_buffer;
VkSemaphore semaphore;
@ -169,10 +168,9 @@ struct swapchain_data {
unsigned width, height;
VkFormat format;
uint32_t n_images;
VkImage *images;
VkImageView *image_views;
VkFramebuffer *framebuffers;
std::vector<VkImage> images;
std::vector<VkImageView> image_views;
std::vector<VkFramebuffer> framebuffers;
VkRenderPass render_pass;
@ -187,7 +185,7 @@ struct swapchain_data {
VkCommandPool command_pool;
struct list_head draws; /* List of struct overlay_draw */
std::list<overlay_draw *> draws; /* List of struct overlay_draw */
ImFont* font = nullptr;
ImFont* font1 = nullptr;
@ -240,42 +238,32 @@ static const VkQueryPipelineStatisticFlags overlay_query_flags =
VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT;
#define OVERLAY_QUERY_COUNT (11)
static struct hash_table_u64 *vk_object_to_data = NULL;
static simple_mtx_t vk_object_to_data_mutex = _SIMPLE_MTX_INITIALIZER_NP;
// single global lock, for simplicity
std::mutex global_lock;
typedef std::lock_guard<std::mutex> scoped_lock;
std::unordered_map<uint64_t, void *> vk_object_to_data;
thread_local ImGuiContext* __MesaImGui;
static inline void ensure_vk_object_map(void)
{
if (!vk_object_to_data)
vk_object_to_data = _mesa_hash_table_u64_create(NULL);
}
#define HKEY(obj) ((uint64_t)(obj))
#define FIND(type, obj) ((type *)find_object_data(HKEY(obj)))
#define FIND(type, obj) (reinterpret_cast<type *>(find_object_data(HKEY(obj))))
static void *find_object_data(uint64_t obj)
{
simple_mtx_lock(&vk_object_to_data_mutex);
ensure_vk_object_map();
void *data = _mesa_hash_table_u64_search(vk_object_to_data, obj);
simple_mtx_unlock(&vk_object_to_data_mutex);
return data;
scoped_lock lk(global_lock);
auto it = vk_object_to_data.find(obj);
return it != vk_object_to_data.end() ? it->second : nullptr;
}
static void map_object(uint64_t obj, void *data)
{
simple_mtx_lock(&vk_object_to_data_mutex);
ensure_vk_object_map();
_mesa_hash_table_u64_insert(vk_object_to_data, obj, data);
simple_mtx_unlock(&vk_object_to_data_mutex);
scoped_lock lk(global_lock);
vk_object_to_data[obj] = data;
}
static void unmap_object(uint64_t obj)
{
simple_mtx_lock(&vk_object_to_data_mutex);
_mesa_hash_table_u64_remove(vk_object_to_data, obj);
simple_mtx_unlock(&vk_object_to_data_mutex);
scoped_lock lk(global_lock);
vk_object_to_data.erase(obj);
}
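/* Illustrative lookup: any Vulkan handle is widened to 64 bits with HKEY()
* and resolved with FIND(), e.g.
*
*    struct device_data *device_data = FIND(struct device_data, device);
*/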
/**/
@ -376,10 +364,10 @@ static void instance_data_map_physical_devices(struct instance_data *instance_da
&physicalDeviceCount,
NULL);
VkPhysicalDevice *physicalDevices = (VkPhysicalDevice *) malloc(sizeof(VkPhysicalDevice) * physicalDeviceCount);
std::vector<VkPhysicalDevice> physicalDevices(physicalDeviceCount);
instance_data->vtable.EnumeratePhysicalDevices(instance_data->instance,
&physicalDeviceCount,
physicalDevices);
physicalDevices.data());
for (uint32_t i = 0; i < physicalDeviceCount; i++) {
if (map)
@ -387,14 +375,12 @@ static void instance_data_map_physical_devices(struct instance_data *instance_da
else
unmap_object(HKEY(physicalDevices[i]));
}
free(physicalDevices);
}
/**/
static struct device_data *new_device_data(VkDevice device, struct instance_data *instance)
{
struct device_data *data = rzalloc(NULL, struct device_data);
struct device_data *data = new device_data();
data->instance = instance;
data->device = device;
map_object(HKEY(data->device), data);
@ -406,13 +392,12 @@ static struct queue_data *new_queue_data(VkQueue queue,
uint32_t family_index,
struct device_data *device_data)
{
struct queue_data *data = rzalloc(device_data, struct queue_data);
struct queue_data *data = new queue_data();
data->device = device_data;
data->queue = queue;
data->flags = family_props->queueFlags;
data->timestamp_mask = (1ull << family_props->timestampValidBits) - 1;
data->family_index = family_index;
list_inithead(&data->running_command_buffer);
map_object(HKEY(data->queue), data);
/* Fence synchronizing access to queries on that queue. */
@ -435,26 +420,26 @@ static void destroy_queue(struct queue_data *data)
struct device_data *device_data = data->device;
device_data->vtable.DestroyFence(device_data->device, data->queries_fence, NULL);
unmap_object(HKEY(data->queue));
ralloc_free(data);
delete data;
}
static void device_map_queues(struct device_data *data,
const VkDeviceCreateInfo *pCreateInfo)
{
uint32_t n_queues = 0;
for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++)
data->n_queues += pCreateInfo->pQueueCreateInfos[i].queueCount;
data->queues = ralloc_array(data, struct queue_data *, data->n_queues);
n_queues += pCreateInfo->pQueueCreateInfos[i].queueCount;
data->queues.resize(n_queues);
struct instance_data *instance_data = data->instance;
uint32_t n_family_props;
instance_data->vtable.GetPhysicalDeviceQueueFamilyProperties(data->physical_device,
&n_family_props,
NULL);
VkQueueFamilyProperties *family_props =
(VkQueueFamilyProperties *)malloc(sizeof(VkQueueFamilyProperties) * n_family_props);
std::vector<VkQueueFamilyProperties> family_props(n_family_props);
instance_data->vtable.GetPhysicalDeviceQueueFamilyProperties(data->physical_device,
&n_family_props,
family_props);
family_props.data());
uint32_t queue_index = 0;
for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
@ -471,20 +456,18 @@ static void device_map_queues(struct device_data *data,
pCreateInfo->pQueueCreateInfos[i].queueFamilyIndex, data);
}
}
free(family_props);
}
static void device_unmap_queues(struct device_data *data)
{
for (uint32_t i = 0; i < data->n_queues; i++)
destroy_queue(data->queues[i]);
for (auto q : data->queues)
destroy_queue(q);
}
static void destroy_device_data(struct device_data *data)
{
unmap_object(HKEY(data->device));
ralloc_free(data);
delete data;
}
/**/
@ -494,13 +477,12 @@ static struct command_buffer_data *new_command_buffer_data(VkCommandBuffer cmd_b
uint32_t query_index,
struct device_data *device_data)
{
struct command_buffer_data *data = rzalloc(NULL, struct command_buffer_data);
struct command_buffer_data *data = new command_buffer_data();
data->device = device_data;
data->cmd_buffer = cmd_buffer;
data->level = level;
data->timestamp_query_pool = timestamp_query_pool;
data->query_index = query_index;
list_inithead(&data->link);
map_object(HKEY(data->cmd_buffer), data);
return data;
}
@ -508,8 +490,9 @@ static struct command_buffer_data *new_command_buffer_data(VkCommandBuffer cmd_b
static void destroy_command_buffer_data(struct command_buffer_data *data)
{
unmap_object(HKEY(data->cmd_buffer));
list_delinit(&data->link);
ralloc_free(data);
if (data->queue_data)
data->queue_data->running_command_buffer.remove(data);
delete data;
}
/**/
@ -517,11 +500,10 @@ static struct swapchain_data *new_swapchain_data(VkSwapchainKHR swapchain,
struct device_data *device_data)
{
struct instance_data *instance_data = device_data->instance;
struct swapchain_data *data = rzalloc(NULL, struct swapchain_data);
struct swapchain_data *data = new swapchain_data();
data->device = device_data;
data->swapchain = swapchain;
data->window_size = ImVec2(instance_data->params.width, instance_data->params.height);
list_inithead(&data->draws);
map_object(HKEY(data->swapchain), data);
return data;
}
@ -529,27 +511,27 @@ static struct swapchain_data *new_swapchain_data(VkSwapchainKHR swapchain,
static void destroy_swapchain_data(struct swapchain_data *data)
{
unmap_object(HKEY(data->swapchain));
ralloc_free(data);
delete data;
}
struct overlay_draw *get_overlay_draw(struct swapchain_data *data)
{
struct device_data *device_data = data->device;
struct overlay_draw *draw = list_is_empty(&data->draws) ?
NULL : list_first_entry(&data->draws, struct overlay_draw, link);
struct overlay_draw *draw = data->draws.empty() ?
nullptr : data->draws.front();
VkSemaphoreCreateInfo sem_info = {};
sem_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
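/* Reuse the oldest draw if the GPU is done with it (fence signaled),
* rotating it to the back of the list; otherwise allocate a new one below. */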
if (draw && device_data->vtable.GetFenceStatus(device_data->device, draw->fence) == VK_SUCCESS) {
list_del(&draw->link);
VK_CHECK(device_data->vtable.ResetFences(device_data->device,
1, &draw->fence));
list_addtail(&draw->link, &data->draws);
data->draws.pop_front();
data->draws.push_back(draw);
return draw;
}
draw = rzalloc(data, struct overlay_draw);
draw = new overlay_draw();
VkCommandBufferAllocateInfo cmd_buffer_info = {};
cmd_buffer_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
@ -573,7 +555,7 @@ struct overlay_draw *get_overlay_draw(struct swapchain_data *data)
VK_CHECK(device_data->vtable.CreateSemaphore(device_data->device, &sem_info,
NULL, &draw->semaphore));
list_addtail(&draw->link, &data->draws);
data->draws.push_back(draw);
return draw;
}
@ -2007,19 +1989,27 @@ static void setup_swapchain_data(struct swapchain_data *data,
setup_swapchain_data_pipeline(data);
uint32_t n_images = 0;
VK_CHECK(device_data->vtable.GetSwapchainImagesKHR(device_data->device,
data->swapchain,
&data->n_images,
&n_images,
NULL));
data->images = ralloc_array(data, VkImage, data->n_images);
data->image_views = ralloc_array(data, VkImageView, data->n_images);
data->framebuffers = ralloc_array(data, VkFramebuffer, data->n_images);
data->images.resize(n_images);
data->image_views.resize(n_images);
data->framebuffers.resize(n_images);
VK_CHECK(device_data->vtable.GetSwapchainImagesKHR(device_data->device,
data->swapchain,
&data->n_images,
data->images));
&n_images,
data->images.data()));
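/* The second GetSwapchainImagesKHR call may report a different count than
* the first; keep the arrays in sync with what was actually returned. */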
if (n_images != data->images.size()) {
data->images.resize(n_images);
data->image_views.resize(n_images);
data->framebuffers.resize(n_images);
}
/* Image views */
VkImageViewCreateInfo view_info = {};
@ -2031,7 +2021,7 @@ static void setup_swapchain_data(struct swapchain_data *data,
view_info.components.b = VK_COMPONENT_SWIZZLE_B;
view_info.components.a = VK_COMPONENT_SWIZZLE_A;
view_info.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };
for (uint32_t i = 0; i < data->n_images; i++) {
for (size_t i = 0; i < data->images.size(); i++) {
view_info.image = data->images[i];
VK_CHECK(device_data->vtable.CreateImageView(device_data->device,
&view_info, NULL,
@ -2048,7 +2038,7 @@ static void setup_swapchain_data(struct swapchain_data *data,
fb_info.width = data->width;
fb_info.height = data->height;
fb_info.layers = 1;
for (uint32_t i = 0; i < data->n_images; i++) {
for (size_t i = 0; i < data->image_views.size(); i++) {
attachment[0] = data->image_views[i];
VK_CHECK(device_data->vtable.CreateFramebuffer(device_data->device, &fb_info,
NULL, &data->framebuffers[i]));
@ -2068,16 +2058,17 @@ static void shutdown_swapchain_data(struct swapchain_data *data)
{
struct device_data *device_data = data->device;
list_for_each_entry_safe(struct overlay_draw, draw, &data->draws, link) {
for (auto draw : data->draws) {
device_data->vtable.DestroySemaphore(device_data->device, draw->semaphore, NULL);
device_data->vtable.DestroyFence(device_data->device, draw->fence, NULL);
device_data->vtable.DestroyBuffer(device_data->device, draw->vertex_buffer, NULL);
device_data->vtable.DestroyBuffer(device_data->device, draw->index_buffer, NULL);
device_data->vtable.FreeMemory(device_data->device, draw->vertex_buffer_mem, NULL);
device_data->vtable.FreeMemory(device_data->device, draw->index_buffer_mem, NULL);
delete draw;
}
for (uint32_t i = 0; i < data->n_images; i++) {
for (size_t i = 0; i < data->images.size(); i++) {
device_data->vtable.DestroyImageView(device_data->device, data->image_views[i], NULL);
device_data->vtable.DestroyFramebuffer(device_data->device, data->framebuffers[i], NULL);
}
@ -2181,7 +2172,7 @@ static VkResult overlay_QueuePresentKHR(
device_data->frame_stats.stats[OVERLAY_PARAM_ENABLED_frame]++;
if (list_length(&queue_data->running_command_buffer) > 0) {
if (!queue_data->running_command_buffer.empty()) {
/* Before getting the query results, make sure the operations have
* completed.
*/
@ -2193,9 +2184,9 @@ static VkResult overlay_QueuePresentKHR(
VK_FALSE, UINT64_MAX));
/* Now get the results. */
list_for_each_entry_safe(struct command_buffer_data, cmd_buffer_data,
&queue_data->running_command_buffer, link) {
list_delinit(&cmd_buffer_data->link);
while (!queue_data->running_command_buffer.empty()) {
auto cmd_buffer_data = queue_data->running_command_buffer.front();
queue_data->running_command_buffer.pop_front();
if (cmd_buffer_data->timestamp_query_pool) {
uint64_t gpu_timestamps[2] = { 0 };
@ -2472,9 +2463,10 @@ static VkResult overlay_QueueSubmit(
if (!cmd_buffer_data->timestamp_query_pool)
continue;
if (list_is_empty(&cmd_buffer_data->link)) {
list_addtail(&cmd_buffer_data->link,
&queue_data->running_command_buffer);
auto& q = queue_data->running_command_buffer;
if (std::find(q.begin(), q.end(), cmd_buffer_data) == q.end()) {
cmd_buffer_data->queue_data = queue_data;
q.push_back(cmd_buffer_data);
} else {
fprintf(stderr, "Command buffer submitted multiple times before present.\n"
"This could lead to invalid data.\n");
