Mirror of https://github.com/nyanmisaka/ffmpeg-rockchip.git (synced 2025-10-18 14:54:19 +08:00)
lavu/buffer: drop USE_ATOMICS
USE_ATOMICS is only set when no thread implementation is enabled, in which case no locking mechanism can be expected from FFmpeg. It also conflicts with the incoming use of stdatomic.
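For reference, the avpriv_atomic_* helpers that appear in the diff below map onto C11 <stdatomic.h> operations. The sketch below only illustrates that mapping under simplified, made-up types (Node, refcount, head are placeholders here, not FFmpeg structures); it is not the actual FFmpeg conversion.

#include <stdatomic.h>
#include <stddef.h>

/* Illustrative stand-ins only -- these are not FFmpeg types. */
typedef struct Node { struct Node *next; } Node;

static atomic_int    refcount = 1;    /* plays the role of pool->refcount */
static Node *_Atomic head     = NULL; /* plays the role of pool->pool     */

/* roughly avpriv_atomic_int_add_and_fetch(&refcount, delta):
 * atomically add and return the new value */
static int ref_add_and_fetch(int delta)
{
    return atomic_fetch_add(&refcount, delta) + delta;
}

/* roughly avpriv_atomic_ptr_cas(&head, oldval, newval): performs a
 * compare-and-swap and returns the value that was previously stored */
static Node *head_cas(Node *oldval, Node *newval)
{
    Node *prev = oldval;
    atomic_compare_exchange_strong(&head, &prev, newval);
    return prev;
}

With that mapping, the refcount updates kept by this commit translate directly to stdatomic, while the removed get_pool()/add_to_pool() pair was a lock-free list built on the pointer CAS.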
@@ -284,44 +284,6 @@ void av_buffer_pool_uninit(AVBufferPool **ppool)
         buffer_pool_free(pool);
 }
 
-#if USE_ATOMICS
-/* remove the whole buffer list from the pool and return it */
-static BufferPoolEntry *get_pool(AVBufferPool *pool)
-{
-    BufferPoolEntry *cur = *(void * volatile *)&pool->pool, *last = NULL;
-
-    while (cur != last) {
-        last = cur;
-        cur = avpriv_atomic_ptr_cas((void * volatile *)&pool->pool, last, NULL);
-        if (!cur)
-            return NULL;
-    }
-
-    return cur;
-}
-
-static void add_to_pool(BufferPoolEntry *buf)
-{
-    AVBufferPool *pool;
-    BufferPoolEntry *cur, *end = buf;
-
-    if (!buf)
-        return;
-    pool = buf->pool;
-
-    while (end->next)
-        end = end->next;
-
-    while (avpriv_atomic_ptr_cas((void * volatile *)&pool->pool, NULL, buf)) {
-        /* pool is not empty, retrieve it and append it to our list */
-        cur = get_pool(pool);
-        end->next = cur;
-        while (end->next)
-            end = end->next;
-    }
-}
-#endif
-
 static void pool_release_buffer(void *opaque, uint8_t *data)
 {
     BufferPoolEntry *buf = opaque;
@@ -330,14 +292,10 @@ static void pool_release_buffer(void *opaque, uint8_t *data)
     if(CONFIG_MEMORY_POISONING)
         memset(buf->data, FF_MEMORY_POISON, pool->size);
 
-#if USE_ATOMICS
-    add_to_pool(buf);
-#else
     ff_mutex_lock(&pool->mutex);
     buf->next = pool->pool;
     pool->pool = buf;
     ff_mutex_unlock(&pool->mutex);
-#endif
 
     if (!avpriv_atomic_int_add_and_fetch(&pool->refcount, -1))
         buffer_pool_free(pool);
@@ -369,11 +327,6 @@ static AVBufferRef *pool_alloc_buffer(AVBufferPool *pool)
     ret->buffer->opaque = buf;
     ret->buffer->free = pool_release_buffer;
 
-#if USE_ATOMICS
-    avpriv_atomic_int_add_and_fetch(&pool->refcount, 1);
-    avpriv_atomic_int_add_and_fetch(&pool->nb_allocated, 1);
-#endif
-
     return ret;
 }
 
@@ -382,29 +335,6 @@ AVBufferRef *av_buffer_pool_get(AVBufferPool *pool)
     AVBufferRef *ret;
     BufferPoolEntry *buf;
 
-#if USE_ATOMICS
-    /* check whether the pool is empty */
-    buf = get_pool(pool);
-    if (!buf && pool->refcount <= pool->nb_allocated) {
-        av_log(NULL, AV_LOG_DEBUG, "Pool race dectected, spining to avoid overallocation and eventual OOM\n");
-        while (!buf && avpriv_atomic_int_get(&pool->refcount) <= avpriv_atomic_int_get(&pool->nb_allocated))
-            buf = get_pool(pool);
-    }
-
-    if (!buf)
-        return pool_alloc_buffer(pool);
-
-    /* keep the first entry, return the rest of the list to the pool */
-    add_to_pool(buf->next);
-    buf->next = NULL;
-
-    ret = av_buffer_create(buf->data, pool->size, pool_release_buffer,
-                           buf, 0);
-    if (!ret) {
-        add_to_pool(buf);
-        return NULL;
-    }
-#else
     ff_mutex_lock(&pool->mutex);
     buf = pool->pool;
     if (buf) {
@@ -418,7 +348,6 @@ AVBufferRef *av_buffer_pool_get(AVBufferPool *pool)
         ret = pool_alloc_buffer(pool);
     }
     ff_mutex_unlock(&pool->mutex);
-#endif
 
     if (ret)
         avpriv_atomic_int_add_and_fetch(&pool->refcount, 1);
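What remains after this commit is the mutex-protected free list visible in the #else branches above (ff_mutex_lock/ff_mutex_unlock around pool->pool). Below is a minimal standalone sketch of that pattern, using a plain pthread_mutex_t and made-up Entry/Pool types rather than FFmpeg's internal ff_mutex wrapper and BufferPoolEntry.

#include <pthread.h>
#include <stddef.h>

/* Made-up stand-ins for BufferPoolEntry / AVBufferPool. */
typedef struct Entry {
    struct Entry *next;
} Entry;

typedef struct Pool {
    pthread_mutex_t mutex;
    Entry          *free_list;   /* plays the role of pool->pool */
} Pool;

/* ~ the #else branch of pool_release_buffer(): push one entry back
 * onto the free list under the mutex */
static void pool_put(Pool *pool, Entry *e)
{
    pthread_mutex_lock(&pool->mutex);
    e->next         = pool->free_list;
    pool->free_list = e;
    pthread_mutex_unlock(&pool->mutex);
}

/* ~ the #else branch of av_buffer_pool_get(): pop one entry, or return
 * NULL if the list is empty and the caller must allocate a new buffer */
static Entry *pool_get(Pool *pool)
{
    Entry *e;
    pthread_mutex_lock(&pool->mutex);
    e = pool->free_list;
    if (e)
        pool->free_list = e->next;
    pthread_mutex_unlock(&pool->mutex);
    return e;
}

In this sketch, pool_get() returning NULL corresponds to the case in av_buffer_pool_get() where the list is empty and pool_alloc_buffer() is called to create a fresh buffer.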