Skip to content
Commits on Source (79)
......@@ -55,7 +55,7 @@
latest-meson:
stage: build
image: base/archlinux:latest
image: archlinux/base:latest
before_script:
- pacman -Syu --noconfirm --needed
base-devel
......@@ -69,7 +69,7 @@ latest-meson:
latest-autotools:
stage: build
image: base/archlinux:latest
image: archlinux/base:latest
artifacts: *artifacts-autotools
before_script:
- pacman -Syu --noconfirm --needed
......
......@@ -21,6 +21,11 @@
# IN THE SOFTWARE.
#
LIBDRM_ANDROID_MAJOR_VERSION := $(word 1, $(subst ., , $(PLATFORM_VERSION)))
ifneq ($(filter 2 4, $(LIBDRM_ANDROID_MAJOR_VERSION)),)
$(error "Android 4.4 and earlier not supported")
endif
LIBDRM_COMMON_MK := $(call my-dir)/Android.common.mk
LOCAL_PATH := $(call my-dir)
......
......@@ -38,6 +38,7 @@ amdgpu_cs_create_syncobj2
amdgpu_cs_ctx_create
amdgpu_cs_ctx_create2
amdgpu_cs_ctx_free
amdgpu_cs_ctx_override_priority
amdgpu_cs_destroy_semaphore
amdgpu_cs_destroy_syncobj
amdgpu_cs_export_syncobj
......@@ -51,9 +52,15 @@ amdgpu_cs_submit
amdgpu_cs_submit_raw
amdgpu_cs_submit_raw2
amdgpu_cs_syncobj_export_sync_file
amdgpu_cs_syncobj_export_sync_file2
amdgpu_cs_syncobj_import_sync_file
amdgpu_cs_syncobj_import_sync_file2
amdgpu_cs_syncobj_query
amdgpu_cs_syncobj_reset
amdgpu_cs_syncobj_signal
amdgpu_cs_syncobj_timeline_signal
amdgpu_cs_syncobj_timeline_wait
amdgpu_cs_syncobj_transfer
amdgpu_cs_syncobj_wait
amdgpu_cs_wait_fences
amdgpu_cs_wait_semaphore
......
......@@ -87,8 +87,8 @@ enum amdgpu_bo_handle_type {
/** DMA-buf fd handle */
amdgpu_bo_handle_type_dma_buf_fd = 2,
/** KMS handle, but re-importing as a DMABUF handle through
* drmPrimeHandleToFD is forbidden. (Glamor does that)
/** Deprecated in favour of and same behaviour as
* amdgpu_bo_handle_type_kms, use that instead of this
*/
amdgpu_bo_handle_type_kms_noimport = 3,
};
......@@ -702,7 +702,7 @@ int amdgpu_find_bo_by_cpu_mapping(amdgpu_device_handle dev,
uint64_t *offset_in_bo);
/**
* Free previosuly allocated memory
* Free previously allocated memory
*
* \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
* \param buf_handle - \c [in] Buffer handle to free
......@@ -732,7 +732,7 @@ int amdgpu_bo_free(amdgpu_bo_handle buf_handle);
void amdgpu_bo_inc_ref(amdgpu_bo_handle bo);
/**
* Request CPU access to GPU accessable memory
* Request CPU access to GPU accessible memory
*
* \param buf_handle - \c [in] Buffer handle
* \param cpu - \c [out] CPU address to be used for access
......@@ -911,6 +911,21 @@ int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
*/
int amdgpu_cs_ctx_free(amdgpu_context_handle context);
/**
* Override the submission priority for the given context using a master fd.
*
* \param dev - \c [in] device handle
* \param context - \c [in] context handle for context id
* \param master_fd - \c [in] The master fd to authorize the override.
* \param priority - \c [in] The priority to assign to the context.
*
 * \return 0 on success or a negative POSIX error code on failure.
*/
int amdgpu_cs_ctx_override_priority(amdgpu_device_handle dev,
amdgpu_context_handle context,
int master_fd,
unsigned priority);
/**
* Query reset state for the specific GPU Context
*
......@@ -1263,7 +1278,7 @@ int amdgpu_read_mm_registers(amdgpu_device_handle dev, unsigned dword_offset,
* \notes \n
* It is client responsibility to correctly handle VA assignments and usage.
* Neither kernel driver nor libdrm_amdpgu are able to prevent and
* detect wrong va assignemnt.
* detect wrong va assignment.
*
* It is client responsibility to correctly handle multi-GPU cases and to pass
* the corresponding arrays of all devices handles where corresponding VA will
......@@ -1501,6 +1516,23 @@ int amdgpu_cs_syncobj_reset(amdgpu_device_handle dev,
int amdgpu_cs_syncobj_signal(amdgpu_device_handle dev,
const uint32_t *syncobjs, uint32_t syncobj_count);
/**
* Signal kernel timeline sync objects.
*
* \param dev - \c [in] device handle
* \param syncobjs - \c [in] array of sync object handles
* \param points - \c [in] array of timeline points
* \param syncobj_count - \c [in] number of handles in syncobjs
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
*
*/
int amdgpu_cs_syncobj_timeline_signal(amdgpu_device_handle dev,
const uint32_t *syncobjs,
uint64_t *points,
uint32_t syncobj_count);
/**
* Wait for one or all sync objects to signal.
*
......@@ -1521,6 +1553,45 @@ int amdgpu_cs_syncobj_wait(amdgpu_device_handle dev,
int64_t timeout_nsec, unsigned flags,
uint32_t *first_signaled);
/**
* Wait for one or all sync objects on their points to signal.
*
* \param dev - \c [in] self-explanatory
* \param handles - \c [in] array of sync object handles
* \param points - \c [in] array of sync points to wait
* \param num_handles - \c [in] self-explanatory
* \param timeout_nsec - \c [in] self-explanatory
* \param flags - \c [in] a bitmask of DRM_SYNCOBJ_WAIT_FLAGS_*
* \param first_signaled - \c [in] self-explanatory
*
* \return 0 on success\n
* -ETIME - Timeout
* <0 - Negative POSIX Error code
*
*/
int amdgpu_cs_syncobj_timeline_wait(amdgpu_device_handle dev,
uint32_t *handles, uint64_t *points,
unsigned num_handles,
int64_t timeout_nsec, unsigned flags,
uint32_t *first_signaled);
/**
* Query sync objects payloads.
*
* \param dev - \c [in] self-explanatory
* \param handles - \c [in] array of sync object handles
 * \param points - \c [out] array of sync points returned, which represent
 * the syncobj payloads.
* \param num_handles - \c [in] self-explanatory
*
* \return 0 on success\n
* -ETIME - Timeout
* <0 - Negative POSIX Error code
*
*/
int amdgpu_cs_syncobj_query(amdgpu_device_handle dev,
uint32_t *handles, uint64_t *points,
unsigned num_handles);
/**
* Export kernel sync object to shareable fd.
*
......@@ -1579,6 +1650,62 @@ int amdgpu_cs_syncobj_export_sync_file(amdgpu_device_handle dev,
int amdgpu_cs_syncobj_import_sync_file(amdgpu_device_handle dev,
uint32_t syncobj,
int sync_file_fd);
/**
* Export kernel timeline sync object to a sync_file.
*
* \param dev - \c [in] device handle
* \param syncobj - \c [in] sync object handle
* \param point - \c [in] timeline point
* \param flags - \c [in] flags
* \param sync_file_fd - \c [out] sync_file file descriptor.
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
*
*/
int amdgpu_cs_syncobj_export_sync_file2(amdgpu_device_handle dev,
uint32_t syncobj,
uint64_t point,
uint32_t flags,
int *sync_file_fd);
/**
* Import kernel timeline sync object from a sync_file.
*
* \param dev - \c [in] device handle
* \param syncobj - \c [in] sync object handle
* \param point - \c [in] timeline point
* \param sync_file_fd - \c [in] sync_file file descriptor.
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
*
*/
int amdgpu_cs_syncobj_import_sync_file2(amdgpu_device_handle dev,
uint32_t syncobj,
uint64_t point,
int sync_file_fd);
/**
 * Transfer between syncobjs.
*
* \param dev - \c [in] device handle
* \param dst_handle - \c [in] sync object handle
 * \param dst_point - \c [in] timeline point, 0 means dst is a binary syncobj
* \param src_handle - \c [in] sync object handle
 * \param src_point - \c [in] timeline point, 0 means src is a binary syncobj
* \param flags - \c [in] flags
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
*
*/
int amdgpu_cs_syncobj_transfer(amdgpu_device_handle dev,
uint32_t dst_handle,
uint64_t dst_point,
uint32_t src_handle,
uint64_t src_point,
uint32_t flags);
/**
* Export an amdgpu fence as a handle (syncobj or fd).
......
......@@ -39,13 +39,12 @@
#include "amdgpu_internal.h"
#include "util_math.h"
static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
uint32_t handle)
static int amdgpu_close_kms_handle(int fd, uint32_t handle)
{
struct drm_gem_close args = {};
args.handle = handle;
drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
return drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &args);
}
static int amdgpu_bo_create(amdgpu_device_handle dev,
......@@ -54,11 +53,18 @@ static int amdgpu_bo_create(amdgpu_device_handle dev,
amdgpu_bo_handle *buf_handle)
{
struct amdgpu_bo *bo;
int r;
bo = calloc(1, sizeof(struct amdgpu_bo));
if (!bo)
return -ENOMEM;
r = handle_table_insert(&dev->bo_handles, handle, bo);
if (r) {
free(bo);
return r;
}
atomic_set(&bo->refcount, 1);
bo->dev = dev;
bo->alloc_size = size;
......@@ -90,19 +96,14 @@ drm_public int amdgpu_bo_alloc(amdgpu_device_handle dev,
if (r)
goto out;
pthread_mutex_lock(&dev->bo_table_mutex);
r = amdgpu_bo_create(dev, alloc_buffer->alloc_size, args.out.handle,
buf_handle);
pthread_mutex_unlock(&dev->bo_table_mutex);
if (r) {
amdgpu_close_kms_handle(dev, args.out.handle);
goto out;
amdgpu_close_kms_handle(dev->fd, args.out.handle);
}
pthread_mutex_lock(&dev->bo_table_mutex);
r = handle_table_insert(&dev->bo_handles, (*buf_handle)->handle,
*buf_handle);
pthread_mutex_unlock(&dev->bo_table_mutex);
if (r)
amdgpu_bo_free(*buf_handle);
out:
return r;
}
......@@ -214,11 +215,8 @@ static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
bo->flink_name = flink.name;
if (bo->dev->flink_fd != bo->dev->fd) {
struct drm_gem_close args = {};
args.handle = handle;
drmIoctl(bo->dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &args);
}
if (bo->dev->flink_fd != bo->dev->fd)
amdgpu_close_kms_handle(bo->dev->flink_fd, handle);
pthread_mutex_lock(&bo->dev->bo_table_mutex);
r = handle_table_insert(&bo->dev->bo_flink_names, bo->flink_name, bo);
......@@ -261,7 +259,6 @@ drm_public int amdgpu_bo_import(amdgpu_device_handle dev,
struct amdgpu_bo_import_result *output)
{
struct drm_gem_open open_arg = {};
struct drm_gem_close close_arg = {};
struct amdgpu_bo *bo = NULL;
uint32_t handle = 0, flink_name = 0;
uint64_t alloc_size = 0;
......@@ -345,12 +342,12 @@ drm_public int amdgpu_bo_import(amdgpu_device_handle dev,
close(dma_fd);
if (r)
goto free_bo_handle;
close_arg.handle = open_arg.handle;
r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_CLOSE,
&close_arg);
r = amdgpu_close_kms_handle(dev->flink_fd,
open_arg.handle);
if (r)
goto free_bo_handle;
}
open_arg.handle = 0;
break;
case amdgpu_bo_handle_type_dma_buf_fd:
......@@ -368,15 +365,12 @@ drm_public int amdgpu_bo_import(amdgpu_device_handle dev,
if (r)
goto free_bo_handle;
r = handle_table_insert(&dev->bo_handles, bo->handle, bo);
if (r)
goto free_bo_handle;
if (flink_name) {
bo->flink_name = flink_name;
r = handle_table_insert(&dev->bo_flink_names, flink_name,
bo);
if (r)
goto remove_handle;
goto free_bo_handle;
}
......@@ -385,17 +379,14 @@ drm_public int amdgpu_bo_import(amdgpu_device_handle dev,
pthread_mutex_unlock(&dev->bo_table_mutex);
return 0;
remove_handle:
handle_table_remove(&dev->bo_handles, bo->handle);
free_bo_handle:
if (flink_name && !close_arg.handle && open_arg.handle) {
close_arg.handle = open_arg.handle;
drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &close_arg);
}
if (flink_name && open_arg.handle)
amdgpu_close_kms_handle(dev->flink_fd, open_arg.handle);
if (bo)
amdgpu_bo_free(bo);
else
amdgpu_close_kms_handle(dev, handle);
amdgpu_close_kms_handle(dev->fd, handle);
unlock:
pthread_mutex_unlock(&dev->bo_table_mutex);
return r;
......@@ -424,12 +415,13 @@ drm_public int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
amdgpu_bo_cpu_unmap(bo);
}
amdgpu_close_kms_handle(dev, bo->handle);
amdgpu_close_kms_handle(dev->fd, bo->handle);
pthread_mutex_destroy(&bo->cpu_access_mutex);
free(bo);
}
pthread_mutex_unlock(&dev->bo_table_mutex);
return 0;
}
......@@ -602,18 +594,13 @@ drm_public int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
if (r)
goto out;
pthread_mutex_lock(&dev->bo_table_mutex);
r = amdgpu_bo_create(dev, size, args.handle, buf_handle);
pthread_mutex_unlock(&dev->bo_table_mutex);
if (r) {
amdgpu_close_kms_handle(dev, args.handle);
goto out;
amdgpu_close_kms_handle(dev->fd, args.handle);
}
pthread_mutex_lock(&dev->bo_table_mutex);
r = handle_table_insert(&dev->bo_handles, (*buf_handle)->handle,
*buf_handle);
pthread_mutex_unlock(&dev->bo_table_mutex);
if (r)
amdgpu_bo_free(*buf_handle);
out:
return r;
}
......
......@@ -142,6 +142,31 @@ drm_public int amdgpu_cs_ctx_free(amdgpu_context_handle context)
return r;
}
/* Override the scheduling priority of an existing context, authorized
 * through a DRM master fd.  The ioctl is issued on master_fd; the device
 * fd and context id are carried in the request payload.
 * Returns 0 on success or a negative errno-style code.
 */
drm_public int amdgpu_cs_ctx_override_priority(amdgpu_device_handle dev,
					       amdgpu_context_handle context,
					       int master_fd,
					       unsigned priority)
{
	union drm_amdgpu_sched request;

	if (!dev || !context || master_fd < 0)
		return -EINVAL;

	memset(&request, 0, sizeof(request));
	request.in.op = AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE;
	request.in.fd = dev->fd;
	request.in.priority = priority;
	request.in.ctx_id = context->id;

	/* drmCommandWrite already returns 0 on success or a negative code. */
	return drmCommandWrite(master_fd, DRM_AMDGPU_SCHED,
			       &request, sizeof(request));
}
drm_public int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
uint32_t *state, uint32_t *hangs)
{
......@@ -649,6 +674,18 @@ drm_public int amdgpu_cs_syncobj_signal(amdgpu_device_handle dev,
return drmSyncobjSignal(dev->fd, syncobjs, syncobj_count);
}
/* Signal an array of timeline syncobjs at the given points.
 * Thin wrapper around drmSyncobjTimelineSignal on the device fd.
 */
drm_public int amdgpu_cs_syncobj_timeline_signal(amdgpu_device_handle dev,
						 const uint32_t *syncobjs,
						 uint64_t *points,
						 uint32_t syncobj_count)
{
	if (!dev)
		return -EINVAL;

	return drmSyncobjTimelineSignal(dev->fd, syncobjs, points,
					syncobj_count);
}
drm_public int amdgpu_cs_syncobj_wait(amdgpu_device_handle dev,
uint32_t *handles, unsigned num_handles,
int64_t timeout_nsec, unsigned flags,
......@@ -661,6 +698,29 @@ drm_public int amdgpu_cs_syncobj_wait(amdgpu_device_handle dev,
flags, first_signaled);
}
/* Wait for timeline syncobjs to reach the given points.
 * Thin wrapper around drmSyncobjTimelineWait; flags are the
 * DRM_SYNCOBJ_WAIT_FLAGS_* bits and first_signaled (optional) receives
 * the index of the first handle that signaled.
 */
drm_public int amdgpu_cs_syncobj_timeline_wait(amdgpu_device_handle dev,
					       uint32_t *handles, uint64_t *points,
					       unsigned num_handles,
					       int64_t timeout_nsec, unsigned flags,
					       uint32_t *first_signaled)
{
	if (!dev)
		return -EINVAL;

	return drmSyncobjTimelineWait(dev->fd, handles, points, num_handles,
				      timeout_nsec, flags, first_signaled);
}
/* Read back the current payload (timeline value) of each syncobj into
 * points[].  Thin wrapper around drmSyncobjQuery on the device fd.
 */
drm_public int amdgpu_cs_syncobj_query(amdgpu_device_handle dev,
				       uint32_t *handles, uint64_t *points,
				       unsigned num_handles)
{
	if (!dev)
		return -EINVAL;

	return drmSyncobjQuery(dev->fd, handles, points, num_handles);
}
drm_public int amdgpu_cs_export_syncobj(amdgpu_device_handle dev,
uint32_t handle,
int *shared_fd)
......@@ -701,6 +761,78 @@ drm_public int amdgpu_cs_syncobj_import_sync_file(amdgpu_device_handle dev,
return drmSyncobjImportSyncFile(dev->fd, syncobj, sync_file_fd);
}
/* Export a timeline point of a syncobj as a sync_file fd.
 * point == 0 means the syncobj is binary and can be exported directly;
 * otherwise the point is first transferred into a temporary binary
 * syncobj, which is exported and then destroyed.
 */
drm_public int amdgpu_cs_syncobj_export_sync_file2(amdgpu_device_handle dev,
						   uint32_t syncobj,
						   uint64_t point,
						   uint32_t flags,
						   int *sync_file_fd)
{
	uint32_t tmp_syncobj;
	int r;

	if (!dev)
		return -EINVAL;

	/* Binary syncobj: no transfer step needed. */
	if (point == 0)
		return drmSyncobjExportSyncFile(dev->fd, syncobj, sync_file_fd);

	r = drmSyncobjCreate(dev->fd, 0, &tmp_syncobj);
	if (r)
		return r;

	r = drmSyncobjTransfer(dev->fd, tmp_syncobj, 0,
			       syncobj, point, flags);
	if (!r)
		r = drmSyncobjExportSyncFile(dev->fd, tmp_syncobj, sync_file_fd);

	/* Destroy the temporary regardless of export success. */
	drmSyncobjDestroy(dev->fd, tmp_syncobj);
	return r;
}
/* Import a sync_file fd into a timeline point of a syncobj.
 * point == 0 means the syncobj is binary and the fd is imported directly;
 * otherwise the fd is imported into a temporary binary syncobj and then
 * transferred onto the requested timeline point.
 */
drm_public int amdgpu_cs_syncobj_import_sync_file2(amdgpu_device_handle dev,
						   uint32_t syncobj,
						   uint64_t point,
						   int sync_file_fd)
{
	uint32_t tmp_syncobj;
	int r;

	if (!dev)
		return -EINVAL;

	/* Binary syncobj: no transfer step needed. */
	if (point == 0)
		return drmSyncobjImportSyncFile(dev->fd, syncobj, sync_file_fd);

	r = drmSyncobjCreate(dev->fd, 0, &tmp_syncobj);
	if (r)
		return r;

	r = drmSyncobjImportSyncFile(dev->fd, tmp_syncobj, sync_file_fd);
	if (!r)
		r = drmSyncobjTransfer(dev->fd, syncobj, point,
				       tmp_syncobj, 0, 0);

	/* Destroy the temporary regardless of transfer success. */
	drmSyncobjDestroy(dev->fd, tmp_syncobj);
	return r;
}
/* Transfer a fence payload between syncobjs; a point of 0 on either side
 * denotes a binary syncobj.  Thin wrapper around drmSyncobjTransfer on
 * the device fd.
 */
drm_public int amdgpu_cs_syncobj_transfer(amdgpu_device_handle dev,
					  uint32_t dst_handle,
					  uint64_t dst_point,
					  uint32_t src_handle,
					  uint64_t src_point,
					  uint32_t flags)
{
	if (!dev)
		return -EINVAL;

	return drmSyncobjTransfer(dev->fd, dst_handle, dst_point,
				  src_handle, src_point, flags);
}
drm_public int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
amdgpu_context_handle context,
amdgpu_bo_list_handle bo_list_handle,
......@@ -708,12 +840,13 @@ drm_public int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
struct drm_amdgpu_cs_chunk *chunks,
uint64_t *seq_no)
{
union drm_amdgpu_cs cs = {0};
union drm_amdgpu_cs cs;
uint64_t *chunk_array;
int i, r;
if (num_chunks == 0)
return -EINVAL;
memset(&cs, 0, sizeof(cs));
chunk_array = alloca(sizeof(uint64_t) * num_chunks);
for (i = 0; i < num_chunks; i++)
chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
......@@ -738,10 +871,11 @@ drm_public int amdgpu_cs_submit_raw2(amdgpu_device_handle dev,
struct drm_amdgpu_cs_chunk *chunks,
uint64_t *seq_no)
{
union drm_amdgpu_cs cs = {0};
union drm_amdgpu_cs cs;
uint64_t *chunk_array;
int i, r;
memset(&cs, 0, sizeof(cs));
chunk_array = alloca(sizeof(uint64_t) * num_chunks);
for (i = 0; i < num_chunks; i++)
chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
......@@ -778,9 +912,10 @@ drm_public int amdgpu_cs_fence_to_handle(amdgpu_device_handle dev,
uint32_t what,
uint32_t *out_handle)
{
union drm_amdgpu_fence_to_handle fth = {0};
union drm_amdgpu_fence_to_handle fth;
int r;
memset(&fth, 0, sizeof(fth));
fth.in.fence.ctx_id = fence->context->id;
fth.in.fence.ip_type = fence->ip_type;
fth.in.fence.ip_instance = fence->ip_instance;
......
......@@ -43,8 +43,8 @@
#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))
static pthread_mutex_t fd_mutex = PTHREAD_MUTEX_INITIALIZER;
static amdgpu_device_handle fd_list;
static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;
static amdgpu_device_handle dev_list;
static int fd_compare(int fd1, int fd2)
{
......@@ -95,13 +95,13 @@ static int amdgpu_get_auth(int fd, int *auth)
static void amdgpu_device_free_internal(amdgpu_device_handle dev)
{
amdgpu_device_handle *node = &fd_list;
amdgpu_device_handle *node = &dev_list;
pthread_mutex_lock(&fd_mutex);
pthread_mutex_lock(&dev_mutex);
while (*node != dev && (*node)->next)
node = &(*node)->next;
*node = (*node)->next;
pthread_mutex_unlock(&fd_mutex);
pthread_mutex_unlock(&dev_mutex);
close(dev->fd);
if ((dev->flink_fd >= 0) && (dev->fd != dev->flink_fd))
......@@ -155,16 +155,16 @@ drm_public int amdgpu_device_initialize(int fd,
*device_handle = NULL;
pthread_mutex_lock(&fd_mutex);
pthread_mutex_lock(&dev_mutex);
r = amdgpu_get_auth(fd, &flag_auth);
if (r) {
fprintf(stderr, "%s: amdgpu_get_auth (1) failed (%i)\n",
__func__, r);
pthread_mutex_unlock(&fd_mutex);
pthread_mutex_unlock(&dev_mutex);
return r;
}
for (dev = fd_list; dev; dev = dev->next)
for (dev = dev_list; dev; dev = dev->next)
if (fd_compare(dev->fd, fd) == 0)
break;
......@@ -173,7 +173,7 @@ drm_public int amdgpu_device_initialize(int fd,
if (r) {
fprintf(stderr, "%s: amdgpu_get_auth (2) failed (%i)\n",
__func__, r);
pthread_mutex_unlock(&fd_mutex);
pthread_mutex_unlock(&dev_mutex);
return r;
}
if ((flag_auth) && (!flag_authexist)) {
......@@ -182,14 +182,14 @@ drm_public int amdgpu_device_initialize(int fd,
*major_version = dev->major_version;
*minor_version = dev->minor_version;
amdgpu_device_reference(device_handle, dev);
pthread_mutex_unlock(&fd_mutex);
pthread_mutex_unlock(&dev_mutex);
return 0;
}
dev = calloc(1, sizeof(struct amdgpu_device));
if (!dev) {
fprintf(stderr, "%s: calloc failed\n", __func__);
pthread_mutex_unlock(&fd_mutex);
pthread_mutex_unlock(&dev_mutex);
return -ENOMEM;
}
......@@ -265,9 +265,9 @@ drm_public int amdgpu_device_initialize(int fd,
*major_version = dev->major_version;
*minor_version = dev->minor_version;
*device_handle = dev;
dev->next = fd_list;
fd_list = dev;
pthread_mutex_unlock(&fd_mutex);
dev->next = dev_list;
dev_list = dev;
pthread_mutex_unlock(&dev_mutex);
return 0;
......@@ -275,7 +275,7 @@ cleanup:
if (dev->fd >= 0)
close(dev->fd);
free(dev);
pthread_mutex_unlock(&fd_mutex);
pthread_mutex_unlock(&dev_mutex);
return r;
}
......
......@@ -20,7 +20,7 @@
AC_PREREQ([2.63])
AC_INIT([libdrm],
[2.4.97],
[2.4.99],
[https://bugs.freedesktop.org/enter_bug.cgi?product=DRI],
[libdrm])
......@@ -541,7 +541,7 @@ else
AC_DEFINE(HAVE_VISIBILITY, 0)
fi
CFLAGS="$CFLAGS -include config.h"
CPPFLAGS="$CPPFLAGS -include config.h"
AC_SUBST(WARN_CFLAGS)
AC_CONFIG_FILES([
......
......@@ -4,6 +4,10 @@
# device_id, revision_id, product_name <-- single tab after comma
1.0.0
15DD, 81, AMD Ryzen Embedded V1807B with Radeon Vega Gfx
15DD, 82, AMD Ryzen Embedded V1756B with Radeon Vega Gfx
15DD, 83, AMD Ryzen Embedded V1605B with Radeon Vega Gfx
15DD, 85, AMD Ryzen Embedded V1202B with Radeon Vega Gfx
6600, 0, AMD Radeon HD 8600/8700M
6600, 81, AMD Radeon (TM) R7 M370
6601, 0, AMD Radeon (TM) HD 8500M/8700M
......@@ -41,6 +45,7 @@
6665, 83, AMD Radeon (TM) R5 M320
6667, 0, AMD Radeon R5 M200 Series
666F, 0, AMD Radeon HD 8500M
66AF, C1, AMD Radeon VII
6780, 0, ATI FirePro V (FireGL V) Graphics Adapter
678A, 0, ATI FirePro V (FireGL V) Graphics Adapter
6798, 0, AMD Radeon HD 7900 Series
......
libdrm (2.4.99-1) unstable; urgency=medium
* New upstream release. (Closes: #934494)
* 02_kfreebsd.diff: Dropped, upstream.
* symbols: Updated.
-- Timo Aaltonen <tjaalton@debian.org> Mon, 12 Aug 2019 12:01:03 +0300
libdrm (2.4.97-1) unstable; urgency=medium
[ Andreas Boll ]
......
......@@ -4,6 +4,7 @@ libdrm_amdgpu.so.1 libdrm-amdgpu1 #MINVER#
amdgpu_bo_cpu_unmap@Base 2.4.63
amdgpu_bo_export@Base 2.4.63
amdgpu_bo_free@Base 2.4.63
amdgpu_cs_ctx_override_priority@Base 2.4.99
amdgpu_bo_import@Base 2.4.63
amdgpu_bo_inc_ref@Base 2.4.95
amdgpu_bo_list_create@Base 2.4.63
......@@ -36,10 +37,16 @@ libdrm_amdgpu.so.1 libdrm-amdgpu1 #MINVER#
amdgpu_cs_submit@Base 2.4.63
amdgpu_cs_submit_raw2@Base 2.4.97
amdgpu_cs_submit_raw@Base 2.4.82
amdgpu_cs_syncobj_export_sync_file2@Base 2.4.99
amdgpu_cs_syncobj_export_sync_file@Base 2.4.84
amdgpu_cs_syncobj_import_sync_file2@Base 2.4.99
amdgpu_cs_syncobj_import_sync_file@Base 2.4.84
amdgpu_cs_syncobj_query@Base 2.4.99
amdgpu_cs_syncobj_reset@Base 2.4.89
amdgpu_cs_syncobj_signal@Base 2.4.89
amdgpu_cs_syncobj_timeline_signal@Base 2.4.99
amdgpu_cs_syncobj_timeline_wait@Base 2.4.99
amdgpu_cs_syncobj_transfer@Base 2.4.99
amdgpu_cs_syncobj_wait@Base 2.4.84
amdgpu_cs_wait_fences@Base 2.4.81
amdgpu_cs_wait_semaphore@Base 2.4.67
......
......@@ -84,6 +84,7 @@ libdrm.so.2 libdrm2 #MINVER#
drmHashLookup@Base 2.3.1
drmHashNext@Base 2.3.1
drmIoctl@Base 2.4.3
drmIsMaster@Base 2.4.99
drmMalloc@Base 2.3.1
drmMap@Base 2.3.1
drmMapBufs@Base 2.3.1
......@@ -180,8 +181,12 @@ libdrm.so.2 libdrm2 #MINVER#
drmSyncobjFDToHandle@Base 2.4.82
drmSyncobjHandleToFD@Base 2.4.82
drmSyncobjImportSyncFile@Base 2.4.82
drmSyncobjQuery@Base 2.4.99
drmSyncobjReset@Base 2.4.89
drmSyncobjSignal@Base 2.4.89
drmSyncobjTimelineSignal@Base 2.4.99
drmSyncobjTimelineWait@Base 2.4.99
drmSyncobjTransfer@Base 2.4.99
drmSyncobjWait@Base 2.4.84
drmUnlock@Base 2.3.1
drmUnmap@Base 2.3.1
......
--- a/include/drm/drm.h
+++ b/include/drm/drm.h
@@ -44,6 +44,7 @@ typedef unsigned int drm_handle_t;
#else /* One of the BSDs */
+#include <stdint.h>
#include <sys/ioccom.h>
#include <sys/types.h>
typedef int8_t __s8;
01_default_perms.diff
02_kfreebsd.diff
......@@ -25,7 +25,6 @@ etna_pipe_del
etna_pipe_wait
etna_pipe_wait_ns
etna_bo_new
etna_bo_from_handle
etna_bo_from_name
etna_bo_from_dmabuf
etna_bo_ref
......
......@@ -150,11 +150,7 @@ static uint32_t bo2idx(struct etna_cmd_stream *stream, struct etna_bo *bo,
pthread_mutex_lock(&idx_lock);
if (!bo->current_stream) {
idx = append_bo(stream, bo);
bo->current_stream = stream;
bo->idx = idx;
} else if (bo->current_stream == stream) {
if (bo->current_stream == stream) {
idx = bo->idx;
} else {
/* slow-path: */
......@@ -165,6 +161,8 @@ static uint32_t bo2idx(struct etna_cmd_stream *stream, struct etna_bo *bo,
/* not found */
idx = append_bo(stream, bo);
}
bo->current_stream = stream;
bo->idx = idx;
}
pthread_mutex_unlock(&idx_lock);
......
......@@ -115,8 +115,6 @@ int etna_pipe_wait_ns(struct etna_pipe *pipe, uint32_t timestamp, uint64_t ns);
struct etna_bo *etna_bo_new(struct etna_device *dev,
uint32_t size, uint32_t flags);
struct etna_bo *etna_bo_from_handle(struct etna_device *dev,
uint32_t handle, uint32_t size);
struct etna_bo *etna_bo_from_name(struct etna_device *dev, uint32_t name);
struct etna_bo *etna_bo_from_dmabuf(struct etna_device *dev, int fd);
struct etna_bo *etna_bo_ref(struct etna_bo *bo);
......
......@@ -150,7 +150,7 @@ struct etna_cmd_stream_priv {
struct etna_bo **bos;
uint32_t nr_bos, max_bos;
/* notify callback if buffer reset happend */
/* notify callback if buffer reset happened */
void (*reset_notify)(struct etna_cmd_stream *stream, void *priv);
void *reset_notify_priv;
};
......
......@@ -276,7 +276,7 @@ drm_public uint32_t exynos_bo_handle(struct exynos_bo *bo)
* @bo: a exynos buffer object including a gem object handle to be mmapped
* to user space.
*
* if true, user pointer mmaped else NULL.
* if true, user pointer mmapped else NULL.
*/
drm_public void *exynos_bo_map(struct exynos_bo *bo)
{
......
......@@ -64,7 +64,7 @@ struct drm_exynos_gem_info {
/**
* A structure for user connection request of virtual display.
*
* @connection: indicate whether doing connetion or not by user.
* @connection: indicate whether doing connection or not by user.
* @extensions: if this value is 1 then the vidi driver would need additional
* 128bytes edid data.
* @edid: the edid data pointer from user side.
......