Import libdrm 2.4.100

This commit is contained in:
jsg 2019-11-27 02:09:48 +00:00
parent 974a819012
commit a628e08ec1
46 changed files with 1785 additions and 197 deletions

View File

@ -1,4 +1,4 @@
#!/bin/bash
#!/bin/sh
set -u
@ -46,15 +46,22 @@ amdgpu_cs_fence_to_handle
amdgpu_cs_import_syncobj
amdgpu_cs_query_fence_status
amdgpu_cs_query_reset_state
amdgpu_cs_query_reset_state2
amdgpu_query_sw_info
amdgpu_cs_signal_semaphore
amdgpu_cs_submit
amdgpu_cs_submit_raw
amdgpu_cs_submit_raw2
amdgpu_cs_syncobj_export_sync_file
amdgpu_cs_syncobj_export_sync_file2
amdgpu_cs_syncobj_import_sync_file
amdgpu_cs_syncobj_import_sync_file2
amdgpu_cs_syncobj_query
amdgpu_cs_syncobj_reset
amdgpu_cs_syncobj_signal
amdgpu_cs_syncobj_timeline_signal
amdgpu_cs_syncobj_timeline_wait
amdgpu_cs_syncobj_transfer
amdgpu_cs_syncobj_wait
amdgpu_cs_wait_fences
amdgpu_cs_wait_semaphore

View File

@ -87,8 +87,8 @@ enum amdgpu_bo_handle_type {
/** DMA-buf fd handle */
amdgpu_bo_handle_type_dma_buf_fd = 2,
/** KMS handle, but re-importing as a DMABUF handle through
* drmPrimeHandleToFD is forbidden. (Glamor does that)
/** Deprecated in favour of and same behaviour as
* amdgpu_bo_handle_type_kms, use that instead of this
*/
amdgpu_bo_handle_type_kms_noimport = 3,
};
@ -942,6 +942,21 @@ int amdgpu_cs_ctx_override_priority(amdgpu_device_handle dev,
int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
uint32_t *state, uint32_t *hangs);
/**
* Query reset state for the specific GPU Context.
*
* \param context - \c [in] GPU Context handle
* \param flags - \c [out] A combination of AMDGPU_CTX_QUERY2_FLAGS_*
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
*
* \sa amdgpu_cs_ctx_create()
*
*/
int amdgpu_cs_query_reset_state2(amdgpu_context_handle context,
uint64_t *flags);
/*
* Command Buffers Management
*
@ -1516,6 +1531,23 @@ int amdgpu_cs_syncobj_reset(amdgpu_device_handle dev,
int amdgpu_cs_syncobj_signal(amdgpu_device_handle dev,
const uint32_t *syncobjs, uint32_t syncobj_count);
/**
* Signal kernel timeline sync objects.
*
* \param dev - \c [in] device handle
* \param syncobjs - \c [in] array of sync object handles
* \param points - \c [in] array of timeline points
* \param syncobj_count - \c [in] number of handles in syncobjs
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
*
*/
int amdgpu_cs_syncobj_timeline_signal(amdgpu_device_handle dev,
const uint32_t *syncobjs,
uint64_t *points,
uint32_t syncobj_count);
/**
* Wait for one or all sync objects to signal.
*
@ -1536,6 +1568,45 @@ int amdgpu_cs_syncobj_wait(amdgpu_device_handle dev,
int64_t timeout_nsec, unsigned flags,
uint32_t *first_signaled);
/**
* Wait for one or all sync objects on their points to signal.
*
* \param dev - \c [in] self-explanatory
* \param handles - \c [in] array of sync object handles
* \param points - \c [in] array of sync points to wait
* \param num_handles - \c [in] self-explanatory
* \param timeout_nsec - \c [in] self-explanatory
* \param flags - \c [in] a bitmask of DRM_SYNCOBJ_WAIT_FLAGS_*
* \param first_signaled - \c [in] self-explanatory
*
* \return 0 on success\n
* -ETIME - Timeout
* <0 - Negative POSIX Error code
*
*/
int amdgpu_cs_syncobj_timeline_wait(amdgpu_device_handle dev,
uint32_t *handles, uint64_t *points,
unsigned num_handles,
int64_t timeout_nsec, unsigned flags,
uint32_t *first_signaled);
/**
* Query sync objects payloads.
*
* \param dev - \c [in] self-explanatory
* \param handles - \c [in] array of sync object handles
* \param points - \c [out] array of sync points returned, which represent the
*                          syncobj payloads.
* \param num_handles - \c [in] self-explanatory
*
* \return 0 on success\n
* -ETIME - Timeout
* <0 - Negative POSIX Error code
*
*/
int amdgpu_cs_syncobj_query(amdgpu_device_handle dev,
uint32_t *handles, uint64_t *points,
unsigned num_handles);
/**
* Export kernel sync object to shareable fd.
*
@ -1594,6 +1665,62 @@ int amdgpu_cs_syncobj_export_sync_file(amdgpu_device_handle dev,
int amdgpu_cs_syncobj_import_sync_file(amdgpu_device_handle dev,
uint32_t syncobj,
int sync_file_fd);
/**
* Export kernel timeline sync object to a sync_file.
*
* \param dev - \c [in] device handle
* \param syncobj - \c [in] sync object handle
* \param point - \c [in] timeline point
* \param flags - \c [in] flags
* \param sync_file_fd - \c [out] sync_file file descriptor.
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
*
*/
int amdgpu_cs_syncobj_export_sync_file2(amdgpu_device_handle dev,
uint32_t syncobj,
uint64_t point,
uint32_t flags,
int *sync_file_fd);
/**
* Import kernel timeline sync object from a sync_file.
*
* \param dev - \c [in] device handle
* \param syncobj - \c [in] sync object handle
* \param point - \c [in] timeline point
* \param sync_file_fd - \c [in] sync_file file descriptor.
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
*
*/
int amdgpu_cs_syncobj_import_sync_file2(amdgpu_device_handle dev,
uint32_t syncobj,
uint64_t point,
int sync_file_fd);
/**
* Transfer between syncobjs.
*
* \param dev - \c [in] device handle
* \param dst_handle - \c [in] sync object handle
* \param dst_point - \c [in] timeline point, 0 indicates dst is binary
* \param src_handle - \c [in] sync object handle
* \param src_point - \c [in] timeline point, 0 indicates src is binary
* \param flags - \c [in] flags
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
*
*/
int amdgpu_cs_syncobj_transfer(amdgpu_device_handle dev,
uint32_t dst_handle,
uint64_t dst_point,
uint32_t src_handle,
uint64_t src_point,
uint32_t flags);
/**
* Export an amdgpu fence as a handle (syncobj or fd).

View File

@ -147,12 +147,12 @@ drm_public int amdgpu_cs_ctx_override_priority(amdgpu_device_handle dev,
int master_fd,
unsigned priority)
{
union drm_amdgpu_sched args;
int r;
if (!dev || !context || master_fd < 0)
return -EINVAL;
union drm_amdgpu_sched args;
memset(&args, 0, sizeof(args));
args.in.op = AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE;
@ -188,6 +188,25 @@ drm_public int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
return r;
}
drm_public int amdgpu_cs_query_reset_state2(amdgpu_context_handle context,
uint64_t *flags)
{
union drm_amdgpu_ctx args;
int r;
if (!context)
return -EINVAL;
memset(&args, 0, sizeof(args));
args.in.op = AMDGPU_CTX_OP_QUERY_STATE2;
args.in.ctx_id = context->id;
r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CTX,
&args, sizeof(args));
if (!r)
*flags = args.out.state.flags;
return r;
}
/**
* Submit command to kernel DRM
* \param dev - \c [in] Device handle
@ -674,6 +693,18 @@ drm_public int amdgpu_cs_syncobj_signal(amdgpu_device_handle dev,
return drmSyncobjSignal(dev->fd, syncobjs, syncobj_count);
}
drm_public int amdgpu_cs_syncobj_timeline_signal(amdgpu_device_handle dev,
const uint32_t *syncobjs,
uint64_t *points,
uint32_t syncobj_count)
{
if (NULL == dev)
return -EINVAL;
return drmSyncobjTimelineSignal(dev->fd, syncobjs,
points, syncobj_count);
}
drm_public int amdgpu_cs_syncobj_wait(amdgpu_device_handle dev,
uint32_t *handles, unsigned num_handles,
int64_t timeout_nsec, unsigned flags,
@ -686,6 +717,29 @@ drm_public int amdgpu_cs_syncobj_wait(amdgpu_device_handle dev,
flags, first_signaled);
}
drm_public int amdgpu_cs_syncobj_timeline_wait(amdgpu_device_handle dev,
uint32_t *handles, uint64_t *points,
unsigned num_handles,
int64_t timeout_nsec, unsigned flags,
uint32_t *first_signaled)
{
if (NULL == dev)
return -EINVAL;
return drmSyncobjTimelineWait(dev->fd, handles, points, num_handles,
timeout_nsec, flags, first_signaled);
}
drm_public int amdgpu_cs_syncobj_query(amdgpu_device_handle dev,
uint32_t *handles, uint64_t *points,
unsigned num_handles)
{
if (NULL == dev)
return -EINVAL;
return drmSyncobjQuery(dev->fd, handles, points, num_handles);
}
drm_public int amdgpu_cs_export_syncobj(amdgpu_device_handle dev,
uint32_t handle,
int *shared_fd)
@ -726,6 +780,78 @@ drm_public int amdgpu_cs_syncobj_import_sync_file(amdgpu_device_handle dev,
return drmSyncobjImportSyncFile(dev->fd, syncobj, sync_file_fd);
}
drm_public int amdgpu_cs_syncobj_export_sync_file2(amdgpu_device_handle dev,
uint32_t syncobj,
uint64_t point,
uint32_t flags,
int *sync_file_fd)
{
uint32_t binary_handle;
int ret;
if (NULL == dev)
return -EINVAL;
if (!point)
return drmSyncobjExportSyncFile(dev->fd, syncobj, sync_file_fd);
ret = drmSyncobjCreate(dev->fd, 0, &binary_handle);
if (ret)
return ret;
ret = drmSyncobjTransfer(dev->fd, binary_handle, 0,
syncobj, point, flags);
if (ret)
goto out;
ret = drmSyncobjExportSyncFile(dev->fd, binary_handle, sync_file_fd);
out:
drmSyncobjDestroy(dev->fd, binary_handle);
return ret;
}
drm_public int amdgpu_cs_syncobj_import_sync_file2(amdgpu_device_handle dev,
uint32_t syncobj,
uint64_t point,
int sync_file_fd)
{
uint32_t binary_handle;
int ret;
if (NULL == dev)
return -EINVAL;
if (!point)
return drmSyncobjImportSyncFile(dev->fd, syncobj, sync_file_fd);
ret = drmSyncobjCreate(dev->fd, 0, &binary_handle);
if (ret)
return ret;
ret = drmSyncobjImportSyncFile(dev->fd, binary_handle, sync_file_fd);
if (ret)
goto out;
ret = drmSyncobjTransfer(dev->fd, syncobj, point,
binary_handle, 0, 0);
out:
drmSyncobjDestroy(dev->fd, binary_handle);
return ret;
}
drm_public int amdgpu_cs_syncobj_transfer(amdgpu_device_handle dev,
uint32_t dst_handle,
uint64_t dst_point,
uint32_t src_handle,
uint64_t src_point,
uint32_t flags)
{
if (NULL == dev)
return -EINVAL;
return drmSyncobjTransfer(dev->fd,
dst_handle, dst_point,
src_handle, src_point,
flags);
}
drm_public int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
amdgpu_context_handle context,
amdgpu_bo_list_handle bo_list_handle,
@ -733,12 +859,13 @@ drm_public int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
struct drm_amdgpu_cs_chunk *chunks,
uint64_t *seq_no)
{
union drm_amdgpu_cs cs = {0};
union drm_amdgpu_cs cs;
uint64_t *chunk_array;
int i, r;
if (num_chunks == 0)
return -EINVAL;
memset(&cs, 0, sizeof(cs));
chunk_array = alloca(sizeof(uint64_t) * num_chunks);
for (i = 0; i < num_chunks; i++)
chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
@ -763,10 +890,11 @@ drm_public int amdgpu_cs_submit_raw2(amdgpu_device_handle dev,
struct drm_amdgpu_cs_chunk *chunks,
uint64_t *seq_no)
{
union drm_amdgpu_cs cs = {0};
union drm_amdgpu_cs cs;
uint64_t *chunk_array;
int i, r;
memset(&cs, 0, sizeof(cs));
chunk_array = alloca(sizeof(uint64_t) * num_chunks);
for (i = 0; i < num_chunks; i++)
chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
@ -803,9 +931,10 @@ drm_public int amdgpu_cs_fence_to_handle(amdgpu_device_handle dev,
uint32_t what,
uint32_t *out_handle)
{
union drm_amdgpu_fence_to_handle fth = {0};
union drm_amdgpu_fence_to_handle fth;
int r;
memset(&fth, 0, sizeof(fth));
fth.in.fence.ctx_id = fence->context->id;
fth.in.fence.ip_type = fence->ip_type;
fth.in.fence.ip_instance = fence->ip_instance;

View File

@ -43,8 +43,8 @@
#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))
static pthread_mutex_t fd_mutex = PTHREAD_MUTEX_INITIALIZER;
static amdgpu_device_handle fd_list;
static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;
static amdgpu_device_handle dev_list;
static int fd_compare(int fd1, int fd2)
{
@ -95,13 +95,13 @@ static int amdgpu_get_auth(int fd, int *auth)
static void amdgpu_device_free_internal(amdgpu_device_handle dev)
{
amdgpu_device_handle *node = &fd_list;
amdgpu_device_handle *node = &dev_list;
pthread_mutex_lock(&fd_mutex);
pthread_mutex_lock(&dev_mutex);
while (*node != dev && (*node)->next)
node = &(*node)->next;
*node = (*node)->next;
pthread_mutex_unlock(&fd_mutex);
pthread_mutex_unlock(&dev_mutex);
close(dev->fd);
if ((dev->flink_fd >= 0) && (dev->fd != dev->flink_fd))
@ -155,16 +155,16 @@ drm_public int amdgpu_device_initialize(int fd,
*device_handle = NULL;
pthread_mutex_lock(&fd_mutex);
pthread_mutex_lock(&dev_mutex);
r = amdgpu_get_auth(fd, &flag_auth);
if (r) {
fprintf(stderr, "%s: amdgpu_get_auth (1) failed (%i)\n",
__func__, r);
pthread_mutex_unlock(&fd_mutex);
pthread_mutex_unlock(&dev_mutex);
return r;
}
for (dev = fd_list; dev; dev = dev->next)
for (dev = dev_list; dev; dev = dev->next)
if (fd_compare(dev->fd, fd) == 0)
break;
@ -173,7 +173,7 @@ drm_public int amdgpu_device_initialize(int fd,
if (r) {
fprintf(stderr, "%s: amdgpu_get_auth (2) failed (%i)\n",
__func__, r);
pthread_mutex_unlock(&fd_mutex);
pthread_mutex_unlock(&dev_mutex);
return r;
}
if ((flag_auth) && (!flag_authexist)) {
@ -182,14 +182,14 @@ drm_public int amdgpu_device_initialize(int fd,
*major_version = dev->major_version;
*minor_version = dev->minor_version;
amdgpu_device_reference(device_handle, dev);
pthread_mutex_unlock(&fd_mutex);
pthread_mutex_unlock(&dev_mutex);
return 0;
}
dev = calloc(1, sizeof(struct amdgpu_device));
if (!dev) {
fprintf(stderr, "%s: calloc failed\n", __func__);
pthread_mutex_unlock(&fd_mutex);
pthread_mutex_unlock(&dev_mutex);
return -ENOMEM;
}
@ -265,9 +265,9 @@ drm_public int amdgpu_device_initialize(int fd,
*major_version = dev->major_version;
*minor_version = dev->minor_version;
*device_handle = dev;
dev->next = fd_list;
fd_list = dev;
pthread_mutex_unlock(&fd_mutex);
dev->next = dev_list;
dev_list = dev;
pthread_mutex_unlock(&dev_mutex);
return 0;
@ -275,7 +275,7 @@ cleanup:
if (dev->fd >= 0)
close(dev->fd);
free(dev);
pthread_mutex_unlock(&fd_mutex);
pthread_mutex_unlock(&dev_mutex);
return r;
}

View File

@ -59,7 +59,7 @@ ext_libdrm_amdgpu = declare_dependency(
test(
'amdgpu-symbol-check',
prog_bash,
find_program('amdgpu-symbol-check'),
env : env_test,
args : [files('amdgpu-symbol-check'), libdrm_amdgpu]
args : libdrm_amdgpu,
)

View File

@ -1,9 +1,9 @@
#! /bin/sh
# Wrapper for compilers which do not understand '-c -o'.
scriptversion=2018-03-07.03; # UTC
scriptversion=2012-10-14.11; # UTC
# Copyright (C) 1999-2018 Free Software Foundation, Inc.
# Copyright (C) 1999-2014 Free Software Foundation, Inc.
# Written by Tom Tromey <tromey@cygnus.com>.
#
# This program is free software; you can redistribute it and/or modify
@ -17,7 +17,7 @@ scriptversion=2018-03-07.03; # UTC
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
@ -255,8 +255,7 @@ EOF
echo "compile $scriptversion"
exit $?
;;
cl | *[/\\]cl | cl.exe | *[/\\]cl.exe | \
icl | *[/\\]icl | icl.exe | *[/\\]icl.exe )
cl | *[/\\]cl | cl.exe | *[/\\]cl.exe )
func_cl_wrapper "$@" # Doesn't return...
;;
esac
@ -340,9 +339,9 @@ exit $ret
# Local Variables:
# mode: shell-script
# sh-indentation: 2
# eval: (add-hook 'before-save-hook 'time-stamp)
# eval: (add-hook 'write-file-hooks 'time-stamp)
# time-stamp-start: "scriptversion="
# time-stamp-format: "%:y-%02m-%02d.%02H"
# time-stamp-time-zone: "UTC0"
# time-stamp-time-zone: "UTC"
# time-stamp-end: "; # UTC"
# End:

View File

@ -1,9 +1,9 @@
#! /bin/sh
# test-driver - basic testsuite driver script.
scriptversion=2018-03-07.03; # UTC
scriptversion=2013-07-13.22; # UTC
# Copyright (C) 2011-2018 Free Software Foundation, Inc.
# Copyright (C) 2011-2014 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@ -16,7 +16,7 @@ scriptversion=2018-03-07.03; # UTC
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
@ -140,9 +140,9 @@ echo ":copy-in-global-log: $gcopy" >> $trs_file
# Local Variables:
# mode: shell-script
# sh-indentation: 2
# eval: (add-hook 'before-save-hook 'time-stamp)
# eval: (add-hook 'write-file-hooks 'time-stamp)
# time-stamp-start: "scriptversion="
# time-stamp-format: "%:y-%02m-%02d.%02H"
# time-stamp-time-zone: "UTC0"
# time-stamp-time-zone: "UTC"
# time-stamp-end: "; # UTC"
# End:

View File

@ -1,4 +1,4 @@
#!/bin/bash
#!/bin/sh
set -u
@ -25,7 +25,6 @@ etna_pipe_del
etna_pipe_wait
etna_pipe_wait_ns
etna_bo_new
etna_bo_from_handle
etna_bo_from_name
etna_bo_from_dmabuf
etna_bo_ref

View File

@ -150,11 +150,7 @@ static uint32_t bo2idx(struct etna_cmd_stream *stream, struct etna_bo *bo,
pthread_mutex_lock(&idx_lock);
if (!bo->current_stream) {
idx = append_bo(stream, bo);
bo->current_stream = stream;
bo->idx = idx;
} else if (bo->current_stream == stream) {
if (bo->current_stream == stream) {
idx = bo->idx;
} else {
/* slow-path: */
@ -165,6 +161,8 @@ static uint32_t bo2idx(struct etna_cmd_stream *stream, struct etna_bo *bo,
/* not found */
idx = append_bo(stream, bo);
}
bo->current_stream = stream;
bo->idx = idx;
}
pthread_mutex_unlock(&idx_lock);

View File

@ -115,8 +115,6 @@ int etna_pipe_wait_ns(struct etna_pipe *pipe, uint32_t timestamp, uint64_t ns);
struct etna_bo *etna_bo_new(struct etna_device *dev,
uint32_t size, uint32_t flags);
struct etna_bo *etna_bo_from_handle(struct etna_device *dev,
uint32_t handle, uint32_t size);
struct etna_bo *etna_bo_from_name(struct etna_device *dev, uint32_t name);
struct etna_bo *etna_bo_from_dmabuf(struct etna_device *dev, int fd);
struct etna_bo *etna_bo_ref(struct etna_bo *bo);

View File

@ -54,6 +54,7 @@ ext_libdrm_etnaviv = declare_dependency(
test(
'etnaviv-symbol-check',
prog_bash,
args : [files('etnaviv-symbol-check'), libdrm_etnaviv]
find_program('etnaviv-symbol-check'),
env : env_test,
args : libdrm_etnaviv,
)

View File

@ -1,4 +1,4 @@
#!/bin/bash
#!/bin/sh
set -u

View File

@ -48,7 +48,7 @@ pkg.generate(
test(
'exynos-symbol-check',
prog_bash,
find_program('exynos-symbol-check'),
env : env_test,
args : [files('exynos-symbol-check'), libdrm_exynos]
args : libdrm_exynos,
)

View File

@ -1,4 +1,4 @@
#!/bin/bash
#!/bin/sh
set -u

View File

@ -71,7 +71,7 @@ pkg.generate(
test(
'freedreno-symbol-check',
prog_bash,
find_program('freedreno-symbol-check'),
env : env_test,
args : [files('freedreno-symbol-check'), libdrm_freedreno]
args : libdrm_freedreno,
)

View File

@ -128,6 +128,10 @@ extern "C" {
* for the second page onward should be set to NC.
*/
#define AMDGPU_GEM_CREATE_MQD_GFX9 (1 << 8)
/* Flag that BO may contain sensitive data that must be wiped before
* releasing the memory
*/
#define AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE (1 << 9)
struct drm_amdgpu_gem_create_in {
/** the requested memory size */
@ -204,9 +208,9 @@ union drm_amdgpu_bo_list {
/* unknown cause */
#define AMDGPU_CTX_UNKNOWN_RESET 3
/* indicate gpu reset occurred after ctx created */
/* indicate gpu reset occured after ctx created */
#define AMDGPU_CTX_QUERY2_FLAGS_RESET (1<<0)
/* indicate vram lost occurred after ctx created */
/* indicate vram lost occured after ctx created */
#define AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST (1<<1)
/* indicate some job from this context once cause gpu hang */
#define AMDGPU_CTX_QUERY2_FLAGS_GUILTY (1<<2)
@ -219,7 +223,10 @@ union drm_amdgpu_bo_list {
#define AMDGPU_CTX_PRIORITY_VERY_LOW -1023
#define AMDGPU_CTX_PRIORITY_LOW -512
#define AMDGPU_CTX_PRIORITY_NORMAL 0
/* Selecting a priority above NORMAL requires CAP_SYS_NICE or DRM_MASTER */
/*
* When used in struct drm_amdgpu_ctx_in, a priority above NORMAL requires
* CAP_SYS_NICE or DRM_MASTER
*/
#define AMDGPU_CTX_PRIORITY_HIGH 512
#define AMDGPU_CTX_PRIORITY_VERY_HIGH 1023
@ -229,6 +236,7 @@ struct drm_amdgpu_ctx_in {
/** For future use, no flags defined so far */
__u32 flags;
__u32 ctx_id;
/** AMDGPU_CTX_PRIORITY_* */
__s32 priority;
};
@ -281,6 +289,7 @@ struct drm_amdgpu_sched_in {
/* AMDGPU_SCHED_OP_* */
__u32 op;
__u32 fd;
/** AMDGPU_CTX_PRIORITY_* */
__s32 priority;
__u32 ctx_id;
};
@ -528,6 +537,8 @@ struct drm_amdgpu_gem_va {
#define AMDGPU_CHUNK_ID_SYNCOBJ_OUT 0x05
#define AMDGPU_CHUNK_ID_BO_HANDLES 0x06
#define AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES 0x07
#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT 0x08
#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL 0x09
struct drm_amdgpu_cs_chunk {
__u32 chunk_id;
@ -608,6 +619,12 @@ struct drm_amdgpu_cs_chunk_sem {
__u32 handle;
};
struct drm_amdgpu_cs_chunk_syncobj {
__u32 handle;
__u32 flags;
__u64 point;
};
#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ 0
#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD 1
#define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD 2
@ -904,6 +921,7 @@ struct drm_amdgpu_info_firmware {
#define AMDGPU_VRAM_TYPE_HBM 6
#define AMDGPU_VRAM_TYPE_DDR3 7
#define AMDGPU_VRAM_TYPE_DDR4 8
#define AMDGPU_VRAM_TYPE_GDDR6 9
struct drm_amdgpu_info_device {
/** PCI Device ID */
@ -983,6 +1001,10 @@ struct drm_amdgpu_info_device {
__u64 high_va_offset;
/** The maximum high virtual address */
__u64 high_va_max;
/* gfx10 pa_sc_tile_steering_override */
__u32 pa_sc_tile_steering_override;
/* disabled TCCs */
__u64 tcc_disabled_mask;
};
struct drm_amdgpu_info_hw_ip {
@ -1036,6 +1058,7 @@ struct drm_amdgpu_info_vce_clock_table {
#define AMDGPU_FAMILY_CZ 135 /* Carrizo, Stoney */
#define AMDGPU_FAMILY_AI 141 /* Vega10 */
#define AMDGPU_FAMILY_RV 142 /* Raven */
#define AMDGPU_FAMILY_NV 143 /* Navi10 */
#if defined(__cplusplus)
}

View File

@ -1,4 +1,4 @@
#!/bin/bash
#!/bin/sh
set -u

View File

@ -35,6 +35,8 @@ static const struct pci_device {
uint16_t gen;
} pciids[] = {
/* Keep ids sorted by gen; latest gen first */
INTEL_TGL_12_IDS(12),
INTEL_EHL_IDS(11),
INTEL_ICL_11_IDS(11),
INTEL_CNL_IDS(10),
INTEL_CFL_IDS(9),

View File

@ -64,43 +64,37 @@ test_decode = executable(
test(
'gen4-3d.batch',
prog_bash,
args : files('tests/gen4-3d.batch.sh'),
find_program('tests/gen4-3d.batch.sh'),
workdir : meson.current_build_dir(),
)
test(
'gen45-3d.batch',
prog_bash,
args : files('tests/gm45-3d.batch.sh'),
find_program('tests/gm45-3d.batch.sh'),
workdir : meson.current_build_dir(),
)
test(
'gen5-3d.batch',
prog_bash,
args : files('tests/gen5-3d.batch.sh'),
find_program('tests/gen5-3d.batch.sh'),
workdir : meson.current_build_dir(),
)
test(
'gen6-3d.batch',
prog_bash,
args : files('tests/gen6-3d.batch.sh'),
find_program('tests/gen6-3d.batch.sh'),
workdir : meson.current_build_dir(),
)
test(
'gen7-3d.batch',
prog_bash,
args : files('tests/gen7-3d.batch.sh'),
find_program('tests/gen7-3d.batch.sh'),
workdir : meson.current_build_dir(),
)
test(
'gen7-2d-copy.batch',
prog_bash,
args : files('tests/gen7-2d-copy.batch.sh'),
find_program('tests/gen7-2d-copy.batch.sh'),
workdir : meson.current_build_dir(),
)
test(
'intel-symbol-check',
prog_bash,
find_program('intel-symbol-check'),
env : env_test,
args : [files('intel-symbol-check'), libdrm_intel]
args : libdrm_intel,
)

View File

@ -1,4 +1,4 @@
#!/bin/bash
#!/bin/sh
set -u

View File

@ -69,7 +69,7 @@ pkg.generate(
test(
'kms-symbol-check',
prog_bash,
find_program('kms-symbol-check'),
env : env_test,
args : [files('kms-symbol-check'), libkms]
args : libkms,
)

View File

@ -21,7 +21,7 @@
project(
'libdrm',
['c'],
version : '2.4.98',
version : '2.4.100',
license : 'MIT',
meson_version : '>= 0.43',
default_options : ['buildtype=debugoptimized', 'c_std=gnu99'],
@ -179,13 +179,21 @@ else
dep_rt = []
endif
dep_m = cc.find_library('m', required : false)
# From Niclas Zeising:
# FreeBSD requires sys/types.h for sys/sysctl.h, add it as part of the
# includes when checking for headers.
foreach header : ['sys/sysctl.h', 'sys/select.h', 'alloca.h']
config.set('HAVE_' + header.underscorify().to_upper(),
cc.compiles('#include <@0@>'.format(header), name : '@0@ works'.format(header)))
cc.compiles('#include <sys/types.h>\n#include <@0@>'.format(header), name : '@0@ works'.format(header)))
endforeach
if cc.has_header_symbol('sys/sysmacros.h', 'major')
if (cc.has_header_symbol('sys/sysmacros.h', 'major') and
cc.has_header_symbol('sys/sysmacros.h', 'minor') and
cc.has_header_symbol('sys/sysmacros.h', 'makedev'))
config.set10('MAJOR_IN_SYSMACROS', true)
elif cc.has_header_symbol('sys/mkdev.h', 'major')
endif
if (cc.has_header_symbol('sys/mkdev.h', 'major') and
cc.has_header_symbol('sys/mkdev.h', 'minor') and
cc.has_header_symbol('sys/mkdev.h', 'makedev'))
config.set10('MAJOR_IN_MKDEV', true)
endif
config.set10('HAVE_OPEN_MEMSTREAM', cc.has_function('open_memstream'))
@ -248,9 +256,6 @@ if prog_xslt.found()
endif
with_man_pages = with_man_pages != 'false' and prog_xslt.found() and prog_sed.found()
# Used for tests
prog_bash = find_program('bash')
config.set10('HAVE_VISIBILITY',
cc.compiles('''int foo_hidden(void) __attribute__((visibility(("hidden"))));''',
name : 'compiler supports __attribute__(("hidden"))'))

View File

@ -53,7 +53,7 @@ pkg.generate(
test(
'nouveau-symbol-check',
prog_bash,
find_program('nouveau-symbol-check'),
env : env_test,
args : [files('nouveau-symbol-check'), libdrm_nouveau]
args : libdrm_nouveau,
)

View File

@ -1,4 +1,4 @@
#!/bin/bash
#!/bin/sh
set -u

View File

@ -48,7 +48,7 @@ pkg.generate(
test(
'omap-symbol-check',
prog_bash,
find_program('omap-symbol-check'),
env : env_test,
args : [files('omap-symbol-check'), libdrm_omap]
args : libdrm_omap,
)

View File

@ -1,4 +1,4 @@
#!/bin/bash
#!/bin/sh
set -u

View File

@ -414,7 +414,7 @@ drm_public int omap_bo_dmabuf(struct omap_bo *bo)
if (bo->fd < 0) {
struct drm_prime_handle req = {
.handle = bo->handle,
.flags = DRM_CLOEXEC,
.flags = DRM_CLOEXEC | DRM_RDWR,
};
int ret;

View File

@ -58,7 +58,7 @@ pkg.generate(
test(
'radeon-symbol-check',
prog_bash,
find_program('radeon-symbol-check'),
env : env_test,
args : [files('radeon-symbol-check'), libdrm_radeon]
args : libdrm_radeon,
)

View File

@ -1,4 +1,4 @@
#!/bin/bash
#!/bin/sh
set -u

View File

@ -47,7 +47,7 @@ pkg.generate(
test(
'tegra-symbol-check',
prog_bash,
find_program('tegra-symbol-check'),
env : env_test,
args : [files('tegra-symbol-check'), libdrm_tegra]
args : libdrm_tegra,
)

View File

@ -1,4 +1,4 @@
#!/bin/bash
#!/bin/sh
set -u

View File

@ -34,4 +34,5 @@ amdgpu_test_SOURCES = \
uve_ib.h \
deadlock_tests.c \
vm_tests.c \
ras_tests.c
ras_tests.c \
syncobj_tests.c

View File

@ -57,6 +57,7 @@
#define DEADLOCK_TESTS_STR "Deadlock Tests"
#define VM_TESTS_STR "VM Tests"
#define RAS_TESTS_STR "RAS Tests"
#define SYNCOBJ_TIMELINE_TESTS_STR "SYNCOBJ TIMELINE Tests"
/**
* Open handles for amdgpu devices
@ -123,6 +124,12 @@ static CU_SuiteInfo suites[] = {
.pCleanupFunc = suite_ras_tests_clean,
.pTests = ras_tests,
},
{
.pName = SYNCOBJ_TIMELINE_TESTS_STR,
.pInitFunc = suite_syncobj_timeline_tests_init,
.pCleanupFunc = suite_syncobj_timeline_tests_clean,
.pTests = syncobj_timeline_tests,
},
CU_SUITE_INFO_NULL,
};
@ -176,6 +183,10 @@ static Suites_Active_Status suites_active_stat[] = {
.pName = RAS_TESTS_STR,
.pActive = suite_ras_tests_enable,
},
{
.pName = SYNCOBJ_TIMELINE_TESTS_STR,
.pActive = suite_syncobj_timeline_tests_enable,
},
};
@ -453,14 +464,22 @@ static void amdgpu_disable_suites()
fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
/* This test was run on GFX9 only */
if (family_id < AMDGPU_FAMILY_AI || family_id > AMDGPU_FAMILY_RV)
if (amdgpu_set_test_active(BASIC_TESTS_STR, "Dispatch Test", CU_FALSE))
if (family_id < AMDGPU_FAMILY_AI || family_id > AMDGPU_FAMILY_RV) {
if (amdgpu_set_test_active(BASIC_TESTS_STR, "Dispatch Test (GFX)", CU_FALSE))
fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
if (amdgpu_set_test_active(BASIC_TESTS_STR, "Dispatch Test (Compute)", CU_FALSE))
fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
}
/* This test was run on GFX9 only */
if (family_id < AMDGPU_FAMILY_AI || family_id > AMDGPU_FAMILY_RV)
if (amdgpu_set_test_active(BASIC_TESTS_STR, "Draw Test", CU_FALSE))
fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
/* This test was run on GFX9 only */
//if (family_id < AMDGPU_FAMILY_AI || family_id > AMDGPU_FAMILY_RV)
if (amdgpu_set_test_active(BASIC_TESTS_STR, "GPU reset Test", CU_FALSE))
fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
}
/* The main() function for setting up and running the tests.

View File

@ -216,6 +216,27 @@ CU_BOOL suite_ras_tests_enable(void);
extern CU_TestInfo ras_tests[];
/**
* Initialize syncobj timeline test suite
*/
int suite_syncobj_timeline_tests_init();
/**
* Deinitialize syncobj timeline test suite
*/
int suite_syncobj_timeline_tests_clean();
/**
* Decide if the suite is enabled by default or not.
*/
CU_BOOL suite_syncobj_timeline_tests_enable(void);
/**
* Tests in syncobj timeline test suite
*/
extern CU_TestInfo syncobj_timeline_tests[];
/**
* Helper functions
*/

View File

@ -24,6 +24,12 @@
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#ifdef MAJOR_IN_SYSMACROS
#include <sys/sysmacros.h>
#endif
#include <sys/stat.h>
#include <fcntl.h>
#ifdef HAVE_ALLOCA_H
# include <alloca.h>
#endif
@ -49,8 +55,10 @@ static void amdgpu_userptr_test(void);
static void amdgpu_semaphore_test(void);
static void amdgpu_sync_dependency_test(void);
static void amdgpu_bo_eviction_test(void);
static void amdgpu_dispatch_test(void);
static void amdgpu_compute_dispatch_test(void);
static void amdgpu_gfx_dispatch_test(void);
static void amdgpu_draw_test(void);
static void amdgpu_gpu_reset_test(void);
static void amdgpu_command_submission_write_linear_helper(unsigned ip_type);
static void amdgpu_command_submission_const_fill_helper(unsigned ip_type);
@ -72,8 +80,10 @@ CU_TestInfo basic_tests[] = {
{ "Command submission Test (SDMA)", amdgpu_command_submission_sdma },
{ "SW semaphore Test", amdgpu_semaphore_test },
{ "Sync dependency Test", amdgpu_sync_dependency_test },
{ "Dispatch Test", amdgpu_dispatch_test },
{ "Dispatch Test (Compute)", amdgpu_compute_dispatch_test },
{ "Dispatch Test (GFX)", amdgpu_gfx_dispatch_test },
{ "Draw Test", amdgpu_draw_test },
{ "GPU reset Test", amdgpu_gpu_reset_test },
CU_TEST_INFO_NULL,
};
#define BUFFER_SIZE (8 * 1024)
@ -329,14 +339,15 @@ static const uint32_t preamblecache_gfx9[] = {
0xc0016900, 0x2d5, 0x10000, 0xc0016900, 0x2dc, 0x0,
0xc0066900, 0x2de, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0026900, 0x2e5, 0x0, 0x0,
0xc0056900, 0x2f9, 0x5, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000,
0xc0026900, 0x311, 0x3, 0x0, 0xc0026900, 0x316, 0x1e, 0x20,
0xc0036900, 0x311, 0x3, 0, 0x100000, 0xc0026900, 0x316, 0x1e, 0x20,
0xc0016900, 0x349, 0x0, 0xc0016900, 0x358, 0x0, 0xc0016900, 0x367, 0x0,
0xc0016900, 0x376, 0x0, 0xc0016900, 0x385, 0x0, 0xc0016900, 0x19, 0x0,
0xc0056900, 0xe8, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0076900, 0x1e1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0026900, 0x204, 0x90000, 0x4, 0xc0046900, 0x20c, 0x0, 0x0, 0x0, 0x0,
0xc0016900, 0x2b2, 0x0, 0xc0026900, 0x30e, 0xffffffff, 0xffffffff,
0xc0016900, 0x314, 0x0, 0xc0002f00, 0x1, 0xc0016900, 0x1, 0x1,
0xc0016900, 0x314, 0x0, 0xc0016900, 0x2a6, 0, 0xc0016900, 0x210, 0,
0xc0002f00, 0x1, 0xc0016900, 0x1, 0x1,
0xc0016900, 0x18, 0x2, 0xc0016900, 0x206, 0x300, 0xc0017900, 0x20000243, 0x0,
0xc0017900, 0x248, 0xffffffff, 0xc0017900, 0x249, 0x0, 0xc0017900, 0x24a, 0x0,
0xc0017900, 0x24b, 0x0
@ -450,7 +461,7 @@ static const uint32_t cached_cmd_gfx9[] = {
0xc0016900, 0x0, 0x0, 0xc0026900, 0x3, 0x2a, 0x0,
0xc0046900, 0xa, 0x0, 0x0, 0x0, 0x200020,
0xc0016900, 0x83, 0xffff, 0xc0026900, 0x8e, 0xf, 0xf,
0xc0056900, 0x105, 0x0, 0x0, 0x0, 0x0, 0x12,
0xc0056900, 0x105, 0x0, 0x0, 0x0, 0x0, 0x1a,
0xc0026900, 0x10b, 0x0, 0x0, 0xc0016900, 0x1e0, 0x0,
0xc0036900, 0x200, 0x0, 0x10000, 0xcc0011,
0xc0026900, 0x292, 0x20, 0x60201b8,
@ -2094,10 +2105,7 @@ static int amdgpu_dispatch_init(uint32_t *ptr, uint32_t ip_type)
ptr[i++] = PACKET3_COMPUTE(PKT3_SET_SH_REG, 3);
ptr[i++] = 0x204;
i += 3;
/* clear mmCOMPUTE_RESOURCE_LIMITS */
ptr[i++] = PACKET3_COMPUTE(PKT3_SET_SH_REG, 1);
ptr[i++] = 0x215;
ptr[i++] = 0;
/* clear mmCOMPUTE_TMPRING_SIZE */
ptr[i++] = PACKET3_COMPUTE(PKT3_SET_SH_REG, 1);
ptr[i++] = 0x218;
@ -2184,6 +2192,7 @@ static void amdgpu_memset_dispatch_test(amdgpu_device_handle device_handle,
&bo_shader, &ptr_shader,
&mc_address_shader, &va_shader);
CU_ASSERT_EQUAL(r, 0);
memset(ptr_shader, 0, bo_shader_size);
r = amdgpu_dispatch_load_cs_shader(ptr_shader, CS_BUFFERCLEAR);
CU_ASSERT_EQUAL(r, 0);
@ -2220,6 +2229,11 @@ static void amdgpu_memset_dispatch_test(amdgpu_device_handle device_handle,
ptr_cmd[i++] = 0x22222222;
ptr_cmd[i++] = 0x22222222;
/* clear mmCOMPUTE_RESOURCE_LIMITS */
ptr_cmd[i++] = PACKET3_COMPUTE(PKT3_SET_SH_REG, 1);
ptr_cmd[i++] = 0x215;
ptr_cmd[i++] = 0;
/* dispatch direct command */
ptr_cmd[i++] = PACKET3_COMPUTE(PACKET3_DISPATCH_DIRECT, 3);
ptr_cmd[i++] = 0x10;
@ -2321,6 +2335,7 @@ static void amdgpu_memcpy_dispatch_test(amdgpu_device_handle device_handle,
&bo_shader, &ptr_shader,
&mc_address_shader, &va_shader);
CU_ASSERT_EQUAL(r, 0);
memset(ptr_shader, 0, bo_shader_size);
r = amdgpu_dispatch_load_cs_shader(ptr_shader, CS_BUFFERCOPY );
CU_ASSERT_EQUAL(r, 0);
@ -2365,6 +2380,11 @@ static void amdgpu_memcpy_dispatch_test(amdgpu_device_handle device_handle,
ptr_cmd[i++] = 0x400;
ptr_cmd[i++] = 0x74fac;
/* clear mmCOMPUTE_RESOURCE_LIMITS */
ptr_cmd[i++] = PACKET3_COMPUTE(PKT3_SET_SH_REG, 1);
ptr_cmd[i++] = 0x215;
ptr_cmd[i++] = 0;
/* dispatch direct command */
ptr_cmd[i++] = PACKET3_COMPUTE(PACKET3_DISPATCH_DIRECT, 3);
ptr_cmd[i++] = 0x10;
@ -2430,7 +2450,8 @@ static void amdgpu_memcpy_dispatch_test(amdgpu_device_handle device_handle,
r = amdgpu_cs_ctx_free(context_handle);
CU_ASSERT_EQUAL(r, 0);
}
static void amdgpu_dispatch_test(void)
static void amdgpu_compute_dispatch_test(void)
{
int r;
struct drm_amdgpu_info_hw_ip info;
@ -2438,14 +2459,25 @@ static void amdgpu_dispatch_test(void)
r = amdgpu_query_hw_ip_info(device_handle, AMDGPU_HW_IP_COMPUTE, 0, &info);
CU_ASSERT_EQUAL(r, 0);
if (!info.available_rings)
printf("SKIP ... as there's no compute ring\n");
for (ring_id = 0; (1 << ring_id) & info.available_rings; ring_id++) {
amdgpu_memset_dispatch_test(device_handle, AMDGPU_HW_IP_COMPUTE, ring_id);
amdgpu_memcpy_dispatch_test(device_handle, AMDGPU_HW_IP_COMPUTE, ring_id);
}
}
static void amdgpu_gfx_dispatch_test(void)
{
int r;
struct drm_amdgpu_info_hw_ip info;
uint32_t ring_id;
r = amdgpu_query_hw_ip_info(device_handle, AMDGPU_HW_IP_GFX, 0, &info);
CU_ASSERT_EQUAL(r, 0);
if (!info.available_rings)
printf("SKIP ... as there's no graphics ring\n");
for (ring_id = 0; (1 << ring_id) & info.available_rings; ring_id++) {
amdgpu_memset_dispatch_test(device_handle, AMDGPU_HW_IP_GFX, ring_id);
@ -2901,12 +2933,14 @@ static void amdgpu_memset_draw_test(amdgpu_device_handle device_handle,
&bo_shader_ps, &ptr_shader_ps,
&mc_address_shader_ps, &va_shader_ps);
CU_ASSERT_EQUAL(r, 0);
memset(ptr_shader_ps, 0, bo_shader_size);
r = amdgpu_bo_alloc_and_map(device_handle, bo_shader_size, 4096,
AMDGPU_GEM_DOMAIN_VRAM, 0,
&bo_shader_vs, &ptr_shader_vs,
&mc_address_shader_vs, &va_shader_vs);
CU_ASSERT_EQUAL(r, 0);
memset(ptr_shader_vs, 0, bo_shader_size);
r = amdgpu_draw_load_ps_shader(ptr_shader_ps, PS_CONST);
CU_ASSERT_EQUAL(r, 0);
@ -2996,7 +3030,7 @@ static void amdgpu_memcpy_draw(amdgpu_device_handle device_handle,
ptr_cmd[i++] = 0x92;
i += 3;
ptr_cmd[i++] = PACKET3(PKT3_SET_SH_REG, 1);
ptr_cmd[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
ptr_cmd[i++] = 0x191;
ptr_cmd[i++] = 0;
@ -3074,12 +3108,14 @@ static void amdgpu_memcpy_draw_test(amdgpu_device_handle device_handle, uint32_t
&bo_shader_ps, &ptr_shader_ps,
&mc_address_shader_ps, &va_shader_ps);
CU_ASSERT_EQUAL(r, 0);
memset(ptr_shader_ps, 0, bo_shader_size);
r = amdgpu_bo_alloc_and_map(device_handle, bo_shader_size, 4096,
AMDGPU_GEM_DOMAIN_VRAM, 0,
&bo_shader_vs, &ptr_shader_vs,
&mc_address_shader_vs, &va_shader_vs);
CU_ASSERT_EQUAL(r, 0);
memset(ptr_shader_vs, 0, bo_shader_size);
r = amdgpu_draw_load_ps_shader(ptr_shader_ps, PS_TEX);
CU_ASSERT_EQUAL(r, 0);
@ -3105,9 +3141,45 @@ static void amdgpu_draw_test(void)
r = amdgpu_query_hw_ip_info(device_handle, AMDGPU_HW_IP_GFX, 0, &info);
CU_ASSERT_EQUAL(r, 0);
if (!info.available_rings)
printf("SKIP ... as there's no graphics ring\n");
for (ring_id = 0; (1 << ring_id) & info.available_rings; ring_id++) {
amdgpu_memset_draw_test(device_handle, ring_id);
amdgpu_memcpy_draw_test(device_handle, ring_id);
}
}
/*
 * Force a GPU reset through the amdgpu_gpu_recover debugfs node, verify a
 * pre-existing context observes the reset, then confirm the GPU still works
 * by re-running the compute and gfx dispatch tests.
 */
static void amdgpu_gpu_reset_test(void)
{
	int r;
	char debugfs_path[256], tmp[10];
	int fd;
	struct stat sbuf;
	amdgpu_context_handle context_handle;
	uint32_t hang_state, hangs;

	/* Create the context BEFORE the reset so it witnesses it. */
	r = amdgpu_cs_ctx_create(device_handle, &context_handle);
	CU_ASSERT_EQUAL(r, 0);

	/* The debugfs node is keyed by the DRM minor of the opened device. */
	r = fstat(drm_amdgpu[0], &sbuf);
	CU_ASSERT_EQUAL(r, 0);

	sprintf(debugfs_path, "/sys/kernel/debug/dri/%d/amdgpu_gpu_recover", minor(sbuf.st_rdev));
	fd = open(debugfs_path, O_RDONLY);
	CU_ASSERT(fd >= 0);

	/* NOTE(review): reading this file appears to trigger the recovery
	 * on the kernel side — confirm against the kernel debugfs docs. */
	r = read(fd, tmp, sizeof(tmp)/sizeof(char));
	CU_ASSERT(r > 0);

	/* The context created above must now report an unknown reset. */
	r = amdgpu_cs_query_reset_state(context_handle, &hang_state, &hangs);
	CU_ASSERT_EQUAL(r, 0);
	CU_ASSERT_EQUAL(hang_state, AMDGPU_CTX_UNKNOWN_RESET);

	close(fd);

	r = amdgpu_cs_ctx_free(context_handle);
	CU_ASSERT_EQUAL(r, 0);

	/* Sanity-check the GPU is functional again after recovery. */
	amdgpu_compute_dispatch_test();
	amdgpu_gfx_dispatch_test();
}

View File

@ -267,7 +267,6 @@ static void amdgpu_memory_alloc(void)
static void amdgpu_mem_fail_alloc(void)
{
amdgpu_bo_handle bo;
int r;
struct amdgpu_bo_alloc_request req = {0};
amdgpu_bo_handle buf_handle;
@ -282,7 +281,7 @@ static void amdgpu_mem_fail_alloc(void)
CU_ASSERT_EQUAL(r, -ENOMEM);
if (!r) {
r = amdgpu_bo_free(bo);
r = amdgpu_bo_free(buf_handle);
CU_ASSERT_EQUAL(r, 0);
}
}

View File

@ -24,7 +24,7 @@ if dep_cunit.found()
files(
'amdgpu_test.c', 'basic_tests.c', 'bo_tests.c', 'cs_tests.c',
'vce_tests.c', 'uvd_enc_tests.c', 'vcn_tests.c', 'deadlock_tests.c',
'vm_tests.c', 'ras_tests.c',
'vm_tests.c', 'ras_tests.c', 'syncobj_tests.c',
),
dependencies : [dep_cunit, dep_threads],
include_directories : [inc_root, inc_drm, include_directories('../../amdgpu')],

View File

@ -31,6 +31,8 @@
#include <stdio.h>
#include "xf86drm.h"
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
const char *ras_block_string[] = {
"umc",
"sdma",
@ -72,11 +74,251 @@ enum amdgpu_ras_block {
#define AMDGPU_RAS_BLOCK_COUNT AMDGPU_RAS_BLOCK__LAST
#define AMDGPU_RAS_BLOCK_MASK ((1ULL << AMDGPU_RAS_BLOCK_COUNT) - 1)
/*
 * Fine-grained GFX sub-block indices, used as the sub_block_index when
 * injecting RAS errors into the GFX IP block.
 * NOTE(review): these values look like a copy of the kernel's
 * enum amdgpu_ras_gfx_subblock and must stay in sync with it — confirm
 * against the running kernel's amdgpu_ras.h.
 * The *_INDEX_START / *_INDEX_END enumerators alias the first/last member
 * of each sub-range.
 */
enum amdgpu_ras_gfx_subblock {
	/* CPC */
	AMDGPU_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
	AMDGPU_RAS_BLOCK__GFX_CPC_SCRATCH =
		AMDGPU_RAS_BLOCK__GFX_CPC_INDEX_START,
	AMDGPU_RAS_BLOCK__GFX_CPC_UCODE,
	AMDGPU_RAS_BLOCK__GFX_DC_STATE_ME1,
	AMDGPU_RAS_BLOCK__GFX_DC_CSINVOC_ME1,
	AMDGPU_RAS_BLOCK__GFX_DC_RESTORE_ME1,
	AMDGPU_RAS_BLOCK__GFX_DC_STATE_ME2,
	AMDGPU_RAS_BLOCK__GFX_DC_CSINVOC_ME2,
	AMDGPU_RAS_BLOCK__GFX_DC_RESTORE_ME2,
	AMDGPU_RAS_BLOCK__GFX_CPC_INDEX_END =
		AMDGPU_RAS_BLOCK__GFX_DC_RESTORE_ME2,
	/* CPF */
	AMDGPU_RAS_BLOCK__GFX_CPF_INDEX_START,
	AMDGPU_RAS_BLOCK__GFX_CPF_ROQ_ME2 =
		AMDGPU_RAS_BLOCK__GFX_CPF_INDEX_START,
	AMDGPU_RAS_BLOCK__GFX_CPF_ROQ_ME1,
	AMDGPU_RAS_BLOCK__GFX_CPF_TAG,
	AMDGPU_RAS_BLOCK__GFX_CPF_INDEX_END = AMDGPU_RAS_BLOCK__GFX_CPF_TAG,
	/* CPG */
	AMDGPU_RAS_BLOCK__GFX_CPG_INDEX_START,
	AMDGPU_RAS_BLOCK__GFX_CPG_DMA_ROQ =
		AMDGPU_RAS_BLOCK__GFX_CPG_INDEX_START,
	AMDGPU_RAS_BLOCK__GFX_CPG_DMA_TAG,
	AMDGPU_RAS_BLOCK__GFX_CPG_TAG,
	AMDGPU_RAS_BLOCK__GFX_CPG_INDEX_END = AMDGPU_RAS_BLOCK__GFX_CPG_TAG,
	/* GDS */
	AMDGPU_RAS_BLOCK__GFX_GDS_INDEX_START,
	AMDGPU_RAS_BLOCK__GFX_GDS_MEM = AMDGPU_RAS_BLOCK__GFX_GDS_INDEX_START,
	AMDGPU_RAS_BLOCK__GFX_GDS_INPUT_QUEUE,
	AMDGPU_RAS_BLOCK__GFX_GDS_OA_PHY_CMD_RAM_MEM,
	AMDGPU_RAS_BLOCK__GFX_GDS_OA_PHY_DATA_RAM_MEM,
	AMDGPU_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
	AMDGPU_RAS_BLOCK__GFX_GDS_INDEX_END =
		AMDGPU_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
	/* SPI */
	AMDGPU_RAS_BLOCK__GFX_SPI_SR_MEM,
	/* SQ */
	AMDGPU_RAS_BLOCK__GFX_SQ_INDEX_START,
	AMDGPU_RAS_BLOCK__GFX_SQ_SGPR = AMDGPU_RAS_BLOCK__GFX_SQ_INDEX_START,
	AMDGPU_RAS_BLOCK__GFX_SQ_LDS_D,
	AMDGPU_RAS_BLOCK__GFX_SQ_LDS_I,
	AMDGPU_RAS_BLOCK__GFX_SQ_VGPR,
	AMDGPU_RAS_BLOCK__GFX_SQ_INDEX_END = AMDGPU_RAS_BLOCK__GFX_SQ_VGPR,
	/* SQC (3 ranges) */
	AMDGPU_RAS_BLOCK__GFX_SQC_INDEX_START,
	/* SQC range 0 */
	AMDGPU_RAS_BLOCK__GFX_SQC_INDEX0_START =
		AMDGPU_RAS_BLOCK__GFX_SQC_INDEX_START,
	AMDGPU_RAS_BLOCK__GFX_SQC_INST_UTCL1_LFIFO =
		AMDGPU_RAS_BLOCK__GFX_SQC_INDEX0_START,
	AMDGPU_RAS_BLOCK__GFX_SQC_DATA_CU0_WRITE_DATA_BUF,
	AMDGPU_RAS_BLOCK__GFX_SQC_DATA_CU0_UTCL1_LFIFO,
	AMDGPU_RAS_BLOCK__GFX_SQC_DATA_CU1_WRITE_DATA_BUF,
	AMDGPU_RAS_BLOCK__GFX_SQC_DATA_CU1_UTCL1_LFIFO,
	AMDGPU_RAS_BLOCK__GFX_SQC_DATA_CU2_WRITE_DATA_BUF,
	AMDGPU_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
	AMDGPU_RAS_BLOCK__GFX_SQC_INDEX0_END =
		AMDGPU_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
	/* SQC range 1 */
	AMDGPU_RAS_BLOCK__GFX_SQC_INDEX1_START,
	AMDGPU_RAS_BLOCK__GFX_SQC_INST_BANKA_TAG_RAM =
		AMDGPU_RAS_BLOCK__GFX_SQC_INDEX1_START,
	AMDGPU_RAS_BLOCK__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO,
	AMDGPU_RAS_BLOCK__GFX_SQC_INST_BANKA_MISS_FIFO,
	AMDGPU_RAS_BLOCK__GFX_SQC_INST_BANKA_BANK_RAM,
	AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKA_TAG_RAM,
	AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKA_HIT_FIFO,
	AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKA_MISS_FIFO,
	AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM,
	AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
	AMDGPU_RAS_BLOCK__GFX_SQC_INDEX1_END =
		AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
	/* SQC range 2 */
	AMDGPU_RAS_BLOCK__GFX_SQC_INDEX2_START,
	AMDGPU_RAS_BLOCK__GFX_SQC_INST_BANKB_TAG_RAM =
		AMDGPU_RAS_BLOCK__GFX_SQC_INDEX2_START,
	AMDGPU_RAS_BLOCK__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO,
	AMDGPU_RAS_BLOCK__GFX_SQC_INST_BANKB_MISS_FIFO,
	AMDGPU_RAS_BLOCK__GFX_SQC_INST_BANKB_BANK_RAM,
	AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKB_TAG_RAM,
	AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKB_HIT_FIFO,
	AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKB_MISS_FIFO,
	AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM,
	AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
	AMDGPU_RAS_BLOCK__GFX_SQC_INDEX2_END =
		AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
	AMDGPU_RAS_BLOCK__GFX_SQC_INDEX_END =
		AMDGPU_RAS_BLOCK__GFX_SQC_INDEX2_END,
	/* TA */
	AMDGPU_RAS_BLOCK__GFX_TA_INDEX_START,
	AMDGPU_RAS_BLOCK__GFX_TA_FS_DFIFO =
		AMDGPU_RAS_BLOCK__GFX_TA_INDEX_START,
	AMDGPU_RAS_BLOCK__GFX_TA_FS_AFIFO,
	AMDGPU_RAS_BLOCK__GFX_TA_FL_LFIFO,
	AMDGPU_RAS_BLOCK__GFX_TA_FX_LFIFO,
	AMDGPU_RAS_BLOCK__GFX_TA_FS_CFIFO,
	AMDGPU_RAS_BLOCK__GFX_TA_INDEX_END = AMDGPU_RAS_BLOCK__GFX_TA_FS_CFIFO,
	/* TCA */
	AMDGPU_RAS_BLOCK__GFX_TCA_INDEX_START,
	AMDGPU_RAS_BLOCK__GFX_TCA_HOLE_FIFO =
		AMDGPU_RAS_BLOCK__GFX_TCA_INDEX_START,
	AMDGPU_RAS_BLOCK__GFX_TCA_REQ_FIFO,
	AMDGPU_RAS_BLOCK__GFX_TCA_INDEX_END =
		AMDGPU_RAS_BLOCK__GFX_TCA_REQ_FIFO,
	/* TCC (5 sub-ranges) */
	AMDGPU_RAS_BLOCK__GFX_TCC_INDEX_START,
	/* TCC range 0 */
	AMDGPU_RAS_BLOCK__GFX_TCC_INDEX0_START =
		AMDGPU_RAS_BLOCK__GFX_TCC_INDEX_START,
	AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_DATA =
		AMDGPU_RAS_BLOCK__GFX_TCC_INDEX0_START,
	AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_0_1,
	AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_0,
	AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_1,
	AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_0,
	AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_1,
	AMDGPU_RAS_BLOCK__GFX_TCC_HIGH_RATE_TAG,
	AMDGPU_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
	AMDGPU_RAS_BLOCK__GFX_TCC_INDEX0_END =
		AMDGPU_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
	/* TCC range 1 */
	AMDGPU_RAS_BLOCK__GFX_TCC_INDEX1_START,
	AMDGPU_RAS_BLOCK__GFX_TCC_IN_USE_DEC =
		AMDGPU_RAS_BLOCK__GFX_TCC_INDEX1_START,
	AMDGPU_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
	AMDGPU_RAS_BLOCK__GFX_TCC_INDEX1_END =
		AMDGPU_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
	/* TCC range 2 */
	AMDGPU_RAS_BLOCK__GFX_TCC_INDEX2_START,
	AMDGPU_RAS_BLOCK__GFX_TCC_RETURN_DATA =
		AMDGPU_RAS_BLOCK__GFX_TCC_INDEX2_START,
	AMDGPU_RAS_BLOCK__GFX_TCC_RETURN_CONTROL,
	AMDGPU_RAS_BLOCK__GFX_TCC_UC_ATOMIC_FIFO,
	AMDGPU_RAS_BLOCK__GFX_TCC_WRITE_RETURN,
	AMDGPU_RAS_BLOCK__GFX_TCC_WRITE_CACHE_READ,
	AMDGPU_RAS_BLOCK__GFX_TCC_SRC_FIFO,
	AMDGPU_RAS_BLOCK__GFX_TCC_SRC_FIFO_NEXT_RAM,
	AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
	AMDGPU_RAS_BLOCK__GFX_TCC_INDEX2_END =
		AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
	/* TCC range 3 */
	AMDGPU_RAS_BLOCK__GFX_TCC_INDEX3_START,
	AMDGPU_RAS_BLOCK__GFX_TCC_LATENCY_FIFO =
		AMDGPU_RAS_BLOCK__GFX_TCC_INDEX3_START,
	AMDGPU_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
	AMDGPU_RAS_BLOCK__GFX_TCC_INDEX3_END =
		AMDGPU_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
	/* TCC range 4 */
	AMDGPU_RAS_BLOCK__GFX_TCC_INDEX4_START,
	AMDGPU_RAS_BLOCK__GFX_TCC_WRRET_TAG_WRITE_RETURN =
		AMDGPU_RAS_BLOCK__GFX_TCC_INDEX4_START,
	AMDGPU_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
	AMDGPU_RAS_BLOCK__GFX_TCC_INDEX4_END =
		AMDGPU_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
	AMDGPU_RAS_BLOCK__GFX_TCC_INDEX_END =
		AMDGPU_RAS_BLOCK__GFX_TCC_INDEX4_END,
	/* TCI */
	AMDGPU_RAS_BLOCK__GFX_TCI_WRITE_RAM,
	/* TCP */
	AMDGPU_RAS_BLOCK__GFX_TCP_INDEX_START,
	AMDGPU_RAS_BLOCK__GFX_TCP_CACHE_RAM =
		AMDGPU_RAS_BLOCK__GFX_TCP_INDEX_START,
	AMDGPU_RAS_BLOCK__GFX_TCP_LFIFO_RAM,
	AMDGPU_RAS_BLOCK__GFX_TCP_CMD_FIFO,
	AMDGPU_RAS_BLOCK__GFX_TCP_VM_FIFO,
	AMDGPU_RAS_BLOCK__GFX_TCP_DB_RAM,
	AMDGPU_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO0,
	AMDGPU_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
	AMDGPU_RAS_BLOCK__GFX_TCP_INDEX_END =
		AMDGPU_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
	/* TD */
	AMDGPU_RAS_BLOCK__GFX_TD_INDEX_START,
	AMDGPU_RAS_BLOCK__GFX_TD_SS_FIFO_LO =
		AMDGPU_RAS_BLOCK__GFX_TD_INDEX_START,
	AMDGPU_RAS_BLOCK__GFX_TD_SS_FIFO_HI,
	AMDGPU_RAS_BLOCK__GFX_TD_CS_FIFO,
	AMDGPU_RAS_BLOCK__GFX_TD_INDEX_END = AMDGPU_RAS_BLOCK__GFX_TD_CS_FIFO,
	/* EA (3 sub-ranges) */
	AMDGPU_RAS_BLOCK__GFX_EA_INDEX_START,
	/* EA range 0 */
	AMDGPU_RAS_BLOCK__GFX_EA_INDEX0_START =
		AMDGPU_RAS_BLOCK__GFX_EA_INDEX_START,
	AMDGPU_RAS_BLOCK__GFX_EA_DRAMRD_CMDMEM =
		AMDGPU_RAS_BLOCK__GFX_EA_INDEX0_START,
	AMDGPU_RAS_BLOCK__GFX_EA_DRAMWR_CMDMEM,
	AMDGPU_RAS_BLOCK__GFX_EA_DRAMWR_DATAMEM,
	AMDGPU_RAS_BLOCK__GFX_EA_RRET_TAGMEM,
	AMDGPU_RAS_BLOCK__GFX_EA_WRET_TAGMEM,
	AMDGPU_RAS_BLOCK__GFX_EA_GMIRD_CMDMEM,
	AMDGPU_RAS_BLOCK__GFX_EA_GMIWR_CMDMEM,
	AMDGPU_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
	AMDGPU_RAS_BLOCK__GFX_EA_INDEX0_END =
		AMDGPU_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
	/* EA range 1 */
	AMDGPU_RAS_BLOCK__GFX_EA_INDEX1_START,
	AMDGPU_RAS_BLOCK__GFX_EA_DRAMRD_PAGEMEM =
		AMDGPU_RAS_BLOCK__GFX_EA_INDEX1_START,
	AMDGPU_RAS_BLOCK__GFX_EA_DRAMWR_PAGEMEM,
	AMDGPU_RAS_BLOCK__GFX_EA_IORD_CMDMEM,
	AMDGPU_RAS_BLOCK__GFX_EA_IOWR_CMDMEM,
	AMDGPU_RAS_BLOCK__GFX_EA_IOWR_DATAMEM,
	AMDGPU_RAS_BLOCK__GFX_EA_GMIRD_PAGEMEM,
	AMDGPU_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
	AMDGPU_RAS_BLOCK__GFX_EA_INDEX1_END =
		AMDGPU_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
	/* EA range 2 */
	AMDGPU_RAS_BLOCK__GFX_EA_INDEX2_START,
	AMDGPU_RAS_BLOCK__GFX_EA_MAM_D0MEM =
		AMDGPU_RAS_BLOCK__GFX_EA_INDEX2_START,
	AMDGPU_RAS_BLOCK__GFX_EA_MAM_D1MEM,
	AMDGPU_RAS_BLOCK__GFX_EA_MAM_D2MEM,
	AMDGPU_RAS_BLOCK__GFX_EA_MAM_D3MEM,
	AMDGPU_RAS_BLOCK__GFX_EA_INDEX2_END =
		AMDGPU_RAS_BLOCK__GFX_EA_MAM_D3MEM,
	AMDGPU_RAS_BLOCK__GFX_EA_INDEX_END =
		AMDGPU_RAS_BLOCK__GFX_EA_INDEX2_END,
	/* UTC VM L2 bank */
	AMDGPU_RAS_BLOCK__UTC_VML2_BANK_CACHE,
	/* UTC VM walker */
	AMDGPU_RAS_BLOCK__UTC_VML2_WALKER,
	/* UTC ATC L2 2MB cache */
	AMDGPU_RAS_BLOCK__UTC_ATCL2_CACHE_2M_BANK,
	/* UTC ATC L2 4KB cache */
	AMDGPU_RAS_BLOCK__UTC_ATCL2_CACHE_4K_BANK,
	AMDGPU_RAS_BLOCK__GFX_MAX
};
enum amdgpu_ras_error_type {
AMDGPU_RAS_ERROR__NONE = 0,
AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE = 2,
AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE = 4,
AMDGPU_RAS_ERROR__POISON = 8,
AMDGPU_RAS_ERROR__NONE = 0,
AMDGPU_RAS_ERROR__PARITY = 1,
AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE = 2,
AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE = 4,
AMDGPU_RAS_ERROR__POISON = 8,
};
/* One table-driven RAS error-injection test case. */
struct ras_inject_test_config {
	char name[64];		/* test-case name printed in the results */
	char block[32];		/* RAS block name, e.g. "umc" or "gfx" */
	int sub_block;		/* sub-block index within the block */
	enum amdgpu_ras_error_type type;	/* error type to inject */
	uint64_t address;	/* injection address forwarded to the kernel */
	uint64_t value;		/* injection value forwarded to the kernel */
};
struct ras_common_if {
@ -100,8 +342,10 @@ struct ras_debug_if {
int op;
};
/* for now, only umc, gfx, sdma has implemented. */
#define DEFAULT_RAS_BLOCK_MASK_INJECT (1 << AMDGPU_RAS_BLOCK__UMC)
#define DEFAULT_RAS_BLOCK_MASK_QUERY (1 << AMDGPU_RAS_BLOCK__UMC)
#define DEFAULT_RAS_BLOCK_MASK_INJECT ((1 << AMDGPU_RAS_BLOCK__UMC) |\
(1 << AMDGPU_RAS_BLOCK__GFX))
#define DEFAULT_RAS_BLOCK_MASK_QUERY ((1 << AMDGPU_RAS_BLOCK__UMC) |\
(1 << AMDGPU_RAS_BLOCK__GFX))
#define DEFAULT_RAS_BLOCK_MASK_BASIC (1 << AMDGPU_RAS_BLOCK__UMC |\
(1 << AMDGPU_RAS_BLOCK__SDMA) |\
(1 << AMDGPU_RAS_BLOCK__GFX))
@ -146,12 +390,78 @@ struct ras_DID_test_mask{
DEFAULT_RAS_BLOCK_MASK_BASIC\
}
/* UMC injection cases: one single-correctable error, sub-block 0, addr/value 0. */
static const struct ras_inject_test_config umc_ras_inject_test[] = {
	{"ras_umc.1.0", "umc", 0, AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE, 0, 0},
};
/*
 * GFX injection cases: one single-correctable error per representative GFX
 * sub-block (see enum amdgpu_ras_gfx_subblock), all at address/value 0.
 */
static const struct ras_inject_test_config gfx_ras_inject_test[] = {
	{"ras_gfx.2.0", "gfx", AMDGPU_RAS_BLOCK__GFX_CPC_UCODE,
		AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE, 0, 0},
	{"ras_gfx.2.1", "gfx", AMDGPU_RAS_BLOCK__GFX_CPF_TAG,
		AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE, 0, 0},
	{"ras_gfx.2.2", "gfx", AMDGPU_RAS_BLOCK__GFX_CPG_TAG,
		AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE, 0, 0},
	{"ras_gfx.2.3", "gfx", AMDGPU_RAS_BLOCK__GFX_SQ_LDS_D,
		AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE, 0, 0},
	{"ras_gfx.2.4", "gfx", AMDGPU_RAS_BLOCK__GFX_SQC_DATA_CU1_UTCL1_LFIFO,
		AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE, 0, 0},
	{"ras_gfx.2.5", "gfx", AMDGPU_RAS_BLOCK__GFX_SQC_INST_BANKA_TAG_RAM,
		AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE, 0, 0},
	{"ras_gfx.2.6", "gfx", AMDGPU_RAS_BLOCK__GFX_SQC_INST_BANKB_TAG_RAM,
		AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE, 0, 0},
	{"ras_gfx.2.7", "gfx", AMDGPU_RAS_BLOCK__GFX_TA_FS_DFIFO,
		AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE, 0, 0},
	{"ras_gfx.2.8", "gfx", AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_DATA,
		AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE, 0, 0},
	{"ras_gfx.2.9", "gfx", AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_0_1,
		AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE, 0, 0},
	{"ras_gfx.2.10", "gfx", AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_0,
		AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE, 0, 0},
	{"ras_gfx.2.11", "gfx", AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_1,
		AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE, 0, 0},
	{"ras_gfx.2.12", "gfx", AMDGPU_RAS_BLOCK__GFX_TCP_CACHE_RAM,
		AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE, 0, 0},
	{"ras_gfx.2.13", "gfx", AMDGPU_RAS_BLOCK__GFX_TD_SS_FIFO_LO,
		AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE, 0, 0},
	{"ras_gfx.2.14", "gfx", AMDGPU_RAS_BLOCK__GFX_EA_DRAMRD_CMDMEM,
		AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE, 0, 0},
};
/*
 * Per-device RAS block masks keyed by PCI device ID and revision.
 * NOTE(review): 0x66a1 is presumably the Vega20 DID — verify against the
 * amdgpu PCI ID table before extending this list.
 */
static const struct ras_DID_test_mask ras_DID_array[] = {
	{0x66a1, 0x00, RAS_BLOCK_MASK_ALL},
	{0x66a1, 0x01, RAS_BLOCK_MASK_ALL},
	{0x66a1, 0x04, RAS_BLOCK_MASK_ALL},
};
/*
 * Look up the RAS block index whose name matches @name.
 * Returns the index into ras_block_string on a match, or
 * ARRAY_SIZE(ras_block_string) (an out-of-range sentinel) when no block
 * with that name exists.
 */
static uint32_t amdgpu_ras_find_block_id_by_name(const char *name)
{
	uint32_t id;

	for (id = 0; id < ARRAY_SIZE(ras_block_string); id++) {
		if (!strcmp(ras_block_string[id], name))
			break;
	}

	return id;
}
/*
 * Translate a RAS error type into the keyword string used by the kernel's
 * RAS debugfs interface.  Returns NULL for AMDGPU_RAS_ERROR__NONE and any
 * unrecognised type.
 */
static char *amdgpu_ras_get_error_type_id(enum amdgpu_ras_error_type type)
{
	if (type == AMDGPU_RAS_ERROR__PARITY)
		return "parity";
	if (type == AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE)
		return "single_correctable";
	if (type == AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE)
		return "multi_uncorrectable";
	if (type == AMDGPU_RAS_ERROR__POISON)
		return "poison";

	return NULL;
}
static struct ras_test_mask amdgpu_ras_get_test_mask(drmDevicePtr device)
{
int i;
@ -453,6 +763,34 @@ static int amdgpu_ras_query_err_count(enum amdgpu_ras_block block,
return 0;
}
/*
 * Inject one RAS error of @type into @sub_block of @block at
 * @address/@value via the RAS debugfs interface (op 2 = inject).
 * Returns 0 on success, -1 when RAS is not enabled for the block or the
 * debugfs invocation fails; failures are also flagged through CU_ASSERT.
 */
static int amdgpu_ras_inject(enum amdgpu_ras_block block,
		uint32_t sub_block, enum amdgpu_ras_error_type type,
		uint64_t address, uint64_t value)
{
	struct ras_debug_if data = { .op = 2, };
	struct ras_inject_if *inject = &data.inject;
	int ret;

	if (amdgpu_ras_is_feature_enabled(block) <= 0) {
		fprintf(stderr, "block id(%d) is not valid\n", block);
		return -1;
	}

	inject->head.block = block;
	inject->head.type = type;
	inject->head.sub_block_index = sub_block;
	/* NOTE(review): strncpy does not NUL-terminate when the source is
	 * >= 32 bytes; current block names are short, but confirm the
	 * kernel treats head.name as a fixed-width field. */
	strncpy(inject->head.name, ras_block_str(block), 32);
	inject->address = address;
	inject->value = value;

	ret = amdgpu_ras_invoke(&data);
	CU_ASSERT_EQUAL(ret, 0);
	if (ret)
		return -1;

	return 0;
}
//tests
static void amdgpu_ras_features_test(int enable)
{
@ -503,69 +841,83 @@ static void amdgpu_ras_enable_test(void)
}
}
static void __amdgpu_ras_inject_test(void)
static void __amdgpu_ras_ip_inject_test(const struct ras_inject_test_config *ip_test,
uint32_t size)
{
struct ras_debug_if data;
int ret;
int i;
unsigned long ue, ce, ue_old, ce_old;
int i, ret;
unsigned long old_ue, old_ce;
unsigned long ue, ce;
uint32_t block;
int timeout;
bool pass;
data.op = 2;
for (i = 0; i < AMDGPU_RAS_BLOCK__LAST; i++) {
int timeout = 3;
struct ras_inject_if inject = {
.head = {
.block = i,
.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
.sub_block_index = 0,
.name = "",
},
.address = 0,
.value = 0,
};
for (i = 0; i < size; i++) {
timeout = 3;
pass = false;
if (amdgpu_ras_is_feature_enabled(i) <= 0)
continue;
block = amdgpu_ras_find_block_id_by_name(ip_test[i].block);
if (!((1 << i) & ras_block_mask_inject))
continue;
/* Ensure one valid ip block */
if (block == ARRAY_SIZE(ras_block_string))
break;
data.inject = inject;
/* Ensure RAS feature for the IP block is enabled by kernel */
if (amdgpu_ras_is_feature_supported(block) <= 0)
break;
ret = amdgpu_ras_query_err_count(i, &ue_old, &ce_old);
ret = amdgpu_ras_query_err_count(block, &old_ue, &old_ce);
CU_ASSERT_EQUAL(ret, 0);
if (ret)
continue;
break;
ret = amdgpu_ras_invoke(&data);
ret = amdgpu_ras_inject(block,
ip_test[i].sub_block,
ip_test[i].type,
ip_test[i].address,
ip_test[i].value);
CU_ASSERT_EQUAL(ret, 0);
if (ret)
continue;
break;
loop:
while (timeout > 0) {
ret = amdgpu_ras_query_err_count(i, &ue, &ce);
CU_ASSERT_EQUAL(ret, 0);
sleep(5);
ret = amdgpu_ras_query_err_count(block, &ue, &ce);
CU_ASSERT_EQUAL(ret, 0);
if (ret)
continue;
if (ue_old != ue) {
/*recovery takes ~10s*/
sleep(10);
break;
if (old_ue != ue || old_ce != ce) {
pass = true;
sleep(20);
break;
}
sleep(1);
timeout -= 1;
}
CU_ASSERT_EQUAL(ue_old + 1, ue);
CU_ASSERT_EQUAL(ce_old, ce);
printf("\t Test %s@block %s, subblock %d, error_type %s, address %ld, value %ld: %s\n",
ip_test[i].name,
ip_test[i].block,
ip_test[i].sub_block,
amdgpu_ras_get_error_type_id(ip_test[i].type),
ip_test[i].address,
ip_test[i].value,
pass ? "Pass" : "Fail");
}
}
/* Run every table-driven RAS injection case: first UMC, then GFX. */
static void __amdgpu_ras_inject_test(void)
{
	printf("...\n");

	/* run UMC ras inject test */
	__amdgpu_ras_ip_inject_test(umc_ras_inject_test,
		ARRAY_SIZE(umc_ras_inject_test));

	/* run GFX ras inject test */
	__amdgpu_ras_ip_inject_test(gfx_ras_inject_test,
		ARRAY_SIZE(gfx_ras_inject_test));
}
static void amdgpu_ras_inject_test(void)
{
int i;

View File

@ -0,0 +1,298 @@
/*
* Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "CUnit/Basic.h"
#include "xf86drm.h"
#include "amdgpu_test.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include <pthread.h>
static amdgpu_device_handle device_handle;
static uint32_t major_version;
static uint32_t minor_version;
static void amdgpu_syncobj_timeline_test(void);
/*
 * Gate for the syncobj timeline suite: run it only when the kernel reports
 * a non-zero DRM_CAP_SYNCOBJ_TIMELINE capability on the opened device.
 */
CU_BOOL suite_syncobj_timeline_tests_enable(void)
{
	uint64_t cap = 0;
	int ret;

	ret = drmGetCap(drm_amdgpu[0], DRM_CAP_SYNCOBJ_TIMELINE, &cap);

	return (ret == 0 && cap != 0) ? CU_TRUE : CU_FALSE;
}
/*
 * CUnit suite init: open the amdgpu device and cache the library version.
 * Returns CUE_SUCCESS, or CUE_SINIT_FAILED with a hint on stdout when the
 * failure looks permission-related (EACCES).
 */
int suite_syncobj_timeline_tests_init(void)
{
	int r;

	r = amdgpu_device_initialize(drm_amdgpu[0], &major_version,
				  &minor_version, &device_handle);

	if (r) {
		if ((r == -EACCES) && (errno == EACCES))
			printf("\n\nError:%s. "
				"Hint:Try to run this test program as root.",
				strerror(errno));
		return CUE_SINIT_FAILED;
	}

	return CUE_SUCCESS;
}
/* CUnit suite teardown: release the device handle opened in init. */
int suite_syncobj_timeline_tests_clean(void)
{
	if (amdgpu_device_deinitialize(device_handle) != 0)
		return CUE_SCLEAN_FAILED;

	return CUE_SUCCESS;
}
/* Tests registered for the syncobj timeline suite. */
CU_TestInfo syncobj_timeline_tests[] = {
	{ "syncobj timeline test",  amdgpu_syncobj_timeline_test },
	CU_TEST_INFO_NULL,
};
#define GFX_COMPUTE_NOP 0xffff1000
#define SDMA_NOP 0x0
/*
 * Submit a single-IB command stream that either waits on (GFX ring) or
 * signals (SDMA ring) timeline @point of @syncobj_handle, then block until
 * the submission's fence retires.
 *
 * wait_or_signal == true  -> GFX NOP IB + AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT
 * wait_or_signal == false -> SDMA NOP IB + AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL
 *
 * Returns the result of the final context teardown (0 on success); every
 * intermediate step is additionally checked with CU_ASSERT.
 *
 * Fixes vs. the original:
 *  - `ptr` was `static`, but this helper runs concurrently in the wait and
 *    signal threads of amdgpu_syncobj_timeline_test; a shared static pointer
 *    is a data race that can make one thread scribble over the other
 *    thread's IB.  It is now an ordinary (stack) local.
 *  - removed the unused local `flags`.
 */
static int syncobj_command_submission_helper(uint32_t syncobj_handle, bool
					     wait_or_signal, uint64_t point)
{
	amdgpu_context_handle context_handle;
	amdgpu_bo_handle ib_result_handle;
	void *ib_result_cpu;
	uint64_t ib_result_mc_address;
	struct drm_amdgpu_cs_chunk chunks[2];
	struct drm_amdgpu_cs_chunk_data chunk_data;
	struct drm_amdgpu_cs_chunk_syncobj syncobj_data;
	struct amdgpu_cs_fence fence_status;
	amdgpu_bo_list_handle bo_list;
	amdgpu_va_handle va_handle;
	uint32_t expired;
	int i, r;
	uint64_t seq_no;
	uint32_t *ptr;

	r = amdgpu_cs_ctx_create(device_handle, &context_handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
				    AMDGPU_GEM_DOMAIN_GTT, 0,
				    &ib_result_handle, &ib_result_cpu,
				    &ib_result_mc_address, &va_handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL,
			       &bo_list);
	CU_ASSERT_EQUAL(r, 0);

	/* Fill the IB with NOPs for the ring we are about to use. */
	ptr = ib_result_cpu;
	for (i = 0; i < 16; ++i)
		ptr[i] = wait_or_signal ? GFX_COMPUTE_NOP: SDMA_NOP;

	/* Chunk 0: the IB itself. */
	chunks[0].chunk_id = AMDGPU_CHUNK_ID_IB;
	chunks[0].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
	chunks[0].chunk_data = (uint64_t)(uintptr_t)&chunk_data;
	chunk_data.ib_data._pad = 0;
	chunk_data.ib_data.va_start = ib_result_mc_address;
	chunk_data.ib_data.ib_bytes = 16 * 4;
	chunk_data.ib_data.ip_type = wait_or_signal ? AMDGPU_HW_IP_GFX :
		AMDGPU_HW_IP_DMA;
	chunk_data.ib_data.ip_instance = 0;
	chunk_data.ib_data.ring = 0;
	chunk_data.ib_data.flags = 0;

	/* Chunk 1: the timeline wait/signal operation. */
	chunks[1].chunk_id = wait_or_signal ?
		AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT :
		AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL;
	chunks[1].length_dw = sizeof(struct drm_amdgpu_cs_chunk_syncobj) / 4;
	chunks[1].chunk_data = (uint64_t)(uintptr_t)&syncobj_data;
	syncobj_data.handle = syncobj_handle;
	syncobj_data.point = point;
	/* Allow waiting on a point whose signalling job is not submitted yet. */
	syncobj_data.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;

	r = amdgpu_cs_submit_raw(device_handle,
				 context_handle,
				 bo_list,
				 2,
				 chunks,
				 &seq_no);
	CU_ASSERT_EQUAL(r, 0);

	/* Block until the submission retires on its ring. */
	memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence));
	fence_status.context = context_handle;
	fence_status.ip_type = wait_or_signal ? AMDGPU_HW_IP_GFX:
		AMDGPU_HW_IP_DMA;
	fence_status.ip_instance = 0;
	fence_status.ring = 0;
	fence_status.fence = seq_no;

	r = amdgpu_cs_query_fence_status(&fence_status,
			AMDGPU_TIMEOUT_INFINITE,0, &expired);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_list_destroy(bo_list);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
				     ib_result_mc_address, 4096);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_cs_ctx_free(context_handle);
	CU_ASSERT_EQUAL(r, 0);

	return r;
}
/* Argument bundle handed to the wait/signal worker threads. */
struct syncobj_point {
	uint32_t syncobj_handle;	/* timeline syncobj to operate on */
	uint64_t point;			/* timeline point to wait on / signal */
};
/* pthread entry: submit a GFX job that waits on the given timeline point. */
static void *syncobj_wait(void *data)
{
	struct syncobj_point *point = data;
	int ret;

	ret = syncobj_command_submission_helper(point->syncobj_handle, true,
						point->point);
	CU_ASSERT_EQUAL(ret, 0);

	return (void *)(long)ret;
}
/* pthread entry: submit an SDMA job that signals the given timeline point. */
static void *syncobj_signal(void *data)
{
	struct syncobj_point *point = data;
	int ret;

	ret = syncobj_command_submission_helper(point->syncobj_handle, false,
						point->point);
	CU_ASSERT_EQUAL(ret, 0);

	return (void *)(long)ret;
}
/*
 * End-to-end timeline syncobj test:
 *  - a GFX submission waits on point 5 while a concurrent SDMA submission
 *    signals point 10 (the wait must be released since 10 >= 5);
 *  - query the timeline payload;
 *  - CPU-wait on point 16 while a third thread signals it;
 *  - round-trip point 16 through sync_file export/import to point 18;
 *  - signal point 20 directly from the CPU and verify the payload.
 */
static void amdgpu_syncobj_timeline_test(void)
{
	static pthread_t wait_thread;
	static pthread_t signal_thread;
	static pthread_t c_thread;
	struct syncobj_point sp1, sp2, sp3;
	uint32_t syncobj_handle;
	uint64_t payload;
	uint64_t wait_point, signal_point;
	uint64_t timeout;
	struct timespec tp;
	int r, sync_fd;
	void *tmp;

	r =  amdgpu_cs_create_syncobj2(device_handle, 0, &syncobj_handle);
	CU_ASSERT_EQUAL(r, 0);

	/* GPU wait on point 5 */
	sp1.syncobj_handle = syncobj_handle;
	sp1.point = 5;
	r = pthread_create(&wait_thread, NULL, syncobj_wait, &sp1);
	CU_ASSERT_EQUAL(r, 0);

	/* GPU signal on point 10 (releases the point-5 wait) */
	sp2.syncobj_handle = syncobj_handle;
	sp2.point = 10;
	r = pthread_create(&signal_thread, NULL, syncobj_signal, &sp2);
	CU_ASSERT_EQUAL(r, 0);

	r = pthread_join(wait_thread, &tmp);
	CU_ASSERT_EQUAL(r, 0);
	CU_ASSERT_EQUAL(tmp, 0);

	r = pthread_join(signal_thread, &tmp);
	CU_ASSERT_EQUAL(r, 0);
	CU_ASSERT_EQUAL(tmp, 0);

	/* query timeline payload: must equal the last signalled point */
	r = amdgpu_cs_syncobj_query(device_handle, &syncobj_handle,
				    &payload, 1);
	CU_ASSERT_EQUAL(r, 0);
	CU_ASSERT_EQUAL(payload, 10);

	/* GPU signal on point 16 from a third thread */
	sp3.syncobj_handle = syncobj_handle;
	sp3.point = 16;
	r = pthread_create(&c_thread, NULL, syncobj_signal, &sp3);
	CU_ASSERT_EQUAL(r, 0);

	/* CPU wait on point 16 */
	wait_point = 16;
	timeout = 0;
	clock_gettime(CLOCK_MONOTONIC, &tp);
	timeout = tp.tv_sec * 1000000000ULL + tp.tv_nsec;
	/* NOTE(review): 0x10000000000 ns is 2^40 ns ~= 1100 s, not the 10 s
	 * the original comment claimed. */
	timeout += 0x10000000000;
	r = amdgpu_cs_syncobj_timeline_wait(device_handle, &syncobj_handle,
					    &wait_point, 1, timeout,
					    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
					    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
					    NULL);
	CU_ASSERT_EQUAL(r, 0);

	r = pthread_join(c_thread, &tmp);
	CU_ASSERT_EQUAL(r, 0);
	CU_ASSERT_EQUAL(tmp, 0);

	/* export point 16 as a sync_file and import it back at point 18 */
	r = amdgpu_cs_syncobj_export_sync_file2(device_handle, syncobj_handle,
						16,
						DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
						&sync_fd);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_cs_syncobj_import_sync_file2(device_handle, syncobj_handle,
						18, sync_fd);
	CU_ASSERT_EQUAL(r, 0);
	r = amdgpu_cs_syncobj_query(device_handle, &syncobj_handle,
				    &payload, 1);
	CU_ASSERT_EQUAL(r, 0);
	CU_ASSERT_EQUAL(payload, 18);

	/* CPU signal on point 20 */
	signal_point = 20;
	r = amdgpu_cs_syncobj_timeline_signal(device_handle, &syncobj_handle,
					      &signal_point, 1);
	CU_ASSERT_EQUAL(r, 0);
	r = amdgpu_cs_syncobj_query(device_handle, &syncobj_handle,
				    &payload, 1);
	CU_ASSERT_EQUAL(r, 0);
	CU_ASSERT_EQUAL(payload, 20);

	r = amdgpu_cs_destroy_syncobj(device_handle, syncobj_handle);
	CU_ASSERT_EQUAL(r, 0);
}

View File

@ -44,6 +44,14 @@ struct amdgpu_vcn_bo {
uint8_t *ptr;
};
/*
 * Per-family VCN decode register offsets.  Usage in this file shows data0
 * is written with the low 32 bits of a message address and data1 with the
 * high 32 bits; cmd carries the decode command, nop pads the IB to a
 * 16-dword multiple, and cntl is written with 1 at decode time.
 */
struct amdgpu_vcn_reg {
	uint32_t data0;
	uint32_t data1;
	uint32_t cmd;
	uint32_t nop;
	uint32_t cntl;
};
static amdgpu_device_handle device_handle;
static uint32_t major_version;
static uint32_t minor_version;
@ -57,6 +65,7 @@ static uint32_t *ib_cpu;
static amdgpu_bo_handle resources[MAX_RESOURCES];
static unsigned num_resources;
static struct amdgpu_vcn_reg reg;
static void amdgpu_cs_vcn_dec_create(void);
static void amdgpu_cs_vcn_dec_decode(void);
@ -96,6 +105,21 @@ CU_BOOL suite_vcn_tests_enable(void)
return CU_FALSE;
}
if (family_id == AMDGPU_FAMILY_RV) {
reg.data0 = 0x81c4;
reg.data1 = 0x81c5;
reg.cmd = 0x81c3;
reg.nop = 0x81ff;
reg.cntl = 0x81c6;
} else if (family_id == AMDGPU_FAMILY_NV) {
reg.data0 = 0x504;
reg.data1 = 0x505;
reg.cmd = 0x503;
reg.nop = 0x53f;
reg.cntl = 0x506;
} else
return CU_FALSE;
return CU_TRUE;
}
@ -237,11 +261,11 @@ static void free_resource(struct amdgpu_vcn_bo *vcn_bo)
static void vcn_dec_cmd(uint64_t addr, unsigned cmd, int *idx)
{
ib_cpu[(*idx)++] = 0x81C4;
ib_cpu[(*idx)++] = reg.data0;
ib_cpu[(*idx)++] = addr;
ib_cpu[(*idx)++] = 0x81C5;
ib_cpu[(*idx)++] = reg.data1;
ib_cpu[(*idx)++] = addr >> 32;
ib_cpu[(*idx)++] = 0x81C3;
ib_cpu[(*idx)++] = reg.cmd;
ib_cpu[(*idx)++] = cmd << 1;
}
@ -262,14 +286,14 @@ static void amdgpu_cs_vcn_dec_create(void)
memcpy(msg_buf.ptr, vcn_dec_create_msg, sizeof(vcn_dec_create_msg));
len = 0;
ib_cpu[len++] = 0x81C4;
ib_cpu[len++] = reg.data0;
ib_cpu[len++] = msg_buf.addr;
ib_cpu[len++] = 0x81C5;
ib_cpu[len++] = reg.data1;
ib_cpu[len++] = msg_buf.addr >> 32;
ib_cpu[len++] = 0x81C3;
ib_cpu[len++] = reg.cmd;
ib_cpu[len++] = 0;
for (; len % 16; ) {
ib_cpu[len++] = 0x81ff;
ib_cpu[len++] = reg.nop;
ib_cpu[len++] = 0;
}
@ -336,10 +360,10 @@ static void amdgpu_cs_vcn_dec_decode(void)
vcn_dec_cmd(it_addr, 0x204, &len);
vcn_dec_cmd(ctx_addr, 0x206, &len);
ib_cpu[len++] = 0x81C6;
ib_cpu[len++] = reg.cntl;
ib_cpu[len++] = 0x1;
for (; len % 16; ) {
ib_cpu[len++] = 0x81ff;
ib_cpu[len++] = reg.nop;
ib_cpu[len++] = 0;
}
@ -371,14 +395,14 @@ static void amdgpu_cs_vcn_dec_destroy(void)
memcpy(msg_buf.ptr, vcn_dec_destroy_msg, sizeof(vcn_dec_destroy_msg));
len = 0;
ib_cpu[len++] = 0x81C4;
ib_cpu[len++] = reg.data0;
ib_cpu[len++] = msg_buf.addr;
ib_cpu[len++] = 0x81C5;
ib_cpu[len++] = reg.data1;
ib_cpu[len++] = msg_buf.addr >> 32;
ib_cpu[len++] = 0x81C3;
ib_cpu[len++] = reg.cmd;
ib_cpu[len++] = 0;
for (; len % 16; ) {
ib_cpu[len++] = 0x81ff;
ib_cpu[len++] = reg.nop;
ib_cpu[len++] = 0;
}

View File

@ -55,8 +55,10 @@ static int kms_plane_probe(struct kms_plane *plane)
}
plane->formats = calloc(p->count_formats, sizeof(uint32_t));
if (!plane->formats)
if (!plane->formats) {
drmModeFreePlane(p);
return -ENOMEM;
}
for (i = 0; i < p->count_formats; i++)
plane->formats[i] = p->formats[i];

View File

@ -135,6 +135,7 @@ bo_create(int fd, unsigned int format,
int ret;
switch (format) {
case DRM_FORMAT_C8:
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV16:
@ -193,6 +194,13 @@ bo_create(int fd, unsigned int format,
bpp = 32;
break;
case DRM_FORMAT_XRGB16161616F:
case DRM_FORMAT_XBGR16161616F:
case DRM_FORMAT_ARGB16161616F:
case DRM_FORMAT_ABGR16161616F:
bpp = 64;
break;
default:
fprintf(stderr, "unsupported format 0x%08x\n", format);
return NULL;
@ -275,6 +283,7 @@ bo_create(int fd, unsigned int format,
planes[2] = virtual + offsets[2];
break;
case DRM_FORMAT_C8:
case DRM_FORMAT_ARGB4444:
case DRM_FORMAT_XRGB4444:
case DRM_FORMAT_ABGR4444:
@ -311,6 +320,10 @@ bo_create(int fd, unsigned int format,
case DRM_FORMAT_RGBX1010102:
case DRM_FORMAT_BGRA1010102:
case DRM_FORMAT_BGRX1010102:
case DRM_FORMAT_XRGB16161616F:
case DRM_FORMAT_XBGR16161616F:
case DRM_FORMAT_ARGB16161616F:
case DRM_FORMAT_ABGR16161616F:
offsets[0] = 0;
handles[0] = bo->handle;
pitches[0] = bo->pitch;

View File

@ -67,6 +67,9 @@
#include "buffers.h"
#include "cursor.h"
static enum util_fill_pattern primary_fill = UTIL_PATTERN_SMPTE;
static enum util_fill_pattern secondary_fill = UTIL_PATTERN_TILES;
struct crtc {
drmModeCrtc *crtc;
drmModeObjectProperties *props;
@ -948,9 +951,10 @@ struct property_arg {
char name[DRM_PROP_NAME_LEN+1];
uint32_t prop_id;
uint64_t value;
bool optional;
};
static void set_property(struct device *dev, struct property_arg *p)
static bool set_property(struct device *dev, struct property_arg *p)
{
drmModeObjectProperties *props = NULL;
drmModePropertyRes **props_info = NULL;
@ -982,13 +986,13 @@ static void set_property(struct device *dev, struct property_arg *p)
if (p->obj_type == 0) {
fprintf(stderr, "Object %i not found, can't set property\n",
p->obj_id);
return;
return false;
}
if (!props) {
fprintf(stderr, "%s %i has no properties\n",
obj_type, p->obj_id);
return;
return false;
}
for (i = 0; i < (int)props->count_props; ++i) {
@ -999,9 +1003,10 @@ static void set_property(struct device *dev, struct property_arg *p)
}
if (i == (int)props->count_props) {
fprintf(stderr, "%s %i has no %s property\n",
obj_type, p->obj_id, p->name);
return;
if (!p->optional)
fprintf(stderr, "%s %i has no %s property\n",
obj_type, p->obj_id, p->name);
return false;
}
p->prop_id = props->props[i];
@ -1015,6 +1020,8 @@ static void set_property(struct device *dev, struct property_arg *p)
if (ret < 0)
fprintf(stderr, "failed to set %s %i property %s to %" PRIu64 ": %s\n",
obj_type, p->obj_id, p->name, p->value, strerror(errno));
return true;
}
/* -------------------------------------------------------------------------- */
@ -1072,6 +1079,55 @@ static void add_property(struct device *dev, uint32_t obj_id,
set_property(dev, &p);
}
static bool add_property_optional(struct device *dev, uint32_t obj_id,
const char *name, uint64_t value)
{
struct property_arg p;
p.obj_id = obj_id;
strcpy(p.name, name);
p.value = value;
p.optional = true;
return set_property(dev, &p);
}
/*
 * Program the gamma/colour pipeline of a CRTC for the chosen framebuffer
 * format.  For C8 the LUT is the SMPTE palette (util_smpte_c8_gamma());
 * for every other format a linear identity ramp is built.  The atomic
 * GAMMA_LUT property is tried first; if the driver does not expose it,
 * fall back to the legacy drmModeCrtcSetGamma() ioctl.
 */
static void set_gamma(struct device *dev, unsigned crtc_id, unsigned fourcc)
{
	unsigned blob_id = 0;
	/* TODO: support 1024-sized LUTs, when the use-case arises */
	struct drm_color_lut gamma_lut[256];
	int i, ret;

	if (fourcc == DRM_FORMAT_C8) {
		/* TODO: Add C8 support for more patterns */
		util_smpte_c8_gamma(256, gamma_lut);
		/* NOTE(review): return value unchecked — on failure blob_id
		 * stays 0 and GAMMA_LUT is set to an empty blob below. */
		drmModeCreatePropertyBlob(dev->fd, gamma_lut, sizeof(gamma_lut), &blob_id);
	} else {
		/* Identity ramp: 8-bit index widened to 16-bit channel. */
		for (i = 0; i < 256; i++) {
			gamma_lut[i].red =
			gamma_lut[i].green =
			gamma_lut[i].blue = i << 8;
		}
	}

	/* Clear any leftover degamma/CTM state; both are optional props. */
	add_property_optional(dev, crtc_id, "DEGAMMA_LUT", 0);
	add_property_optional(dev, crtc_id, "CTM", 0);
	if (!add_property_optional(dev, crtc_id, "GAMMA_LUT", blob_id)) {
		uint16_t r[256], g[256], b[256];

		/* Legacy path wants three separate channel arrays. */
		for (i = 0; i < 256; i++) {
			r[i] = gamma_lut[i].red;
			g[i] = gamma_lut[i].green;
			b[i] = gamma_lut[i].blue;
		}

		ret = drmModeCrtcSetGamma(dev->fd, crtc_id, 256, r, g, b);
		if (ret)
			fprintf(stderr, "failed to set gamma: %s\n", strerror(errno));
	}
}
static int atomic_set_plane(struct device *dev, struct plane_arg *p,
int pattern, bool update)
{
@ -1206,7 +1262,7 @@ static int set_plane(struct device *dev, struct plane_arg *p)
p->w, p->h, p->format_str, plane_id);
plane_bo = bo_create(dev->fd, p->fourcc, p->w, p->h, handles,
pitches, offsets, UTIL_PATTERN_TILES);
pitches, offsets, secondary_fill);
if (plane_bo == NULL)
return -1;
@ -1247,12 +1303,14 @@ static int set_plane(struct device *dev, struct plane_arg *p)
static void atomic_set_planes(struct device *dev, struct plane_arg *p,
unsigned int count, bool update)
{
unsigned int i, pattern = UTIL_PATTERN_SMPTE;
unsigned int i, pattern = primary_fill;
/* set up planes */
for (i = 0; i < count; i++) {
if (i > 0)
pattern = UTIL_PATTERN_TILES;
pattern = secondary_fill;
else
set_gamma(dev, p[i].crtc_id, p[i].fourcc);
if (atomic_set_plane(dev, &p[i], pattern, update))
return;
@ -1335,8 +1393,8 @@ static void atomic_set_mode(struct device *dev, struct pipe_arg *pipes, unsigned
if (pipe->mode == NULL)
continue;
printf("setting mode %s-%dHz@%s on connectors ",
pipe->mode_str, pipe->mode->vrefresh, pipe->format_str);
printf("setting mode %s-%dHz on connectors ",
pipe->mode_str, pipe->mode->vrefresh);
for (j = 0; j < pipe->num_cons; ++j) {
printf("%s, ", pipe->cons[j]);
add_property(dev, pipe->con_ids[j], "CRTC_ID", pipe->crtc->crtc->crtc_id);
@ -1395,7 +1453,7 @@ static void set_mode(struct device *dev, struct pipe_arg *pipes, unsigned int co
bo = bo_create(dev->fd, pipes[0].fourcc, dev->mode.width,
dev->mode.height, handles, pitches, offsets,
UTIL_PATTERN_SMPTE);
primary_fill);
if (bo == NULL)
return;
@ -1437,6 +1495,8 @@ static void set_mode(struct device *dev, struct pipe_arg *pipes, unsigned int co
fprintf(stderr, "failed to set mode: %s\n", strerror(errno));
return;
}
set_gamma(dev, pipe->crtc->crtc->crtc_id, pipe->fourcc);
}
}
@ -1711,11 +1771,8 @@ static int parse_plane(struct plane_arg *plane, const char *p)
}
if (*end == '@') {
p = end + 1;
if (strlen(p) != 4)
return -EINVAL;
strcpy(plane->format_str, p);
strncpy(plane->format_str, end + 1, 4);
plane->format_str[4] = '\0';
} else {
strcpy(plane->format_str, "XR24");
}
@ -1740,6 +1797,18 @@ static int parse_property(struct property_arg *p, const char *arg)
return 0;
}
/*
 * Parse the "-F primary[,secondary]" option and update the global default
 * fill patterns.  A missing name leaves the corresponding global untouched;
 * unknown names are reported (and defaulted) by util_pattern_enum().
 * strtok() mutates @arg in place, which is fine for getopt's optarg here.
 */
static void parse_fill_patterns(char *arg)
{
	char *fill = strtok(arg, ",");
	if (!fill)
		return;
	primary_fill = util_pattern_enum(fill);
	fill = strtok(NULL, ",");
	if (!fill)
		return;
	secondary_fill = util_pattern_enum(fill);
}
static void usage(char *name)
{
fprintf(stderr, "usage: %s [-acDdefMPpsCvw]\n", name);
@ -1757,6 +1826,7 @@ static void usage(char *name)
fprintf(stderr, "\t-v\ttest vsynced page flipping\n");
fprintf(stderr, "\t-w <obj_id>:<prop_name>:<value>\tset property\n");
fprintf(stderr, "\t-a \tuse atomic API\n");
fprintf(stderr, "\t-F pattern1,pattern2\tspecify fill patterns\n");
fprintf(stderr, "\n Generic options:\n\n");
fprintf(stderr, "\t-d\tdrop master after mode set\n");
@ -1820,7 +1890,7 @@ static int pipe_resolve_connectors(struct device *dev, struct pipe_arg *pipe)
return 0;
}
static char optstr[] = "acdD:efM:P:ps:Cvw:";
static char optstr[] = "acdD:efF:M:P:ps:Cvw:";
int main(int argc, char **argv)
{
@ -1869,6 +1939,9 @@ int main(int argc, char **argv)
case 'f':
framebuffers = 1;
break;
case 'F':
parse_fill_patterns(optarg);
break;
case 'M':
module = optarg;
/* Preserve the default behaviour of dumping all information. */

View File

@ -39,6 +39,8 @@
.yuv = { (order), (xsub), (ysub), (chroma_stride) }
static const struct util_format_info format_info[] = {
/* Indexed */
{ DRM_FORMAT_C8, "C8" },
/* YUV packed */
{ DRM_FORMAT_UYVY, "UYVY", MAKE_YUV_INFO(YUV_YCbCr | YUV_CY, 2, 2, 2) },
{ DRM_FORMAT_VYUY, "VYUY", MAKE_YUV_INFO(YUV_YCrCb | YUV_CY, 2, 2, 2) },
@ -91,6 +93,11 @@ static const struct util_format_info format_info[] = {
{ DRM_FORMAT_RGBX1010102, "RX30", MAKE_RGB_INFO(10, 22, 10, 12, 10, 2, 0, 0) },
{ DRM_FORMAT_BGRA1010102, "BA30", MAKE_RGB_INFO(10, 2, 10, 12, 10, 22, 2, 0) },
{ DRM_FORMAT_BGRX1010102, "BX30", MAKE_RGB_INFO(10, 2, 10, 12, 10, 22, 0, 0) },
{ DRM_FORMAT_XRGB16161616F, "XR4H", MAKE_RGB_INFO(16, 32, 16, 16, 16, 0, 0, 0) },
{ DRM_FORMAT_XBGR16161616F, "XB4H", MAKE_RGB_INFO(16, 0, 16, 16, 16, 32, 0, 0) },
{ DRM_FORMAT_ARGB16161616F, "AR4H", MAKE_RGB_INFO(16, 32, 16, 16, 16, 0, 16, 48) },
{ DRM_FORMAT_ABGR16161616F, "AB4H", MAKE_RGB_INFO(16, 0, 16, 16, 16, 32, 16, 48) },
};
uint32_t util_format_fourcc(const char *name)

View File

@ -35,6 +35,7 @@
#include <math.h>
#endif
#include "common.h"
#include "format.h"
#include "pattern.h"
@ -60,15 +61,101 @@ struct color_yuv {
.u = MAKE_YUV_601_U(r, g, b), \
.v = MAKE_YUV_601_V(r, g, b) }
/*
 * This function takes 8-bit color values.
 * Expand an 8-bit component to the target component's bit width and shift
 * it into position.  Replicating the byte into 16 bits before truncating
 * preserves full dynamic range (0xff maps to an all-ones component).
 */
static inline uint32_t shiftcolor8(const struct util_color_component *comp,
				   uint32_t value)
{
	value &= 0xff;
	/* Fill the low bits with the high bits. */
	value = (value << 8) | value;
	/* Shift down to remove unwanted low bits */
	value = value >> (16 - comp->length);
	/* Shift back up to where the value should be */
	return value << comp->offset;
}
/*
 * This function takes 10-bit color values.
 * Same scheme as shiftcolor8(): widen 10 bits to 16 by repeating the top
 * bits into the bottom, truncate to comp->length, shift to comp->offset.
 */
static inline uint32_t shiftcolor10(const struct util_color_component *comp,
				    uint32_t value)
{
	value &= 0x3ff;
	/* Fill the low bits with the high bits. */
	value = (value << 6) | (value >> 4);
	/* Shift down to remove unwanted low bits */
	value = value >> (16 - comp->length);
	/* Shift back up to where the value should be */
	return value << comp->offset;
}
/*
 * This function takes 16-bit color values.
 * Input already has full width, so no bit replication is needed — just
 * truncate to comp->length and place at comp->offset.  Returns 64 bits
 * because fp16 pixels can be up to 64 bits wide.
 */
static inline uint64_t shiftcolor16(const struct util_color_component *comp,
				    uint64_t value)
{
	value &= 0xffff;
	/* Shift down to remove unwanted low bits */
	value = value >> (16 - comp->length);
	/* Shift back up to where the value should be */
	return value << comp->offset;
}
#define MAKE_RGBA10(rgb, r, g, b, a) \
(shiftcolor10(&(rgb)->red, (r)) | \
shiftcolor10(&(rgb)->green, (g)) | \
shiftcolor10(&(rgb)->blue, (b)) | \
shiftcolor10(&(rgb)->alpha, (a)))
#define MAKE_RGBA(rgb, r, g, b, a) \
((((r) >> (8 - (rgb)->red.length)) << (rgb)->red.offset) | \
(((g) >> (8 - (rgb)->green.length)) << (rgb)->green.offset) | \
(((b) >> (8 - (rgb)->blue.length)) << (rgb)->blue.offset) | \
(((a) >> (8 - (rgb)->alpha.length)) << (rgb)->alpha.offset))
(shiftcolor8(&(rgb)->red, (r)) | \
shiftcolor8(&(rgb)->green, (g)) | \
shiftcolor8(&(rgb)->blue, (b)) | \
shiftcolor8(&(rgb)->alpha, (a)))
#define MAKE_RGB24(rgb, r, g, b) \
{ .value = MAKE_RGBA(rgb, r, g, b, 0) }
/**
 * Takes a uint16_t, divides by 65536, converts the infinite-precision
 * result to fp16 with round-to-zero.
 *
 * Copied from mesa:src/util/half_float.c
 */
static uint16_t uint16_div_64k_to_half(uint16_t v)
{
	uint32_t lead, mant, exp;

	/* Zero or subnormal result: the mantissa is simply (v << 8). */
	if (v < 4)
		return v << 8;

	/* Leading zero count within the 16-bit value. */
	lead = __builtin_clz(v) - 16;

	/* Normalize so the hidden 1 lands on bit 16, mask it off, then
	 * drop back down to the 10 mantissa bits fp16 keeps. */
	mant = (((uint32_t)v << (lead + 1)) & 0xffff) >> 6;

	/* (0{n} 1 X{15-n}) * 2^-16
	 *   = 1.X * 2^(15-n-16)
	 *   = 1.X * 2^(14-n - 15)
	 * which is the FP16 form with e = 14 - n.
	 */
	exp = 14 - lead;

	return (exp << 10) | mant;
}
/* Pack 8-bit-per-channel RGBA into an fp16 pixel; each channel is first
 * scaled up to 16 bits (<< 8) before the fixed-point -> fp16 conversion. */
#define MAKE_RGBA8FP16(rgb, r, g, b, a) \
	(shiftcolor16(&(rgb)->red, uint16_div_64k_to_half((r) << 8)) | \
	 shiftcolor16(&(rgb)->green, uint16_div_64k_to_half((g) << 8)) | \
	 shiftcolor16(&(rgb)->blue, uint16_div_64k_to_half((b) << 8)) | \
	 shiftcolor16(&(rgb)->alpha, uint16_div_64k_to_half((a) << 8)))

/* Same as above for 10-bit-per-channel input (scaled by << 6). */
#define MAKE_RGBA10FP16(rgb, r, g, b, a) \
	(shiftcolor16(&(rgb)->red, uint16_div_64k_to_half((r) << 6)) | \
	 shiftcolor16(&(rgb)->green, uint16_div_64k_to_half((g) << 6)) | \
	 shiftcolor16(&(rgb)->blue, uint16_div_64k_to_half((b) << 6)) | \
	 shiftcolor16(&(rgb)->alpha, uint16_div_64k_to_half((a) << 6)))
static void fill_smpte_yuv_planar(const struct util_yuv_info *yuv,
unsigned char *y_mem, unsigned char *u_mem,
unsigned char *v_mem, unsigned int width,
@ -457,6 +544,140 @@ static void fill_smpte_rgb32(const struct util_rgb_info *rgb, void *mem,
}
}
/*
 * Draw the SMPTE colour-bar test pattern into a 64-bit fp16 framebuffer.
 * Layout (fractions of height): top 6/9 colour bars, next 1/9 reverse
 * bars, bottom 2/9 PLUGE/black-level patches.  @stride is in bytes.
 */
static void fill_smpte_rgb16fp(const struct util_rgb_info *rgb, void *mem,
			       unsigned int width, unsigned int height,
			       unsigned int stride)
{
	const uint64_t colors_top[] = {
		MAKE_RGBA8FP16(rgb, 192, 192, 192, 255),/* grey */
		MAKE_RGBA8FP16(rgb, 192, 192, 0, 255),	/* yellow */
		MAKE_RGBA8FP16(rgb, 0, 192, 192, 255),	/* cyan */
		MAKE_RGBA8FP16(rgb, 0, 192, 0, 255),	/* green */
		MAKE_RGBA8FP16(rgb, 192, 0, 192, 255),	/* magenta */
		MAKE_RGBA8FP16(rgb, 192, 0, 0, 255),	/* red */
		MAKE_RGBA8FP16(rgb, 0, 0, 192, 255),	/* blue */
	};
	const uint64_t colors_middle[] = {
		MAKE_RGBA8FP16(rgb, 0, 0, 192, 127),	/* blue */
		MAKE_RGBA8FP16(rgb, 19, 19, 19, 127),	/* black */
		MAKE_RGBA8FP16(rgb, 192, 0, 192, 127),	/* magenta */
		MAKE_RGBA8FP16(rgb, 19, 19, 19, 127),	/* black */
		MAKE_RGBA8FP16(rgb, 0, 192, 192, 127),	/* cyan */
		MAKE_RGBA8FP16(rgb, 19, 19, 19, 127),	/* black */
		MAKE_RGBA8FP16(rgb, 192, 192, 192, 127),/* grey */
	};
	const uint64_t colors_bottom[] = {
		MAKE_RGBA8FP16(rgb, 0, 33, 76, 255),	/* in-phase */
		MAKE_RGBA8FP16(rgb, 255, 255, 255, 255),/* super white */
		MAKE_RGBA8FP16(rgb, 50, 0, 106, 255),	/* quadrature */
		MAKE_RGBA8FP16(rgb, 19, 19, 19, 255),	/* black */
		MAKE_RGBA8FP16(rgb, 9, 9, 9, 255),	/* 3.5% */
		MAKE_RGBA8FP16(rgb, 19, 19, 19, 255),	/* 7.5% */
		MAKE_RGBA8FP16(rgb, 29, 29, 29, 255),	/* 11.5% */
		MAKE_RGBA8FP16(rgb, 19, 19, 19, 255),	/* black */
	};
	unsigned int x;
	unsigned int y;

	/* Seven equal-width vertical colour bars. */
	for (y = 0; y < height * 6 / 9; ++y) {
		for (x = 0; x < width; ++x)
			((uint64_t *)mem)[x] = colors_top[x * 7 / width];
		mem += stride;
	}

	/* Thin band of reverse-order bars. */
	for (; y < height * 7 / 9; ++y) {
		for (x = 0; x < width; ++x)
			((uint64_t *)mem)[x] = colors_middle[x * 7 / width];
		mem += stride;
	}

	/* Bottom: 4 wide patches over 5/7 of the width, then 3 narrow
	 * PLUGE patches, then black to the right edge. */
	for (; y < height; ++y) {
		for (x = 0; x < width * 5 / 7; ++x)
			((uint64_t *)mem)[x] =
				colors_bottom[x * 4 / (width * 5 / 7)];
		for (; x < width * 6 / 7; ++x)
			((uint64_t *)mem)[x] =
				colors_bottom[(x - width * 5 / 7) * 3
					      / (width / 7) + 4];
		for (; x < width; ++x)
			((uint64_t *)mem)[x] = colors_bottom[7];
		mem += stride;
	}
}
/*
 * Draw the SMPTE pattern as palette indices for the C8 format.  The
 * indices 0-21 map to colours via util_smpte_c8_gamma().  Same geometry
 * as the RGB SMPTE fills: 6/9 bars, 1/9 reverse bars, 2/9 PLUGE row.
 * @stride is in bytes; width must be at least 7.
 */
static void fill_smpte_c8(void *mem, unsigned int width, unsigned int height,
			  unsigned int stride)
{
	unsigned char *row = mem;
	unsigned int x, y = 0;

	/* Top two thirds: seven colour bars (palette indices 0-6). */
	for (; y < height * 6 / 9; ++y, row += stride)
		for (x = 0; x < width; ++x)
			row[x] = x * 7 / width;

	/* Middle band: reverse-order bars (palette indices 7-13). */
	for (; y < height * 7 / 9; ++y, row += stride)
		for (x = 0; x < width; ++x)
			row[x] = 7 + (x * 7 / width);

	/* Bottom band: wide patches, PLUGE patches, then black (14-21). */
	for (; y < height; ++y, row += stride) {
		for (x = 0; x < width * 5 / 7; ++x)
			row[x] = 14 + (x * 4 / (width * 5 / 7));
		for (; x < width * 6 / 7; ++x)
			row[x] = 14 + ((x - width * 5 / 7) * 3
				       / (width / 7) + 4);
		for (; x < width; ++x)
			row[x] = 14 + 7;
	}
}
}
/*
 * Fill a gamma/palette LUT with the 22 colours referenced by the C8
 * SMPTE pattern (fill_smpte_c8()): 7 bar colours, 7 reverse-bar colours
 * and 8 bottom-row patches.  8-bit colours are widened to the 16-bit
 * channels of struct drm_color_lut by shifting into the high byte.
 *
 * @size: number of entries in @lut; must be at least 22, otherwise an
 *        error is printed and the LUT is left untouched
 * @lut:  caller-provided array; zeroed in full before the colours are set
 */
void util_smpte_c8_gamma(unsigned size, struct drm_color_lut *lut)
{
	if (size < 7 + 7 + 8) {
		/* %u: size is unsigned (was %d, a signedness mismatch) */
		printf("Error: gamma too small: %u < %d\n", size, 7 + 7 + 8);
		return;
	}
	memset(lut, 0, size * sizeof(struct drm_color_lut));

/* do-while(0) makes the multi-statement macro safe in any context */
#define FILL_COLOR(idx, r, g, b) \
	do { \
		lut[idx].red = (r) << 8; \
		lut[idx].green = (g) << 8; \
		lut[idx].blue = (b) << 8; \
	} while (0)

	FILL_COLOR( 0, 192, 192, 192);	/* grey */
	FILL_COLOR( 1, 192, 192, 0  );	/* yellow */
	FILL_COLOR( 2, 0,   192, 192);	/* cyan */
	FILL_COLOR( 3, 0,   192, 0  );	/* green */
	FILL_COLOR( 4, 192, 0,   192);	/* magenta */
	FILL_COLOR( 5, 192, 0,   0  );	/* red */
	FILL_COLOR( 6, 0,   0,   192);	/* blue */

	FILL_COLOR( 7, 0,   0,   192);	/* blue */
	FILL_COLOR( 8, 19,  19,  19 );	/* black */
	FILL_COLOR( 9, 192, 0,   192);	/* magenta */
	FILL_COLOR(10, 19,  19,  19 );	/* black */
	FILL_COLOR(11, 0,   192, 192);	/* cyan */
	FILL_COLOR(12, 19,  19,  19 );	/* black */
	FILL_COLOR(13, 192, 192, 192);	/* grey */

	FILL_COLOR(14, 0,   33,  76);	/* in-phase */
	FILL_COLOR(15, 255, 255, 255);	/* super white */
	FILL_COLOR(16, 50,  0,   106);	/* quadrature */
	FILL_COLOR(17, 19,  19,  19);	/* black */
	FILL_COLOR(18, 9,   9,   9);	/* 3.5% */
	FILL_COLOR(19, 19,  19,  19);	/* 7.5% */
	FILL_COLOR(20, 29,  29,  29);	/* 11.5% */
	FILL_COLOR(21, 19,  19,  19);	/* black */

#undef FILL_COLOR
}
static void fill_smpte(const struct util_format_info *info, void *planes[3],
unsigned int width, unsigned int height,
unsigned int stride)
@ -464,6 +685,8 @@ static void fill_smpte(const struct util_format_info *info, void *planes[3],
unsigned char *u, *v;
switch (info->format) {
case DRM_FORMAT_C8:
return fill_smpte_c8(planes[0], width, height, stride);
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_YUYV:
@ -531,6 +754,13 @@ static void fill_smpte(const struct util_format_info *info, void *planes[3],
case DRM_FORMAT_BGRX1010102:
return fill_smpte_rgb32(&info->rgb, planes[0],
width, height, stride);
case DRM_FORMAT_XRGB16161616F:
case DRM_FORMAT_XBGR16161616F:
case DRM_FORMAT_ARGB16161616F:
case DRM_FORMAT_ABGR16161616F:
return fill_smpte_rgb16fp(&info->rgb, planes[0],
width, height, stride);
}
}
@ -559,6 +789,14 @@ static void make_pwetty(void *data, unsigned int width, unsigned int height,
case DRM_FORMAT_BGR565:
cairo_format = CAIRO_FORMAT_RGB16_565;
break;
#if CAIRO_VERSION_MAJOR > 1 || (CAIRO_VERSION_MAJOR == 1 && CAIRO_VERSION_MINOR >= 12)
case DRM_FORMAT_ARGB2101010:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_ABGR2101010:
case DRM_FORMAT_XBGR2101010:
cairo_format = CAIRO_FORMAT_RGB30;
break;
#endif
default:
return;
}
@ -742,6 +980,32 @@ static void fill_tiles_rgb32(const struct util_format_info *info, void *mem,
make_pwetty(mem_base, width, height, stride, info->format);
}
/*
 * Draw the "tiles" test pattern into a 64-bit fp16 framebuffer: diagonal
 * 64x64 colour tiles, with the top-left quadrant at half alpha.  The
 * source colours are only 8 bits per channel before conversion.
 *
 * Unlike fill_tiles_rgb32() there is no make_pwetty() pass here, so the
 * unused mem_base copy of the original has been dropped.
 */
static void fill_tiles_rgb16fp(const struct util_format_info *info, void *mem,
			       unsigned int width, unsigned int height,
			       unsigned int stride)
{
	const struct util_rgb_info *rgb = &info->rgb;
	unsigned int x, y;

	/* TODO: Give this actual fp16 precision */

	for (y = 0; y < height; ++y) {
		for (x = 0; x < width; ++x) {
			/* quot/rem select the tile row/column (64px tiles) */
			div_t d = div(x+y, width);
			uint32_t rgb32 = 0x00130502 * (d.quot >> 6)
				       + 0x000a1120 * (d.rem >> 6);
			/* half alpha in the top-left quadrant */
			uint32_t alpha = ((y < height/2) && (x < width/2)) ? 127 : 255;
			uint64_t color =
				MAKE_RGBA8FP16(rgb, (rgb32 >> 16) & 0xff,
					       (rgb32 >> 8) & 0xff, rgb32 & 0xff,
					       alpha);

			((uint64_t *)mem)[x] = color;
		}
		mem += stride;
	}
}
static void fill_tiles(const struct util_format_info *info, void *planes[3],
unsigned int width, unsigned int height,
unsigned int stride)
@ -816,14 +1080,146 @@ static void fill_tiles(const struct util_format_info *info, void *planes[3],
case DRM_FORMAT_BGRX1010102:
return fill_tiles_rgb32(info, planes[0],
width, height, stride);
case DRM_FORMAT_XRGB16161616F:
case DRM_FORMAT_XBGR16161616F:
case DRM_FORMAT_ARGB16161616F:
case DRM_FORMAT_ABGR16161616F:
return fill_tiles_rgb16fp(info, planes[0],
width, height, stride);
}
}
static void fill_plain(void *planes[3],
static void fill_plain(const struct util_format_info *info, void *planes[3],
unsigned int height,
unsigned int stride)
{
memset(planes[0], 0x77, stride * height);
switch (info->format) {
case DRM_FORMAT_XRGB16161616F:
case DRM_FORMAT_XBGR16161616F:
case DRM_FORMAT_ARGB16161616F:
case DRM_FORMAT_ABGR16161616F:
/* 0x3838 = 0.5273 */
memset(planes[0], 0x38, stride * height);
break;
default:
memset(planes[0], 0x77, stride * height);
break;
}
}
/*
 * Fill a 32-bit RGB buffer with the gradient pattern: top half is a
 * 10-bit grey ramp, bottom half masks off the two low bits (& 0x3fc) to
 * emulate 8-bit precision.  Each grey level is emitted as a 2-pixel-wide
 * stripe (row[2*j] and row[2*j+1]).  See the comment above
 * fill_gradient() for why.
 */
static void fill_gradient_rgb32(const struct util_rgb_info *rgb,
				void *mem,
				unsigned int width, unsigned int height,
				unsigned int stride)
{
	int i, j;

	/* Top half: full 10-bit precision ramp. */
	for (i = 0; i < height / 2; i++) {
		uint32_t *row = mem;

		for (j = 0; j < width / 2; j++) {
			uint32_t value = MAKE_RGBA10(rgb, j & 0x3ff, j & 0x3ff, j & 0x3ff, 0);
			row[2*j] = row[2*j+1] = value;
		}
		mem += stride;
	}

	/* Bottom half: 8-bit-equivalent ramp (low 2 bits cleared). */
	for (; i < height; i++) {
		uint32_t *row = mem;

		for (j = 0; j < width / 2; j++) {
			uint32_t value = MAKE_RGBA10(rgb, j & 0x3fc, j & 0x3fc, j & 0x3fc, 0);
			row[2*j] = row[2*j+1] = value;
		}
		mem += stride;
	}
}
/*
 * fp16 variant of fill_gradient_rgb32(): same 10-bit-vs-8-bit split
 * gradient, but each pixel is a 64-bit fp16 RGBA value.
 */
static void fill_gradient_rgb16fp(const struct util_rgb_info *rgb,
				  void *mem,
				  unsigned int width, unsigned int height,
				  unsigned int stride)
{
	int i, j;

	/* Top half: full 10-bit precision ramp. */
	for (i = 0; i < height / 2; i++) {
		uint64_t *row = mem;

		for (j = 0; j < width / 2; j++) {
			uint64_t value = MAKE_RGBA10FP16(rgb, j & 0x3ff, j & 0x3ff, j & 0x3ff, 0);
			row[2*j] = row[2*j+1] = value;
		}
		mem += stride;
	}

	/* Bottom half: 8-bit-equivalent ramp (low 2 bits cleared). */
	for (; i < height; i++) {
		uint64_t *row = mem;

		for (j = 0; j < width / 2; j++) {
			uint64_t value = MAKE_RGBA10FP16(rgb, j & 0x3fc, j & 0x3fc, j & 0x3fc, 0);
			row[2*j] = row[2*j+1] = value;
		}
		mem += stride;
	}
}
/* The gradient pattern creates two horizontal gray gradients, split
* into two halves. The top half has 10bpc precision, the bottom half
* has 8bpc precision. When using with a 10bpc fb format, there are 3
* possible outcomes:
*
* - Pixel data is encoded as 8bpc to the display, no dithering. This
* would lead to the top and bottom halves looking identical.
*
* - Pixel data is encoded as 8bpc to the display, with dithering. This
* would lead to there being a visible difference between the two halves,
* but the top half would look a little speck-y due to the dithering.
*
* - Pixel data is encoded at 10bpc+ to the display (which implies
* the display is able to show this level of depth). This should
* lead to the top half being a very clean gradient, and visibly different
* from the bottom half.
*
* Once we support additional fb formats, this approach could be extended
* to distinguish even higher bpc precisions.
*
* Note that due to practical size considerations, for the screens
* where this matters, the pattern actually emits stripes 2-pixels
* wide for each gradient color. Otherwise the difference may be a bit
* hard to notice.
*/
/*
 * Dispatch the gradient pattern by framebuffer format.  Only single-plane
 * 32-bit RGB and fp16 formats are supported; any other format silently
 * draws nothing (falls off the switch).
 */
static void fill_gradient(const struct util_format_info *info, void *planes[3],
			  unsigned int width, unsigned int height,
			  unsigned int stride)
{
	switch (info->format) {
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_RGBA8888:
	case DRM_FORMAT_RGBX8888:
	case DRM_FORMAT_BGRA8888:
	case DRM_FORMAT_BGRX8888:
	case DRM_FORMAT_ARGB2101010:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ABGR2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_RGBA1010102:
	case DRM_FORMAT_RGBX1010102:
	case DRM_FORMAT_BGRA1010102:
	case DRM_FORMAT_BGRX1010102:
		return fill_gradient_rgb32(&info->rgb, planes[0],
					   width, height, stride);

	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ARGB16161616F:
	case DRM_FORMAT_ABGR16161616F:
		return fill_gradient_rgb16fp(&info->rgb, planes[0],
					     width, height, stride);
	}
}
/*
@ -856,10 +1252,32 @@ void util_fill_pattern(uint32_t format, enum util_fill_pattern pattern,
return fill_smpte(info, planes, width, height, stride);
case UTIL_PATTERN_PLAIN:
return fill_plain(planes, height, stride);
return fill_plain(info, planes, height, stride);
case UTIL_PATTERN_GRADIENT:
return fill_gradient(info, planes, width, height, stride);
default:
printf("Error: unsupported test pattern %u.\n", pattern);
break;
}
}
/* Command-line names for the fill patterns; indexed by enum
 * util_fill_pattern, so the designators must track that enum. */
static const char *pattern_names[] = {
	[UTIL_PATTERN_TILES] = "tiles",
	[UTIL_PATTERN_SMPTE] = "smpte",
	[UTIL_PATTERN_PLAIN] = "plain",
	[UTIL_PATTERN_GRADIENT] = "gradient",
};
/*
 * Map a pattern name from the command line to its enum value.  Unknown
 * names print an error and fall back to the SMPTE pattern.
 */
enum util_fill_pattern util_pattern_enum(const char *name)
{
	unsigned int idx;

	for (idx = 0; idx < ARRAY_SIZE(pattern_names); idx++) {
		if (strcmp(pattern_names[idx], name) == 0)
			return (enum util_fill_pattern)idx;
	}

	printf("Error: unsupported test pattern %s.\n", name);
	return UTIL_PATTERN_SMPTE;
}

View File

@ -26,14 +26,21 @@
#ifndef UTIL_PATTERN_H
#define UTIL_PATTERN_H
#include <drm_mode.h>
/* Test patterns selectable for framebuffer fills.  The numeric values
 * index the pattern_names[] table in pattern.c — do not reorder. */
enum util_fill_pattern {
	UTIL_PATTERN_TILES,
	UTIL_PATTERN_PLAIN,
	UTIL_PATTERN_SMPTE,
	UTIL_PATTERN_GRADIENT,
};
void util_fill_pattern(uint32_t format, enum util_fill_pattern pattern,
void *planes[3], unsigned int width,
unsigned int height, unsigned int stride);
void util_smpte_c8_gamma(unsigned size, struct drm_color_lut *lut);
enum util_fill_pattern util_pattern_enum(const char *name);
#endif /* UTIL_PATTERN_H */