/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2013 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include <assert.h>
#include <inttypes.h>

#include "xf86atomic.h"
#include "freedreno_ringbuffer.h"
#include "msm_priv.h"

/* represents a single cmd buffer in the submit ioctl.  Each cmd buffer has
 * a backing bo, and a reloc table.
 */
struct msm_cmd {
	struct list_head list;

	struct fd_ringbuffer *ring;
	struct fd_bo *ring_bo;

	/* reloc's table: */
	DECLARE_ARRAY(struct drm_msm_gem_submit_reloc, relocs);

	uint32_t size;

	/* has cmd already been added to parent rb's submit.cmds table? */
	int is_appended_to_submit;
};

struct msm_ringbuffer {
	struct fd_ringbuffer base;

	/* submit ioctl related tables:
	 * Note that bos and cmds are tracked by the parent ringbuffer, since
	 * that is global to the submit ioctl call.  The reloc's table is tracked
	 * per cmd-buffer.
	 */
	struct {
		/* bo's table: */
		DECLARE_ARRAY(struct drm_msm_gem_submit_bo, bos);

		/* cmd's table: */
		DECLARE_ARRAY(struct drm_msm_gem_submit_cmd, cmds);
	} submit;

	/* should have matching entries in submit.bos: */
	/* Note, only in parent ringbuffer */
	DECLARE_ARRAY(struct fd_bo *, bos);

	/* should have matching entries in submit.cmds: */
	DECLARE_ARRAY(struct msm_cmd *, cmds);

	/* List of physical cmdstream buffers (msm_cmd) associated with this
	 * logical fd_ringbuffer.
	 *
	 * Note that this is different from msm_ringbuffer::cmds (which shadows
	 * msm_ringbuffer::submit::cmds for tracking submit ioctl related state,
	 * is *only* tracked in the parent ringbuffer, and only holds "completed"
	 * cmd buffers (ie. ones whose size is already known) added via get_cmd()).
	 */
	struct list_head cmd_list;

	int is_growable;
	unsigned cmd_count;

	unsigned offset;    /* for sub-allocated stateobj rb's */

	unsigned seqno;

	/* maps fd_bo to idx: */
	void *bo_table;

	/* maps msm_cmd to drm_msm_gem_submit_cmd in parent rb.  Each rb has a
	 * list of msm_cmd's which correspond to each chunk of cmdstream in
	 * a 'growable' rb.  For each of those we need to create one
	 * drm_msm_gem_submit_cmd in the parent rb which collects the state
	 * for the submit ioctl.  Because we can have multiple IB's to the same
	 * target rb (for example, the same stateobj emitted multiple times), and
	 * because in theory we can have multiple different rb's that have a
	 * reference to a given target, we need a hashtable to track this per
	 * rb.
	 */
	void *cmd_table;
};

static inline struct msm_ringbuffer * to_msm_ringbuffer(struct fd_ringbuffer *x)
{
	return (struct msm_ringbuffer *)x;
}

#define INIT_SIZE 0x1000

static pthread_mutex_t idx_lock = PTHREAD_MUTEX_INITIALIZER;

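/* get the cmd buffer currently being written (the tail of the ring's
 * cmd_list):
 */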
static struct msm_cmd *current_cmd(struct fd_ringbuffer *ring)
{
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
	assert(!LIST_IS_EMPTY(&msm_ring->cmd_list));
	return LIST_LAST_ENTRY(&msm_ring->cmd_list, struct msm_cmd, list);
}

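/* delete a cmd buffer: drop the ref to its backing bo, unlink it from the
 * ring's cmd_list and free its reloc table:
 */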
static void ring_cmd_del(struct msm_cmd *cmd)
{
	fd_bo_del(cmd->ring_bo);
	list_del(&cmd->list);
	to_msm_ringbuffer(cmd->ring)->cmd_count--;
	free(cmd->relocs);
	free(cmd);
}

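/* allocate a new cmd buffer for the ring, either with its own backing bo
 * or (for streaming stateobjs) sub-allocated out of the pipe's shared
 * suballoc bo:
 */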
static struct msm_cmd * ring_cmd_new(struct fd_ringbuffer *ring, uint32_t size,
		enum fd_ringbuffer_flags flags)
{
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
	struct msm_cmd *cmd = calloc(1, sizeof(*cmd));

	if (!cmd)
		return NULL;

	cmd->ring = ring;

	/* TODO separate suballoc buffer for small non-streaming state, using
	 * smaller page-sized backing bo's.
	 */
	if (flags & FD_RINGBUFFER_STREAMING) {
		struct msm_pipe *msm_pipe = to_msm_pipe(ring->pipe);
		unsigned suballoc_offset = 0;
		struct fd_bo *suballoc_bo = NULL;

		if (msm_pipe->suballoc_ring) {
			struct msm_ringbuffer *suballoc_ring = to_msm_ringbuffer(msm_pipe->suballoc_ring);

			assert(msm_pipe->suballoc_ring->flags & FD_RINGBUFFER_OBJECT);
			assert(suballoc_ring->cmd_count == 1);

			suballoc_bo = current_cmd(msm_pipe->suballoc_ring)->ring_bo;

			suballoc_offset = fd_ringbuffer_size(msm_pipe->suballoc_ring) +
					suballoc_ring->offset;

			suballoc_offset = ALIGN(suballoc_offset, 0x10);

			if ((size + suballoc_offset) > suballoc_bo->size) {
				suballoc_bo = NULL;
			}
		}

		if (!suballoc_bo) {
			cmd->ring_bo = fd_bo_new_ring(ring->pipe->dev, 0x8000, 0);
			msm_ring->offset = 0;
		} else {
			cmd->ring_bo = fd_bo_ref(suballoc_bo);
			msm_ring->offset = suballoc_offset;
		}

		if (msm_pipe->suballoc_ring)
			fd_ringbuffer_del(msm_pipe->suballoc_ring);

		msm_pipe->suballoc_ring = fd_ringbuffer_ref(ring);
	} else {
		cmd->ring_bo = fd_bo_new_ring(ring->pipe->dev, size, 0);
	}
	if (!cmd->ring_bo)
		goto fail;

	list_addtail(&cmd->list, &msm_ring->cmd_list);
	msm_ring->cmd_count++;

	return cmd;

fail:
	ring_cmd_del(cmd);
	return NULL;
}

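/* append a new entry to the submit's bo table (and the parallel bos[]
 * shadow table), returning its index:
 */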
static uint32_t append_bo(struct fd_ringbuffer *ring, struct fd_bo *bo)
{
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
	uint32_t idx;

	idx = APPEND(&msm_ring->submit, bos);
	idx = APPEND(msm_ring, bos);

	msm_ring->submit.bos[idx].flags = 0;
	msm_ring->submit.bos[idx].handle = bo->handle;
	msm_ring->submit.bos[idx].presumed = to_msm_bo(bo)->presumed;

	msm_ring->bos[idx] = fd_bo_ref(bo);

	return idx;
}

/* add (if needed) bo, return idx: */
static uint32_t bo2idx(struct fd_ringbuffer *ring, struct fd_bo *bo, uint32_t flags)
{
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
	struct msm_bo *msm_bo = to_msm_bo(bo);
	uint32_t idx;
	pthread_mutex_lock(&idx_lock);
	if (msm_bo->current_ring_seqno == msm_ring->seqno) {
		idx = msm_bo->idx;
	} else {
		void *val;

		if (!msm_ring->bo_table)
			msm_ring->bo_table = drmHashCreate();

		if (!drmHashLookup(msm_ring->bo_table, bo->handle, &val)) {
			/* found */
			idx = (uint32_t)(uintptr_t)val;
		} else {
			idx = append_bo(ring, bo);
			val = (void *)(uintptr_t)idx;
			drmHashInsert(msm_ring->bo_table, bo->handle, val);
		}
		msm_bo->current_ring_seqno = msm_ring->seqno;
		msm_bo->idx = idx;
	}
	pthread_mutex_unlock(&idx_lock);
	if (flags & FD_RELOC_READ)
		msm_ring->submit.bos[idx].flags |= MSM_SUBMIT_BO_READ;
	if (flags & FD_RELOC_WRITE)
		msm_ring->submit.bos[idx].flags |= MSM_SUBMIT_BO_WRITE;
	return idx;
}

/* Ensure that submit has corresponding entry in cmds table for the
 * target cmdstream buffer:
 *
 * Returns TRUE if a new cmd was added (else FALSE if it was already in
 * the cmds table)
 */
static int get_cmd(struct fd_ringbuffer *ring, struct msm_cmd *target_cmd,
		uint32_t submit_offset, uint32_t size, uint32_t type)
{
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
	struct drm_msm_gem_submit_cmd *cmd;
	uint32_t i;
	void *val;

	if (!msm_ring->cmd_table)
		msm_ring->cmd_table = drmHashCreate();

	/* figure out if we already have a cmd buf.. the hash lookup is
	 * short-circuited if:
	 *  - target cmd has never been added to submit.cmds
	 *  - target cmd is a streaming stateobj (which, unlike a longer
	 *    lived CSO stateobj, is not expected to be reused with multiple
	 *    submits)
	 */
	if (target_cmd->is_appended_to_submit &&
			!(target_cmd->ring->flags & FD_RINGBUFFER_STREAMING) &&
			!drmHashLookup(msm_ring->cmd_table, (unsigned long)target_cmd, &val)) {
		i = VOID2U64(val);
		cmd = &msm_ring->submit.cmds[i];

		assert(cmd->submit_offset == submit_offset);
		assert(cmd->size == size);
		assert(cmd->type == type);
		assert(msm_ring->submit.bos[cmd->submit_idx].handle ==
				target_cmd->ring_bo->handle);

		return FALSE;
	}

	/* create cmd buf if not: */
	i = APPEND(&msm_ring->submit, cmds);
	APPEND(msm_ring, cmds);
	msm_ring->cmds[i] = target_cmd;
	cmd = &msm_ring->submit.cmds[i];
	cmd->type = type;
	cmd->submit_idx = bo2idx(ring, target_cmd->ring_bo, FD_RELOC_READ);
	cmd->submit_offset = submit_offset;
	cmd->size = size;
	cmd->pad = 0;

	target_cmd->is_appended_to_submit = TRUE;

	if (!(target_cmd->ring->flags & FD_RINGBUFFER_STREAMING)) {
		drmHashInsert(msm_ring->cmd_table, (unsigned long)target_cmd,
				U642VOID(i));
	}

	target_cmd->size = size;

	return TRUE;
}

static void * msm_ringbuffer_hostptr(struct fd_ringbuffer *ring)
{
	struct msm_cmd *cmd = current_cmd(ring);
	uint8_t *base = fd_bo_map(cmd->ring_bo);
	return base + to_msm_ringbuffer(ring)->offset;
}

static void delete_cmds(struct msm_ringbuffer *msm_ring)
{
	struct msm_cmd *cmd, *tmp;

	LIST_FOR_EACH_ENTRY_SAFE(cmd, tmp, &msm_ring->cmd_list, list) {
		ring_cmd_del(cmd);
	}
}

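/* reset all the per-submit tracking state (bo/cmd tables, hash tables and
 * refs) after a flush or an explicit reset:
 */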
static void flush_reset(struct fd_ringbuffer *ring)
{
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
	unsigned i;

	for (i = 0; i < msm_ring->nr_bos; i++) {
		struct msm_bo *msm_bo = to_msm_bo(msm_ring->bos[i]);
		if (!msm_bo)
			continue;
		msm_bo->current_ring_seqno = 0;
		fd_bo_del(&msm_bo->base);
	}

	for (i = 0; i < msm_ring->nr_cmds; i++) {
		struct msm_cmd *msm_cmd = msm_ring->cmds[i];

		if (msm_cmd->ring == ring)
			continue;

		if (msm_cmd->ring->flags & FD_RINGBUFFER_OBJECT)
			fd_ringbuffer_del(msm_cmd->ring);
	}

	msm_ring->submit.nr_cmds = 0;
	msm_ring->submit.nr_bos = 0;
	msm_ring->nr_cmds = 0;
	msm_ring->nr_bos = 0;

	if (msm_ring->bo_table) {
		drmHashDestroy(msm_ring->bo_table);
		msm_ring->bo_table = NULL;
	}

	if (msm_ring->cmd_table) {
		drmHashDestroy(msm_ring->cmd_table);
		msm_ring->cmd_table = NULL;
	}

	if (msm_ring->is_growable) {
		delete_cmds(msm_ring);
	} else {
		/* in old mode, just reset the # of relocs: */
		current_cmd(ring)->nr_relocs = 0;
	}
}

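/* add the current cmd buffer (the range from last_start to ring->cur) to
 * the parent's submit.cmds table:
 */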
static void finalize_current_cmd(struct fd_ringbuffer *ring, uint32_t *last_start)
{
	uint32_t submit_offset, size, type;
	struct fd_ringbuffer *parent;

	if (ring->parent) {
		parent = ring->parent;
		type = MSM_SUBMIT_CMD_IB_TARGET_BUF;
	} else {
		parent = ring;
		type = MSM_SUBMIT_CMD_BUF;
	}

	submit_offset = offset_bytes(last_start, ring->start);
	size = offset_bytes(ring->cur, last_start);

	get_cmd(parent, current_cmd(ring), submit_offset, size, type);
}

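/* debug-dump the submit's bo/cmd/reloc tables (used when the submit ioctl
 * fails):
 */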
static void dump_submit(struct msm_ringbuffer *msm_ring)
{
	uint32_t i, j;

	for (i = 0; i < msm_ring->submit.nr_bos; i++) {
		struct drm_msm_gem_submit_bo *bo = &msm_ring->submit.bos[i];
		ERROR_MSG("  bos[%d]: handle=%u, flags=%x", i, bo->handle, bo->flags);
	}
	for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
		struct drm_msm_gem_submit_cmd *cmd = &msm_ring->submit.cmds[i];
		struct drm_msm_gem_submit_reloc *relocs = U642VOID(cmd->relocs);
		ERROR_MSG("  cmd[%d]: type=%u, submit_idx=%u, submit_offset=%u, size=%u",
				i, cmd->type, cmd->submit_idx, cmd->submit_offset, cmd->size);
		for (j = 0; j < cmd->nr_relocs; j++) {
			struct drm_msm_gem_submit_reloc *r = &relocs[j];
			ERROR_MSG("    reloc[%d]: submit_offset=%u, or=%08x, shift=%d, reloc_idx=%u"
					", reloc_offset=%"PRIu64, j, r->submit_offset, r->or, r->shift,
					r->reloc_idx, r->reloc_offset);
		}
	}
}

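/* Stateobj rb's have their own private bos table, so their reloc_idx values
 * are not valid in the parent submit.  Build a copy of the stateobj's reloc
 * table with reloc_idx remapped into the parent's bo table:
 */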
static struct drm_msm_gem_submit_reloc *
handle_stateobj_relocs(struct fd_ringbuffer *parent, struct fd_ringbuffer *stateobj,
		struct drm_msm_gem_submit_reloc *orig_relocs, unsigned nr_relocs)
{
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(stateobj);
	struct drm_msm_gem_submit_reloc *relocs = malloc(nr_relocs * sizeof(*relocs));
	unsigned i;

	for (i = 0; i < nr_relocs; i++) {
		unsigned idx = orig_relocs[i].reloc_idx;
		struct fd_bo *bo = msm_ring->bos[idx];
		unsigned flags = 0;

		if (msm_ring->submit.bos[idx].flags & MSM_SUBMIT_BO_READ)
			flags |= FD_RELOC_READ;
		if (msm_ring->submit.bos[idx].flags & MSM_SUBMIT_BO_WRITE)
			flags |= FD_RELOC_WRITE;

		relocs[i] = orig_relocs[i];
		relocs[i].reloc_idx = bo2idx(parent, bo, flags);
	}

	/* stateobj rb's could have reloc's to other stateobj rb's which didn't
	 * get propagated to the parent rb at _emit_reloc_ring() time (because
	 * the parent wasn't known then), so fix that up now:
	 */
	for (i = 0; i < msm_ring->nr_cmds; i++) {
		struct msm_cmd *msm_cmd = msm_ring->cmds[i];
		struct drm_msm_gem_submit_cmd *cmd = &msm_ring->submit.cmds[i];

		if (msm_ring->cmds[i]->ring == stateobj)
			continue;

		assert(msm_cmd->ring->flags & FD_RINGBUFFER_OBJECT);

		if (get_cmd(parent, msm_cmd, cmd->submit_offset, cmd->size, cmd->type)) {
			fd_ringbuffer_ref(msm_cmd->ring);
		}
	}

	return relocs;
}

static int msm_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start,
		int in_fence_fd, int *out_fence_fd)
{
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
	struct msm_pipe *msm_pipe = to_msm_pipe(ring->pipe);
	struct drm_msm_gem_submit req = {
			.flags = msm_pipe->pipe,
			.queueid = msm_pipe->queue_id,
	};
	uint32_t i;
	int ret;

	assert(!ring->parent);

	if (in_fence_fd != -1) {
		req.flags |= MSM_SUBMIT_FENCE_FD_IN | MSM_SUBMIT_NO_IMPLICIT;
		req.fence_fd = in_fence_fd;
	}

	if (out_fence_fd) {
		req.flags |= MSM_SUBMIT_FENCE_FD_OUT;
	}

	finalize_current_cmd(ring, last_start);

	/* for each of the cmd's fix up their reloc's: */
	for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
		struct msm_cmd *msm_cmd = msm_ring->cmds[i];
		struct drm_msm_gem_submit_reloc *relocs = msm_cmd->relocs;
		struct drm_msm_gem_submit_cmd *cmd;
		unsigned nr_relocs = msm_cmd->nr_relocs;

		/* for reusable stateobjs, the reloc table has reloc_idx that
		 * points into its own private bos table, rather than the global
		 * bos table used for the submit, so we need to add the stateobj's
		 * bos to the global table and construct a new relocs table with
		 * corresponding reloc_idx
		 */
		if (msm_cmd->ring->flags & FD_RINGBUFFER_OBJECT) {
			relocs = handle_stateobj_relocs(ring, msm_cmd->ring,
					relocs, nr_relocs);
		}

		cmd = &msm_ring->submit.cmds[i];
		cmd->relocs = VOID2U64(relocs);
		cmd->nr_relocs = nr_relocs;
	}

	/* needs to be after get_cmd() as that could create bos/cmds table: */
	req.bos = VOID2U64(msm_ring->submit.bos);
	req.nr_bos = msm_ring->submit.nr_bos;
	req.cmds = VOID2U64(msm_ring->submit.cmds);
	req.nr_cmds = msm_ring->submit.nr_cmds;

	DEBUG_MSG("nr_cmds=%u, nr_bos=%u", req.nr_cmds, req.nr_bos);

	ret = drmCommandWriteRead(ring->pipe->dev->fd, DRM_MSM_GEM_SUBMIT,
			&req, sizeof(req));
	if (ret) {
		ERROR_MSG("submit failed: %d (%s)", ret, strerror(errno));
		dump_submit(msm_ring);
	} else {
		/* update timestamp on all rings associated with submit: */
		for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
			struct msm_cmd *msm_cmd = msm_ring->cmds[i];
			msm_cmd->ring->last_timestamp = req.fence;
		}

		if (out_fence_fd) {
			*out_fence_fd = req.fence_fd;
		}
	}

	/* free dynamically constructed stateobj relocs tables: */
	for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
		struct drm_msm_gem_submit_cmd *cmd = &msm_ring->submit.cmds[i];
		struct msm_cmd *msm_cmd = msm_ring->cmds[i];
		if (msm_cmd->ring->flags & FD_RINGBUFFER_OBJECT) {
			free(U642VOID(cmd->relocs));
		}
	}

	flush_reset(ring);

	return ret;
}

static void msm_ringbuffer_grow(struct fd_ringbuffer *ring, uint32_t size)
{
	assert(to_msm_ringbuffer(ring)->is_growable);
	finalize_current_cmd(ring, ring->last_start);
	ring_cmd_new(ring, size, 0);
}

static void msm_ringbuffer_reset(struct fd_ringbuffer *ring)
{
	flush_reset(ring);
}

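/* emit a reloc: write the bo's presumed address (lo dword, plus the hi
 * dword on gpu_id >= 500) into the cmdstream, and record reloc entries so
 * the kernel can patch in the real address at submit time:
 */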
static void msm_ringbuffer_emit_reloc(struct fd_ringbuffer *ring,
		const struct fd_reloc *r)
{
	struct fd_ringbuffer *parent = ring->parent ? ring->parent : ring;
	struct msm_bo *msm_bo = to_msm_bo(r->bo);
	struct drm_msm_gem_submit_reloc *reloc;
	struct msm_cmd *cmd = current_cmd(ring);
	uint32_t idx = APPEND(cmd, relocs);
	uint32_t addr;

	reloc = &cmd->relocs[idx];

	reloc->reloc_idx = bo2idx(parent, r->bo, r->flags);
	reloc->reloc_offset = r->offset;
	reloc->or = r->or;
	reloc->shift = r->shift;
	reloc->submit_offset = offset_bytes(ring->cur, ring->start) +
			to_msm_ringbuffer(ring)->offset;

	addr = msm_bo->presumed;
	if (reloc->shift < 0)
		addr >>= -reloc->shift;
	else
		addr <<= reloc->shift;
	(*ring->cur++) = addr | r->or;

	if (ring->pipe->gpu_id >= 500) {
		struct drm_msm_gem_submit_reloc *reloc_hi;

		/* NOTE: grab reloc_idx *before* APPEND() since that could
		 * realloc() meaning that 'reloc' ptr is no longer valid:
		 */
		uint32_t reloc_idx = reloc->reloc_idx;

		idx = APPEND(cmd, relocs);

		reloc_hi = &cmd->relocs[idx];

		reloc_hi->reloc_idx = reloc_idx;
		reloc_hi->reloc_offset = r->offset;
		reloc_hi->or = r->orhi;
		reloc_hi->shift = r->shift - 32;
		reloc_hi->submit_offset = offset_bytes(ring->cur, ring->start) +
				to_msm_ringbuffer(ring)->offset;

		addr = msm_bo->presumed >> 32;
		if (reloc_hi->shift < 0)
			addr >>= -reloc_hi->shift;
		else
			addr <<= reloc_hi->shift;
		(*ring->cur++) = addr | r->orhi;
	}
}

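/* emit an IB into 'ring' pointing at the cmd_idx'th cmd buffer of the
 * target ring, adding the target cmd to the parent's submit if needed.
 * Returns the size of the target cmd buffer:
 */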
static uint32_t msm_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
		struct fd_ringbuffer *target, uint32_t cmd_idx)
{
	struct msm_cmd *cmd = NULL;
	struct msm_ringbuffer *msm_target = to_msm_ringbuffer(target);
	uint32_t idx = 0;
	int added_cmd = FALSE;
	uint32_t size;
	uint32_t submit_offset = msm_target->offset;

	LIST_FOR_EACH_ENTRY(cmd, &msm_target->cmd_list, list) {
		if (idx == cmd_idx)
			break;
		idx++;
	}

	assert(cmd && (idx == cmd_idx));

	if (idx < (msm_target->cmd_count - 1)) {
		/* All but the last cmd buffer is fully "baked" (ie. has already
		 * done get_cmd() to add it to the cmds table), so its recorded
		 * size is valid.  Computing the size from target->cur (as in the
		 * else branch) would only be correct for the last cmd buffer:
		 */
		size = cmd->size;
	} else {
		struct fd_ringbuffer *parent = ring->parent ? ring->parent : ring;
		size = offset_bytes(target->cur, target->start);
		added_cmd = get_cmd(parent, cmd, submit_offset, size,
				MSM_SUBMIT_CMD_IB_TARGET_BUF);
	}

	msm_ringbuffer_emit_reloc(ring, &(struct fd_reloc){
		.bo = cmd->ring_bo,
		.flags = FD_RELOC_READ,
		.offset = submit_offset,
	});

	/* Unlike traditional ringbuffers which are deleted as a set (after
	 * being flushed), mesa can't really guarantee that a stateobj isn't
	 * destroyed after being emitted but before flush, so we must hold a ref:
	 */
	if (added_cmd && (target->flags & FD_RINGBUFFER_OBJECT)) {
		fd_ringbuffer_ref(target);
	}

	return size;
}

static uint32_t msm_ringbuffer_cmd_count(struct fd_ringbuffer *ring)
{
	return to_msm_ringbuffer(ring)->cmd_count;
}

static void msm_ringbuffer_destroy(struct fd_ringbuffer *ring)
{
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);

	flush_reset(ring);
	delete_cmds(msm_ring);

	free(msm_ring->submit.cmds);
	free(msm_ring->submit.bos);
	free(msm_ring->bos);
	free(msm_ring->cmds);
	free(msm_ring);
}

static const struct fd_ringbuffer_funcs funcs = {
		.hostptr = msm_ringbuffer_hostptr,
		.flush = msm_ringbuffer_flush,
		.grow = msm_ringbuffer_grow,
		.reset = msm_ringbuffer_reset,
		.emit_reloc = msm_ringbuffer_emit_reloc,
		.emit_reloc_ring = msm_ringbuffer_emit_reloc_ring,
		.cmd_count = msm_ringbuffer_cmd_count,
		.destroy = msm_ringbuffer_destroy,
};

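/* construct a new ringbuffer; a size of zero requests a growable ring
 * (which requires FD_VERSION_UNLIMITED_CMDS):
 */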
drm_private struct fd_ringbuffer * msm_ringbuffer_new(struct fd_pipe *pipe,
		uint32_t size, enum fd_ringbuffer_flags flags)
{
	struct msm_ringbuffer *msm_ring;
	struct fd_ringbuffer *ring;

	msm_ring = calloc(1, sizeof(*msm_ring));
	if (!msm_ring) {
		ERROR_MSG("allocation failed");
		return NULL;
	}

	if (size == 0) {
		assert(pipe->dev->version >= FD_VERSION_UNLIMITED_CMDS);
		size = INIT_SIZE;
		msm_ring->is_growable = TRUE;
	}

	list_inithead(&msm_ring->cmd_list);
	msm_ring->seqno = ++to_msm_device(pipe->dev)->ring_cnt;

	ring = &msm_ring->base;
	atomic_set(&ring->refcnt, 1);

	ring->funcs = &funcs;
	ring->size = size;
	ring->pipe = pipe;   /* needed in ring_cmd_new() */

	ring_cmd_new(ring, size, flags);

	return ring;
}