update libdrm to 2.4.9 (actually to -current, but the only real changes
are to some assertions and a small change to modesetting code).

bump libdrm_intel minor due to added symbols; libdrm itself doesn't get
bumped, since its symbol list is unchanged.

ok matthieu@.
commit 54ef134d1f
parent a06becfd67
oga 2009-05-03 19:43:26 +00:00
12 changed files with 460 additions and 316 deletions

View File

@@ -1,4 +1,4 @@
# $OpenBSD: Makefile,v 1.2 2009/01/26 23:14:37 oga Exp $
# $OpenBSD: Makefile,v 1.3 2009/05/03 19:43:26 oga Exp $
.include <bsd.xconf.mk>
.if ${MACHINE_ARCH} == "i386" || ${MACHINE_ARCH} == "amd64"
@@ -9,7 +9,7 @@ LIB= drm
DRM_MAJOR= 2
DRM_MINOR= 4
DRM_TINY= 4
DRM_TINY= 9
INCSDIR= ${X11BASE}/include/

View File

@@ -51,6 +51,13 @@ drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
return bufmgr->bo_alloc(bufmgr, name, size, alignment);
}
drm_intel_bo *
drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment)
{
return bufmgr->bo_alloc_for_render(bufmgr, name, size, alignment);
}
void
drm_intel_bo_reference(drm_intel_bo *bo)
{
@@ -205,232 +212,3 @@ int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
*swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
return 0;
}
#if 0
/*
* $XFree86: xc/lib/XThrStub/UIThrStubs.c,v 3.3 2001/11/18 21:13:26 herrb Exp $
*
* Copyright (c) 1995 David E. Wexelblat. All rights reserved
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL DAVID E. WEXELBLAT BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Except as contained in this notice, the name of David E. Wexelblat shall
* not be used in advertising or otherwise to promote the sale, use or
* other dealings in this Software without prior written authorization
* from David E. Wexelblat.
*
*/
/*
* Stubs for thread functions needed by the X library. Supports
* UnixWare 2.x threads; may support Solaris 2 threads as well, but not
* tested. Defining things this way removes the dependency of the X
* library on the threads library, but still supports threads if the user
* specifies the thread library on the link line.
*/
/*
* Modifications by Carlos A M dos Santos, XFree86 Project, November 1999.
*
* Explanation from <X11/Xos_r.h>:
* The structure below is complicated, mostly because P1003.1c (the
* IEEE POSIX Threads spec) went through lots of drafts, and some
* vendors shipped systems based on draft API that were changed later.
* Unfortunately POSIX did not provide a feature-test macro for
* distinguishing each of the drafts.
*/
#include <stdlib.h>
static int _Xthr_once_stub_(void *, void (*)(void));
static int _Xthr_key_create_stub_(unsigned int *, void (*)(void *));
static int _Xthr_setspecific_stub_(unsigned int, const void *);
static void *_Xthr_getspecific_stub_(unsigned int);
#ifdef CTHREADS
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <cthreads.h>
typedef cthread_t xthread_t;
#define xthread_self cthread_self
#pragma weak cthread_self = _Xthr_self_stub_
#define xmutex_init mutex_init
#pragma weak mutex_init = _Xthr_zero_stub_
#pragma weak mutex_clear = _Xthr_zero_stub_
#pragma weak mutex_lock = _Xthr_zero_stub_
#pragma weak mutex_unlock = _Xthr_zero_stub_
#pragma weak condition_init = _Xthr_zero_stub_
#pragma weak condition_clear = _Xthr_zero_stub_
#pragma weak condition_wait = _Xthr_zero_stub_
#pragma weak condition_signal = _Xthr_zero_stub_
#pragma weak condition_broadcast = _Xthr_zero_stub_
#else /* !CTHREADS */
#if defined(SVR4) && !defined(__sgi)
#include <thread.h>
typedef thread_t xthread_t;
#pragma weak thr_self = _Xthr_self_stub_
#pragma weak mutex_init = _Xthr_zero_stub_
#pragma weak mutex_destroy = _Xthr_zero_stub_
#pragma weak mutex_lock = _Xthr_zero_stub_
#pragma weak mutex_unlock = _Xthr_zero_stub_
#pragma weak cond_init = _Xthr_zero_stub_
#pragma weak cond_destroy = _Xthr_zero_stub_
#pragma weak cond_wait = _Xthr_zero_stub_
#pragma weak cond_signal = _Xthr_zero_stub_
#pragma weak cond_broadcast = _Xthr_zero_stub_
#else /* !SVR4 */
#ifdef WIN32
/*
* Don't know what to do here. Is there something to be done at all?
*/
#else /* !WIN32 */
#ifdef USE_TIS_SUPPORT
#include <tis.h>
typedef pthread_t xthread_t;
#pragma weak tis_self = _Xthr_self_stub_
#pragma weak tis_mutex_init = _Xthr_zero_stub_
#pragma weak tis_mutex_destroy = _Xthr_zero_stub_
#pragma weak tis_mutex_lock = _Xthr_zero_stub_
#pragma weak tis_mutex_unlock = _Xthr_zero_stub_
#pragma weak tis_cond_init = _Xthr_zero_stub_
#pragma weak tis_cond_destroy = _Xthr_zero_stub_
#pragma weak tis_cond_wait = _Xthr_zero_stub_
#pragma weak tis_cond_signal = _Xthr_zero_stub_
#pragma weak tis_cond_broadcast = _Xthr_zero_stub_
#else
#include <pthread.h>
typedef pthread_t xthread_t;
#if __GNUC__ >= 3 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 95)
xthread_t pthread_self() __attribute__ ((weak, alias ("_Xthr_self_stub_")));
int pthread_mutex_init() __attribute__ ((weak, alias ("_Xthr_zero_stub_")));
int pthread_mutex_destroy() __attribute__ ((weak, alias ("_Xthr_zero_stub_")));
int pthread_mutex_lock() __attribute__ ((weak, alias ("_Xthr_zero_stub_")));
int pthread_mutex_unlock() __attribute__ ((weak, alias ("_Xthr_zero_stub_")));
int pthread_cond_init() __attribute__ ((weak, alias ("_Xthr_zero_stub_")));
int pthread_cond_destroy() __attribute__ ((weak, alias ("_Xthr_zero_stub_")));
int pthread_cond_wait() __attribute__ ((weak, alias ("_Xthr_zero_stub_")));
int pthread_cond_signal() __attribute__ ((weak, alias ("_Xthr_zero_stub_")));
int pthread_cond_broadcast() __attribute__ ((weak, alias ("_Xthr_zero_stub_")));
int pthread_key_create() __attribute__ ((weak, alias ("_Xthr_key_create_stub_")));
void *pthread_getspecific() __attribute__ ((weak, alias ("_Xthr_getspecific_stub_")));
int pthread_setspecific() __attribute__ ((weak, alias ("_Xthr_setspecific_stub_")));
int pthread_once() __attribute__ ((weak, alias ("_Xthr_once_stub_")));
#else /* __GNUC__ */
#pragma weak pthread_self = _Xthr_self_stub_
#pragma weak pthread_mutex_init = _Xthr_zero_stub_
#pragma weak pthread_mutex_destroy = _Xthr_zero_stub_
#pragma weak pthread_mutex_lock = _Xthr_zero_stub_
#pragma weak pthread_mutex_unlock = _Xthr_zero_stub_
#pragma weak pthread_cond_init = _Xthr_zero_stub_
#pragma weak pthread_cond_destroy = _Xthr_zero_stub_
#pragma weak pthread_cond_wait = _Xthr_zero_stub_
#pragma weak pthread_cond_signal = _Xthr_zero_stub_
#pragma weak pthread_cond_broadcast = _Xthr_zero_stub_
/* These are added for libGL */
#pragma weak pthread_key_create = _Xthr_key_create_stub_
#pragma weak pthread_getspecific = _Xthr_getspecific_stub_
#pragma weak pthread_setspecific = _Xthr_setspecific_stub_
#pragma weak pthread_once = _Xthr_once_stub_
#endif /* __GNUC__ */
#if defined(_DECTHREADS_) || defined(linux)
#pragma weak pthread_equal = _Xthr_equal_stub_ /* See Xthreads.h! */
int
_Xthr_equal_stub_()
{
return(1);
}
#endif /* _DECTHREADS_ || linux */
#endif /* USE_TIS_SUPPORT */
#endif /* WIN32 */
#endif /* SVR4 */
#endif /* CTHREADS */
static xthread_t
_Xthr_self_stub_()
{
static xthread_t _X_no_thread_id;
return(_X_no_thread_id); /* defined by <X11/Xthreads.h> */
}
static int
_Xthr_zero_stub_()
{
return(0);
}
static int
_Xthr_once_stub_(void *id, void (*routine)(void))
{
static int done = 0;
if (!done) {
routine();
done++;
}
return 0;
}
#include <errno.h>
#define XTHR_KEYS_CHUNK 100
static void **_Xthr_keys_ = NULL;
static unsigned int _Xthr_last_key_ = 0;
static int
_Xthr_key_create_stub_(unsigned int *key, void (*destructor)(void *))
{
void **tmp;
unsigned int i;
if ((_Xthr_last_key_ % XTHR_KEYS_CHUNK) == 0) {
tmp = realloc(_Xthr_keys_,
(_Xthr_last_key_ + XTHR_KEYS_CHUNK)*sizeof(void *));
if (tmp == NULL) {
free(_Xthr_keys_);
return ENOMEM;
}
for (i = 0; i < XTHR_KEYS_CHUNK; i++)
tmp[_Xthr_last_key_ + i] = 0;
_Xthr_keys_ = tmp;
}
*key = _Xthr_last_key_++;
return 0;
}
static int
_Xthr_setspecific_stub_(unsigned int key, const void *value)
{
if (_Xthr_last_key_ == 0 || key >= _Xthr_last_key_)
return EINVAL;
_Xthr_keys_[key] = value;
return 0;
}
static void *
_Xthr_getspecific_stub_(unsigned int key)
{
if (_Xthr_last_key_ == 0 || key >= _Xthr_last_key_)
return NULL;
return(_Xthr_keys_[key]);
}
#endif

View File

@@ -75,6 +75,10 @@ struct _drm_intel_bo {
drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment);
drm_intel_bo *drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned int alignment);
void drm_intel_bo_reference(drm_intel_bo *bo);
void drm_intel_bo_unreference(drm_intel_bo *bo);
int drm_intel_bo_map(drm_intel_bo *bo, int write_enable);
@@ -111,6 +115,7 @@ drm_intel_bo *drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
unsigned int handle);
void drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr);
int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo);
int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo);
void drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable);
/* drm_intel_bufmgr_fake.c */

View File

@@ -444,7 +444,8 @@ alloc_block(drm_intel_bo *bo)
/* Release the card storage associated with buf:
*/
static void free_block(drm_intel_bufmgr_fake *bufmgr_fake, struct block *block)
static void free_block(drm_intel_bufmgr_fake *bufmgr_fake, struct block *block,
int skip_dirty_copy)
{
drm_intel_bo_fake *bo_fake;
DBG("free block %p %08x %d %d\n", block, block->mem->ofs, block->on_hardware, block->fenced);
@@ -453,7 +454,11 @@ static void free_block(drm_intel_bufmgr_fake *bufmgr_fake, struct block *block)
return;
bo_fake = (drm_intel_bo_fake *)block->bo;
if (!(bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE)) && (bo_fake->card_dirty == 1)) {
if (bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE))
skip_dirty_copy = 1;
if (!skip_dirty_copy && (bo_fake->card_dirty == 1)) {
memcpy(bo_fake->backing_store, block->virtual, block->bo->size);
bo_fake->card_dirty = 0;
bo_fake->dirty = 1;
@@ -534,7 +539,7 @@ evict_lru(drm_intel_bufmgr_fake *bufmgr_fake, unsigned int max_fence)
set_dirty(&bo_fake->bo);
bo_fake->block = NULL;
free_block(bufmgr_fake, block);
free_block(bufmgr_fake, block, 0);
return 1;
}
@@ -557,7 +562,7 @@ evict_mru(drm_intel_bufmgr_fake *bufmgr_fake)
set_dirty(&bo_fake->bo);
bo_fake->block = NULL;
free_block(bufmgr_fake, block);
free_block(bufmgr_fake, block, 0);
return 1;
}
@@ -872,7 +877,7 @@ drm_intel_fake_bo_unreference_locked(drm_intel_bo *bo)
assert(bo_fake->map_count == 0);
/* No remaining references, so free it */
if (bo_fake->block)
free_block(bufmgr_fake, bo_fake->block);
free_block(bufmgr_fake, bo_fake->block, 1);
free_backing_store(bo);
for (i = 0; i < bo_fake->nr_relocs; i++)
@@ -1064,7 +1069,7 @@ drm_intel_fake_kick_all_locked(drm_intel_bufmgr_fake *bufmgr_fake)
drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)block->bo;
block->on_hardware = 0;
free_block(bufmgr_fake, block);
free_block(bufmgr_fake, block, 0);
bo_fake->block = NULL;
bo_fake->validated = 0;
if (!(bo_fake->flags & BM_NO_BACKING_STORE))
@@ -1462,8 +1467,10 @@ drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr)
assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)block->bo;
/* Releases the memory, and memcpys dirty contents out if necessary. */
free_block(bufmgr_fake, block);
free_block(bufmgr_fake, block, 0);
bo_fake->block = NULL;
}
pthread_mutex_unlock(&bufmgr_fake->lock);
@@ -1503,6 +1510,7 @@ drm_intel_bufmgr_fake_init(int fd,
/* Hook in methods */
bufmgr_fake->bufmgr.bo_alloc = drm_intel_fake_bo_alloc;
bufmgr_fake->bufmgr.bo_alloc_for_render = drm_intel_fake_bo_alloc;
bufmgr_fake->bufmgr.bo_reference = drm_intel_fake_bo_reference;
bufmgr_fake->bufmgr.bo_unreference = drm_intel_fake_bo_unreference;
bufmgr_fake->bufmgr.bo_map = drm_intel_fake_bo_map;

View File

@@ -52,8 +52,10 @@
#include <sys/types.h>
#include "errno.h"
#include "libdrm_lists.h"
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "intel_chipset.h"
#include "string.h"
#include "i915_drm.h"
@@ -66,7 +68,8 @@
typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
struct drm_intel_gem_bo_bucket {
drm_intel_bo_gem *head, **tail;
drmMMListHead head;
/**
* Limit on the number of entries in this bucket.
*
@@ -99,6 +102,8 @@ typedef struct _drm_intel_bufmgr_gem {
struct drm_intel_gem_bo_bucket cache_bucket[DRM_INTEL_GEM_BO_BUCKETS];
uint64_t gtt_size;
int available_fences;
int pci_device;
} drm_intel_bufmgr_gem;
struct _drm_intel_bo_gem {
@@ -140,10 +145,12 @@ struct _drm_intel_bo_gem {
/** Number of entries in relocs */
int reloc_count;
/** Mapped address for the buffer, saved across map/unmap cycles */
void *virtual;
void *mem_virtual;
/** GTT virtual address for the buffer, saved across map/unmap cycles */
void *gtt_virtual;
/** free list */
drm_intel_bo_gem *next;
/** BO cache list */
drmMMListHead head;
/**
* Boolean of whether this BO and its children have been included in
@@ -165,6 +172,11 @@ struct _drm_intel_bo_gem {
* the common case.
*/
int reloc_tree_size;
/**
* Number of potential fence registers required by this buffer and its
* relocations.
*/
int reloc_tree_fences;
};
static void drm_intel_gem_bo_reference_locked(drm_intel_bo *bo);
@@ -315,8 +327,9 @@ drm_intel_setup_reloc_list(drm_intel_bo *bo)
}
static drm_intel_bo *
drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment)
drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment,
int for_render)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
drm_intel_bo_gem *bo_gem;
@@ -345,18 +358,35 @@ drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
/* Get a buffer out of the cache if available */
if (bucket != NULL && bucket->num_entries > 0) {
struct drm_i915_gem_busy busy;
bo_gem = bucket->head;
busy.handle = bo_gem->gem_handle;
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
alloc_from_cache = (ret == 0 && busy.busy == 0);
if (alloc_from_cache) {
bucket->head = bo_gem->next;
if (bo_gem->next == NULL)
bucket->tail = &bucket->head;
if (for_render) {
/* Allocate new render-target BOs from the tail (MRU)
* of the list, as it will likely be hot in the GPU cache
* and in the aperture for us.
*/
bo_gem = DRMLISTENTRY(drm_intel_bo_gem, bucket->head.prev, head);
DRMLISTDEL(&bo_gem->head);
bucket->num_entries--;
alloc_from_cache = 1;
} else {
/* For non-render-target BOs (where we're probably going to map it
* first thing in order to fill it with data), check if the
* last BO in the cache is unbusy, and only reuse in that case.
* Otherwise, allocating a new buffer is probably faster than
* waiting for the GPU to finish.
*/
bo_gem = DRMLISTENTRY(drm_intel_bo_gem, bucket->head.next, head);
memset(&busy, 0, sizeof(busy));
busy.handle = bo_gem->gem_handle;
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
alloc_from_cache = (ret == 0 && busy.busy == 0);
if (alloc_from_cache) {
DRMLISTDEL(&bo_gem->head);
bucket->num_entries--;
}
}
}
pthread_mutex_unlock(&bufmgr_gem->lock);
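The bucket policy above, in short: render-target allocations pop the MRU end of
the bucket (likely still GPU-hot and resident in the aperture), while everything
else pops the LRU end, and only if the kernel reports that BO idle. A minimal
sketch of the intrusive-list idiom this relies on — hypothetical names, not the
actual DRMLISTENTRY/DRMLISTDEL macros from libdrm_lists.h:

#include <stddef.h>
#include <stdio.h>

/* doubly-linked circular list head, embedded in the object it links */
typedef struct list_head { struct list_head *prev, *next; } list_head;

/* container_of: recover the object from its embedded list node */
#define LIST_ENTRY(type, ptr, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct bo { int handle; list_head head; };

static void
list_add_tail(list_head *item, list_head *list)
{
	item->prev = list->prev;
	item->next = list;
	list->prev->next = item;
	list->prev = item;
}

int
main(void)
{
	list_head bucket = { &bucket, &bucket };
	struct bo a = { 1, { NULL, NULL } }, b = { 2, { NULL, NULL } };

	list_add_tail(&a.head, &bucket);	/* oldest: LRU end */
	list_add_tail(&b.head, &bucket);	/* newest: MRU end */

	/* ordinary allocs reuse bucket.next; render targets take bucket.prev */
	printf("lru=%d mru=%d\n",
	    LIST_ENTRY(struct bo, bucket.next, head)->handle,
	    LIST_ENTRY(struct bo, bucket.prev, head)->handle);
	return 0;	/* prints lru=1 mru=2 */
}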
@@ -386,6 +416,7 @@ drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
bo_gem->refcount = 1;
bo_gem->validate_index = -1;
bo_gem->reloc_tree_size = bo_gem->bo.size;
bo_gem->reloc_tree_fences = 0;
bo_gem->used_as_reloc_target = 0;
bo_gem->tiling_mode = I915_TILING_NONE;
bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
@@ -396,6 +427,20 @@ drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
return &bo_gem->bo;
}
static drm_intel_bo *
drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment)
{
return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, alignment, 1);
}
static drm_intel_bo *
drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment)
{
return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, alignment, 0);
}
/**
* Returns a drm_intel_bo wrapping the given buffer object handle.
*
@@ -435,6 +480,7 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr, const char *name,
bo_gem->gem_handle = open_arg.handle;
bo_gem->global_name = handle;
memset(&get_tiling, 0, sizeof(get_tiling));
get_tiling.handle = bo_gem->gem_handle;
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
if (ret != 0) {
@@ -443,6 +489,10 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr, const char *name,
}
bo_gem->tiling_mode = get_tiling.tiling_mode;
bo_gem->swizzle_mode = get_tiling.swizzle_mode;
if (bo_gem->tiling_mode == I915_TILING_NONE)
bo_gem->reloc_tree_fences = 0;
else
bo_gem->reloc_tree_fences = 1;
DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
@@ -455,6 +505,7 @@ drm_intel_gem_bo_reference(drm_intel_bo *bo)
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
assert(bo_gem->refcount > 0);
pthread_mutex_lock(&bufmgr_gem->lock);
bo_gem->refcount++;
pthread_mutex_unlock(&bufmgr_gem->lock);
@@ -465,6 +516,7 @@ drm_intel_gem_bo_reference_locked(drm_intel_bo *bo)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
assert(bo_gem->refcount > 0);
bo_gem->refcount++;
}
@@ -476,10 +528,13 @@ drm_intel_gem_bo_free(drm_intel_bo *bo)
struct drm_gem_close close;
int ret;
if (bo_gem->virtual)
munmap (bo_gem->virtual, bo_gem->bo.size);
if (bo_gem->mem_virtual)
munmap (bo_gem->mem_virtual, bo_gem->bo.size);
if (bo_gem->gtt_virtual)
munmap (bo_gem->gtt_virtual, bo_gem->bo.size);
/* Close this object */
memset(&close, 0, sizeof(close));
close.handle = bo_gem->gem_handle;
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
if (ret != 0) {
@@ -496,6 +551,7 @@ drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo)
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
assert(bo_gem->refcount > 0);
if (--bo_gem->refcount == 0) {
struct drm_intel_gem_bo_bucket *bucket;
uint32_t tiling_mode;
@@ -529,9 +585,7 @@ drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo)
bo_gem->reloc_target_bo = NULL;
bo_gem->reloc_count = 0;
bo_gem->next = NULL;
*bucket->tail = bo_gem;
bucket->tail = &bo_gem->next;
DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
bucket->num_entries++;
} else {
drm_intel_gem_bo_free(bo);
@@ -562,7 +616,7 @@ drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
/* Allow recursive mapping. Mesa may recursively map buffers with
* nested display loops.
*/
if (!bo_gem->virtual) {
if (!bo_gem->mem_virtual) {
struct drm_i915_gem_mmap mmap_arg;
DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
@@ -579,12 +633,12 @@ drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
pthread_mutex_unlock(&bufmgr_gem->lock);
return ret;
}
bo_gem->virtual = (void *)(uintptr_t)mmap_arg.addr_ptr;
bo_gem->mem_virtual = (void *)(uintptr_t)mmap_arg.addr_ptr;
bo_gem->swrast = 0;
}
DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
bo_gem->virtual);
bo->virtual = bo_gem->virtual;
bo_gem->mem_virtual);
bo->virtual = bo_gem->mem_virtual;
if (bo_gem->global_name != 0 || !bo_gem->swrast) {
set_domain.handle = bo_gem->gem_handle;
@@ -630,7 +684,7 @@ drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
pthread_mutex_lock(&bufmgr_gem->lock);
/* Get a mapping of the buffer if we haven't before. */
if (bo_gem->virtual == NULL) {
if (bo_gem->gtt_virtual == NULL) {
struct drm_i915_gem_mmap_gtt mmap_arg;
DBG("bo_map_gtt: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
@@ -651,10 +705,10 @@ drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
}
/* and mmap it */
bo_gem->virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
MAP_SHARED, bufmgr_gem->fd,
mmap_arg.offset);
if (bo_gem->virtual == MAP_FAILED) {
bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
MAP_SHARED, bufmgr_gem->fd,
mmap_arg.offset);
if (bo_gem->gtt_virtual == MAP_FAILED) {
fprintf(stderr,
"%s:%d: Error mapping buffer %d (%s): %s .\n",
__FILE__, __LINE__,
@@ -665,10 +719,10 @@ drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
}
}
bo->virtual = bo_gem->virtual;
bo->virtual = bo_gem->gtt_virtual;
DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
bo_gem->virtual);
bo_gem->gtt_virtual);
/* Now move it to the GTT domain so that the CPU caches are flushed */
set_domain.handle = bo_gem->gem_handle;
@@ -680,7 +734,7 @@ drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
} while (ret == -1 && errno == EINTR);
if (ret != 0) {
fprintf (stderr, "%s:%d: Error setting swrast %d: %s\n",
fprintf (stderr, "%s:%d: Error setting domain %d: %s\n",
__FILE__, __LINE__, bo_gem->gem_handle, strerror (errno));
}
@@ -690,6 +744,26 @@ drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
#endif
}
int
drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
struct drm_i915_gem_sw_finish sw_finish;
int ret = 0;
if (bo == NULL)
return 0;
assert(bo_gem->gtt_virtual != NULL);
pthread_mutex_lock(&bufmgr_gem->lock);
bo->virtual = NULL;
pthread_mutex_unlock(&bufmgr_gem->lock);
return ret;
}
static int
drm_intel_gem_bo_unmap(drm_intel_bo *bo)
{
@@ -701,7 +775,7 @@ drm_intel_gem_bo_unmap(drm_intel_bo *bo)
if (bo == NULL)
return 0;
assert(bo_gem->virtual != NULL);
assert(bo_gem->mem_virtual != NULL);
pthread_mutex_lock(&bufmgr_gem->lock);
if (bo_gem->swrast) {
@@ -712,6 +786,7 @@ drm_intel_gem_bo_unmap(drm_intel_bo *bo)
} while (ret == -1 && errno == EINTR);
bo_gem->swrast = 0;
}
bo->virtual = NULL;
pthread_mutex_unlock(&bufmgr_gem->lock);
return 0;
}
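A hedged usage sketch for the newly exported GTT mapping pair above (assumes bo
came from a bufmgr created with drm_intel_bufmgr_gem_init(); error handling
kept minimal):

#include <string.h>
#include "intel_bufmgr.h"

/* Fill a BO through its write-combined GTT mapping, then drop the mapping.
 * Sketch only; bo->virtual points at the GTT view while mapped. */
static int
clear_bo_via_gtt(drm_intel_bo *bo)
{
	int ret;

	ret = drm_intel_gem_bo_map_gtt(bo);
	if (ret != 0)
		return ret;
	memset(bo->virtual, 0, bo->size);
	return drm_intel_gem_bo_unmap_gtt(bo);
}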
@@ -820,10 +895,9 @@ drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
struct drm_intel_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i];
drm_intel_bo_gem *bo_gem;
while ((bo_gem = bucket->head) != NULL) {
bucket->head = bo_gem->next;
if (bo_gem->next == NULL)
bucket->tail = &bucket->head;
while (!DRMLISTEMPTY(&bucket->head)) {
bo_gem = DRMLISTENTRY(drm_intel_bo_gem, bucket->head.next, head);
DRMLISTDEL(&bo_gem->head);
bucket->num_entries--;
drm_intel_gem_bo_free(&bo_gem->bo);
@@ -869,6 +943,7 @@ drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
*/
assert(!bo_gem->used_as_reloc_target);
bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
/* Flag the target to disallow further relocations in it. */
target_bo_gem->used_as_reloc_target = 1;
@@ -1005,11 +1080,12 @@ drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
struct drm_i915_gem_pin pin;
int ret;
memset(&pin, 0, sizeof(pin));
pin.handle = bo_gem->gem_handle;
pin.alignment = alignment;
do {
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PIN, &pin);
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PIN, &pin);
} while (ret == -1 && errno == EINTR);
if (ret != 0)
@@ -1027,6 +1103,7 @@ drm_intel_gem_bo_unpin(drm_intel_bo *bo)
struct drm_i915_gem_unpin unpin;
int ret;
memset(&unpin, 0, sizeof(unpin));
unpin.handle = bo_gem->gem_handle;
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
@@ -1048,6 +1125,11 @@ drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
if (bo_gem->global_name == 0 && *tiling_mode == bo_gem->tiling_mode)
return 0;
/* If we're going from non-tiling to tiling, bump fence count */
if (bo_gem->tiling_mode == I915_TILING_NONE)
bo_gem->reloc_tree_fences++;
memset(&set_tiling, 0, sizeof(set_tiling));
set_tiling.handle = bo_gem->gem_handle;
set_tiling.tiling_mode = *tiling_mode;
set_tiling.stride = stride;
@@ -1060,6 +1142,10 @@ drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
bo_gem->tiling_mode = set_tiling.tiling_mode;
bo_gem->swizzle_mode = set_tiling.swizzle_mode;
/* If we're going from tiling to non-tiling, drop fence count */
if (bo_gem->tiling_mode == I915_TILING_NONE)
bo_gem->reloc_tree_fences--;
*tiling_mode = bo_gem->tiling_mode;
return 0;
}
@@ -1084,6 +1170,7 @@ drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t *name)
int ret;
if (!bo_gem->global_name) {
memset(&flink, 0, sizeof(flink));
flink.handle = bo_gem->gem_handle;
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
@@ -1137,6 +1224,31 @@ drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
return total;
}
/**
* Count the number of buffers in this list that need a fence reg
*
* If the count is greater than the number of available regs, we'll have
* to ask the caller to resubmit a batch with fewer tiled buffers.
*
* This function over-counts if the same buffer is used multiple times.
*/
static unsigned int
drm_intel_gem_total_fences(drm_intel_bo **bo_array, int count)
{
int i;
unsigned int total = 0;
for (i = 0; i < count; i++) {
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo_array[i];
if (bo_gem == NULL)
continue;
total += bo_gem->reloc_tree_fences;
}
return total;
}
/**
* Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
* for the next drm_intel_bufmgr_check_aperture_space() call.
@@ -1185,8 +1297,21 @@ drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
int i;
unsigned int total = 0;
for (i = 0; i < count; i++)
for (i = 0; i < count; i++) {
total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
/* For the first buffer object in the array, we get an accurate count
* back for its reloc_tree size (since nothing had been flagged as
* being counted yet). We can save that value out as a more
* conservative reloc_tree_size that avoids double-counting target
* buffers. Since the first buffer happens to usually be the batch
* buffer in our callers, this can pull us back from doing the tree
* walk on every new batch emit.
*/
if (i == 0) {
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo_array[i];
bo_gem->reloc_tree_size = total;
}
}
for (i = 0; i < count; i++)
drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
@@ -1215,9 +1340,17 @@ drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo_array[0]->bufmgr;
unsigned int total = 0;
unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
int total_fences;
/* Check for fence reg constraints if necessary */
if (bufmgr_gem->available_fences) {
total_fences = drm_intel_gem_total_fences(bo_array, count);
if (total_fences > bufmgr_gem->available_fences)
return -1;
}
total = drm_intel_gem_estimate_batch_space(bo_array, count);
if (total > threshold)
total = drm_intel_gem_compute_batch_space(bo_array, count);
@@ -1243,6 +1376,7 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
{
drm_intel_bufmgr_gem *bufmgr_gem;
struct drm_i915_gem_get_aperture aperture;
drm_i915_getparam_t gp;
int ret, i;
bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
@@ -1266,6 +1400,25 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
(int)bufmgr_gem->gtt_size / 1024);
}
gp.param = I915_PARAM_CHIPSET_ID;
gp.value = &bufmgr_gem->pci_device;
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
if (ret) {
fprintf(stderr, "get chip id failed: %d\n", ret);
fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
}
if (!IS_I965G(bufmgr_gem)) {
gp.param = I915_PARAM_NUM_FENCES_AVAIL;
gp.value = &bufmgr_gem->available_fences;
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
if (ret) {
fprintf(stderr, "get fences failed: %d\n", ret);
fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
bufmgr_gem->available_fences = 0;
}
}
/* Let's go with one relocation per every 2 dwords (but round down a bit
* since a power of two will mean an extra page allocation for the reloc
* buffer).
@@ -1275,6 +1428,7 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
bufmgr_gem->bufmgr.bo_alloc_for_render = drm_intel_gem_bo_alloc_for_render;
bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
@@ -1294,7 +1448,7 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
bufmgr_gem->bufmgr.check_aperture_space = drm_intel_gem_check_aperture_space;
/* Initialize the linked lists for BO reuse cache. */
for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++)
bufmgr_gem->cache_bucket[i].tail = &bufmgr_gem->cache_bucket[i].head;
DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
return &bufmgr_gem->bufmgr;
}

View File

@@ -51,6 +51,16 @@ struct _drm_intel_bufmgr {
drm_intel_bo *(*bo_alloc)(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment);
/**
* Allocate a buffer object, hinting that it will be used as a render target.
*
* This is otherwise the same as bo_alloc.
*/
drm_intel_bo *(*bo_alloc_for_render)(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned int alignment);
/** Takes a reference on a buffer object */
void (*bo_reference)(drm_intel_bo *bo);
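A sketch of how a caller chooses between the two allocators (sizes and names
are made up; the bufmgr is assumed to be already initialized):

#include "intel_bufmgr.h"

static void
alloc_example(drm_intel_bufmgr *bufmgr)
{
	/* render target: reused MRU from the cache, likely GPU-hot */
	drm_intel_bo *target = drm_intel_bo_alloc_for_render(bufmgr,
	    "render target", 1024 * 1024, 4096);
	/* CPU-filled data: reused LRU, and only if the kernel says it's idle */
	drm_intel_bo *vbo = drm_intel_bo_alloc(bufmgr, "vertices", 4096, 4096);

	drm_intel_bo_unreference(vbo);
	drm_intel_bo_unreference(target);
}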

View File

@@ -0,0 +1,71 @@
/*
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef _INTEL_CHIPSET_H
#define _INTEL_CHIPSET_H
#define IS_I830(dev) ((dev)->pci_device == 0x3577)
#define IS_845G(dev) ((dev)->pci_device == 0x2562)
#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
#define IS_I855(dev) ((dev)->pci_device == 0x3582)
#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\
(dev)->pci_device == 0x27AE)
#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
(dev)->pci_device == 0x2982 || \
(dev)->pci_device == 0x2992 || \
(dev)->pci_device == 0x29A2 || \
(dev)->pci_device == 0x2A02 || \
(dev)->pci_device == 0x2A12 || \
(dev)->pci_device == 0x2A42 || \
(dev)->pci_device == 0x2E02 || \
(dev)->pci_device == 0x2E12 || \
(dev)->pci_device == 0x2E22)
#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
(dev)->pci_device == 0x2E12 || \
(dev)->pci_device == 0x2E22)
#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
(dev)->pci_device == 0x29B2 || \
(dev)->pci_device == 0x29D2)
#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev))
#endif /* _INTEL_CHIPSET_H */
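The macros only dereference a pci_device member, so any struct carrying the
chipset id works (intel_bufmgr_gem.c passes its bufmgr_gem). A tiny
self-contained check, with ids taken from the list above:

#include <assert.h>
#include "intel_chipset.h"

int
main(void)
{
	struct { int pci_device; } dev = { 0x2A42 };	/* GM45 */

	assert(IS_GM45(&dev));
	assert(IS_I965G(&dev));	/* GM45 is also a 965-class part */
	assert(!IS_I915G(&dev));
	return 0;
}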

View File

@@ -1,2 +1,2 @@
major=1
minor=0
minor=1

View File

@@ -29,6 +29,8 @@
* list handling. No list looping yet.
*/
#include <stddef.h>
typedef struct _drmMMListHead
{
struct _drmMMListHead *prev;

View File

@@ -326,28 +326,28 @@ typedef struct _drmSetVersion {
#elif defined(__alpha__)
#define DRM_CAS(lock, old, new, ret) \
do { \
int old32; \
int cur32; \
__asm__ __volatile__( \
" mb\n" \
" zap %4, 0xF0, %0\n" \
" ldl_l %1, %2\n" \
" zap %1, 0xF0, %1\n" \
" cmpeq %0, %1, %1\n" \
" beq %1, 1f\n" \
" bis %5, %5, %1\n" \
" stl_c %1, %2\n" \
"1: xor %1, 1, %1\n" \
" stl %1, %3" \
: "=r" (old32), \
"=&r" (cur32), \
"=m" (__drm_dummy_lock(lock)),\
"=m" (ret) \
: "r" (old), \
"r" (new)); \
} while(0)
#define DRM_CAS(lock, old, new, ret) \
do { \
int tmp, old32; \
__asm__ __volatile__( \
" addl $31, %5, %3\n" \
"1: ldl_l %0, %2\n" \
" cmpeq %0, %3, %1\n" \
" beq %1, 2f\n" \
" mov %4, %0\n" \
" stl_c %0, %2\n" \
" beq %0, 3f\n" \
" mb\n" \
"2: cmpeq %1, 0, %1\n" \
".subsection 2\n" \
"3: br 1b\n" \
".previous" \
: "=&r"(tmp), "=&r"(ret), \
"=m"(__drm_dummy_lock(lock)), \
"=&r"(old32) \
: "r"(new), "r"(old) \
: "memory"); \
} while (0)
#elif defined(__sparc__)
@@ -430,7 +430,9 @@ do { register unsigned int __old __asm("o0"); \
#define DRM_CAS(lock,old,new,ret) do { ret=1; } while (0) /* FAST LOCK FAILS */
#endif
#if defined(__alpha__) || defined(__powerpc__)
#if defined(__alpha__)
#define DRM_CAS_RESULT(_result) long _result
#elif defined(__powerpc__)
#define DRM_CAS_RESULT(_result) int _result
#else
#define DRM_CAS_RESULT(_result) char _result

View File

@@ -76,7 +76,7 @@ void* drmAllocCpy(void *array, int count, int entry_size)
* A couple of free functions.
*/
void drmModeFreeModeInfo(struct drm_mode_modeinfo *ptr)
void drmModeFreeModeInfo(drmModeModeInfoPtr ptr)
{
if (!ptr)
return;
@@ -273,7 +273,7 @@ drmModeCrtcPtr drmModeGetCrtc(int fd, uint32_t crtcId)
int drmModeSetCrtc(int fd, uint32_t crtcId, uint32_t bufferId,
uint32_t x, uint32_t y, uint32_t *connectors, int count,
struct drm_mode_modeinfo *mode)
drmModeModeInfoPtr mode)
{
struct drm_mode_crtc crtc;
@@ -395,7 +395,8 @@ drmModeConnectorPtr drmModeGetConnector(int fd, uint32_t connector_id)
r->connection = conn.connection;
r->mmWidth = conn.mm_width;
r->mmHeight = conn.mm_height;
r->subpixel = conn.subpixel;
/* convert subpixel from kernel to userspace */
r->subpixel = conn.subpixel + 1;
r->count_modes = conn.count_modes;
/* TODO we should test whether these alloc & copy calls fail. */
r->count_props = conn.count_props;
@@ -419,7 +420,7 @@ err_allocs:
return r;
}
int drmModeAttachMode(int fd, uint32_t connector_id, struct drm_mode_modeinfo *mode_info)
int drmModeAttachMode(int fd, uint32_t connector_id, drmModeModeInfoPtr mode_info)
{
struct drm_mode_mode_cmd res;
@@ -429,7 +430,7 @@ int drmModeAttachMode(int fd, uint32_t connector_id, struct drm_mode_modeinfo *m
return drmIoctl(fd, DRM_IOCTL_MODE_ATTACHMODE, &res);
}
int drmModeDetachMode(int fd, uint32_t connector_id, struct drm_mode_modeinfo *mode_info)
int drmModeDetachMode(int fd, uint32_t connector_id, drmModeModeInfoPtr mode_info)
{
struct drm_mode_mode_cmd res;

View File

@@ -52,6 +52,99 @@
* buffer object interface. This object needs to be pinned.
*/
/*
* If we pick up an old version of drm.h that doesn't include drm_mode.h,
* redefine the defines ourselves. This is so that builds don't break when
* a new libdrm is used against old kernel headers.
*/
#ifndef _DRM_MODE_H
#define DRM_DISPLAY_INFO_LEN 32
#define DRM_CONNECTOR_NAME_LEN 32
#define DRM_DISPLAY_MODE_LEN 32
#define DRM_PROP_NAME_LEN 32
#define DRM_MODE_TYPE_BUILTIN (1<<0)
#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN)
#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN)
#define DRM_MODE_TYPE_PREFERRED (1<<3)
#define DRM_MODE_TYPE_DEFAULT (1<<4)
#define DRM_MODE_TYPE_USERDEF (1<<5)
#define DRM_MODE_TYPE_DRIVER (1<<6)
/* Video mode flags */
/* bit compatible with the xorg definitions. */
#define DRM_MODE_FLAG_PHSYNC (1<<0)
#define DRM_MODE_FLAG_NHSYNC (1<<1)
#define DRM_MODE_FLAG_PVSYNC (1<<2)
#define DRM_MODE_FLAG_NVSYNC (1<<3)
#define DRM_MODE_FLAG_INTERLACE (1<<4)
#define DRM_MODE_FLAG_DBLSCAN (1<<5)
#define DRM_MODE_FLAG_CSYNC (1<<6)
#define DRM_MODE_FLAG_PCSYNC (1<<7)
#define DRM_MODE_FLAG_NCSYNC (1<<8)
#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */
#define DRM_MODE_FLAG_BCAST (1<<10)
#define DRM_MODE_FLAG_PIXMUX (1<<11)
#define DRM_MODE_FLAG_DBLCLK (1<<12)
#define DRM_MODE_FLAG_CLKDIV2 (1<<13)
/* DPMS flags */
/* bit compatible with the xorg definitions. */
#define DRM_MODE_DPMS_ON 0
#define DRM_MODE_DPMS_STANDBY 1
#define DRM_MODE_DPMS_SUSPEND 2
#define DRM_MODE_DPMS_OFF 3
/* Scaling mode options */
#define DRM_MODE_SCALE_NON_GPU 0
#define DRM_MODE_SCALE_FULLSCREEN 1
#define DRM_MODE_SCALE_NO_SCALE 2
#define DRM_MODE_SCALE_ASPECT 3
/* Dithering mode options */
#define DRM_MODE_DITHERING_OFF 0
#define DRM_MODE_DITHERING_ON 1
#define DRM_MODE_ENCODER_NONE 0
#define DRM_MODE_ENCODER_DAC 1
#define DRM_MODE_ENCODER_TMDS 2
#define DRM_MODE_ENCODER_LVDS 3
#define DRM_MODE_ENCODER_TVDAC 4
#define DRM_MODE_SUBCONNECTOR_Automatic 0
#define DRM_MODE_SUBCONNECTOR_Unknown 0
#define DRM_MODE_SUBCONNECTOR_DVID 3
#define DRM_MODE_SUBCONNECTOR_DVIA 4
#define DRM_MODE_SUBCONNECTOR_Composite 5
#define DRM_MODE_SUBCONNECTOR_SVIDEO 6
#define DRM_MODE_SUBCONNECTOR_Component 8
#define DRM_MODE_CONNECTOR_Unknown 0
#define DRM_MODE_CONNECTOR_VGA 1
#define DRM_MODE_CONNECTOR_DVII 2
#define DRM_MODE_CONNECTOR_DVID 3
#define DRM_MODE_CONNECTOR_DVIA 4
#define DRM_MODE_CONNECTOR_Composite 5
#define DRM_MODE_CONNECTOR_SVIDEO 6
#define DRM_MODE_CONNECTOR_LVDS 7
#define DRM_MODE_CONNECTOR_Component 8
#define DRM_MODE_CONNECTOR_9PinDIN 9
#define DRM_MODE_CONNECTOR_DisplayPort 10
#define DRM_MODE_CONNECTOR_HDMIA 11
#define DRM_MODE_CONNECTOR_HDMIB 12
#define DRM_MODE_PROP_PENDING (1<<0)
#define DRM_MODE_PROP_RANGE (1<<1)
#define DRM_MODE_PROP_IMMUTABLE (1<<2)
#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */
#define DRM_MODE_PROP_BLOB (1<<4)
#define DRM_MODE_CURSOR_BO (1<<0)
#define DRM_MODE_CURSOR_MOVE (1<<1)
#endif /* _DRM_MODE_H */
typedef struct _drmModeRes {
int count_fbs;
@@ -70,7 +163,27 @@ typedef struct _drmModeRes {
uint32_t min_height, max_height;
} drmModeRes, *drmModeResPtr;
typedef struct drm_mode_fb_cmd drmModeFB, *drmModeFBPtr;
typedef struct _drmModeModeInfo {
uint32_t clock;
uint16_t hdisplay, hsync_start, hsync_end, htotal, hskew;
uint16_t vdisplay, vsync_start, vsync_end, vtotal, vscan;
uint32_t vrefresh; /* vertical refresh * 1000 */
uint32_t flags;
uint32_t type;
char name[DRM_DISPLAY_MODE_LEN];
} drmModeModeInfo, *drmModeModeInfoPtr;
typedef struct _drmModeFB {
uint32_t fb_id;
uint32_t width, height;
uint32_t pitch;
uint32_t bpp;
uint32_t depth;
/* driver specific handle */
uint32_t handle;
} drmModeFB, *drmModeFBPtr;
typedef struct _drmModePropertyBlob {
uint32_t id;
@@ -97,7 +210,7 @@ typedef struct _drmModeCrtc {
uint32_t x, y; /**< Position on the framebuffer */
uint32_t width, height;
int mode_valid;
struct drm_mode_modeinfo mode;
drmModeModeInfo mode;
int gamma_size; /**< Number of gamma stops */
@@ -136,7 +249,7 @@ typedef struct _drmModeConnector {
drmModeSubPixel subpixel;
int count_modes;
struct drm_mode_modeinfo *modes;
drmModeModeInfoPtr modes;
int count_props;
uint32_t *props; /**< List of property ids */
@@ -148,7 +261,7 @@ typedef struct _drmModeConnector {
extern void drmModeFreeModeInfo( struct drm_mode_modeinfo *ptr );
extern void drmModeFreeModeInfo( drmModeModeInfoPtr ptr );
extern void drmModeFreeResources( drmModeResPtr ptr );
extern void drmModeFreeFB( drmModeFBPtr ptr );
extern void drmModeFreeCrtc( drmModeCrtcPtr ptr );
@@ -194,7 +307,7 @@ extern drmModeCrtcPtr drmModeGetCrtc(int fd, uint32_t crtcId);
*/
int drmModeSetCrtc(int fd, uint32_t crtcId, uint32_t bufferId,
uint32_t x, uint32_t y, uint32_t *connectors, int count,
struct drm_mode_modeinfo *mode);
drmModeModeInfoPtr mode);
/*
* Cursor functions
@@ -228,13 +341,13 @@ extern drmModeConnectorPtr drmModeGetConnector(int fd,
/**
* Attaches the given mode to a connector.
*/
extern int drmModeAttachMode(int fd, uint32_t connectorId, struct drm_mode_modeinfo *mode_info);
extern int drmModeAttachMode(int fd, uint32_t connectorId, drmModeModeInfoPtr mode_info);
/**
* Detaches a mode from the connector. The mode must currently be unused.
*/
extern int drmModeDetachMode(int fd, uint32_t connectorId, struct drm_mode_modeinfo *mode_info);
extern int drmModeDetachMode(int fd, uint32_t connectorId, drmModeModeInfoPtr mode_info);
extern drmModePropertyPtr drmModeGetProperty(int fd, uint32_t propertyId);
extern void drmModeFreeProperty(drmModePropertyPtr ptr);
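A usage sketch tying the typedef change together (fd and ids are placeholders;
error handling elided): a connector's probed modes are now drmModeModeInfo and
feed drmModeSetCrtc directly.

#include "xf86drmMode.h"

/* Set the connector's first probed mode on a CRTC (sketch). */
static int
set_first_mode(int fd, uint32_t crtc_id, uint32_t fb_id, uint32_t connector_id)
{
	drmModeConnectorPtr conn = drmModeGetConnector(fd, connector_id);
	int ret = -1;

	if (conn && conn->count_modes > 0)
		ret = drmModeSetCrtc(fd, crtc_id, fb_id, 0, 0,
		    &connector_id, 1, &conn->modes[0]);
	if (conn)
		drmModeFreeConnector(conn);
	return ret;
}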