Functions

static void                      intel_realloc_relocs (struct intel_be_batchbuffer *batch, int num_relocs)
void                             intel_be_batchbuffer_reset (struct intel_be_batchbuffer *batch)
struct intel_be_batchbuffer *    intel_be_batchbuffer_alloc (struct intel_be_context *intel)
void                             intel_be_batchbuffer_free (struct intel_be_batchbuffer *batch)
void                             intel_be_offset_relocation (struct intel_be_batchbuffer *batch, unsigned pre_add, struct _DriBufferObject *driBO, uint64_t val_flags, uint64_t val_mask)
static void                      i915_drm_copy_reply (const struct drm_bo_info_rep *rep, drmBO *buf)
static int                       i915_execbuf (struct intel_be_batchbuffer *batch, unsigned int used, boolean ignore_cliprects, drmBOList *list, struct drm_i915_execbuffer *ea)
static struct _DriFenceObject *  do_flush_locked (struct intel_be_batchbuffer *batch, unsigned int used, boolean ignore_cliprects, boolean allow_unlock)
struct _DriFenceObject *         intel_be_batchbuffer_flush (struct intel_be_batchbuffer *batch)
void                             intel_be_batchbuffer_finish (struct intel_be_batchbuffer *batch)
static struct _DriFenceObject * do_flush_locked (struct intel_be_batchbuffer *batch,
                                                 unsigned int used,
                                                 boolean ignore_cliprects,
                                                 boolean allow_unlock)   [static, read]
Definition at line 274 of file intel_be_batchbuffer.c.
References intel_be_batchbuffer::device, driBOFenceUserList(), driBOValidateUserList(), driFenceReference(), driFenceType(), driFenceUnReference(), driGetdrmBOList(), driPutdrmBOList(), _DriFenceObject::fence_class, intel_be_device::fenceMgr, i915_execbuf(), intel_be_batchbuffer::intel, intel_be_batchbuffer::last_fence, and intel_be_batchbuffer::list.
00277 {
00278    struct intel_be_context *intel = batch->intel;
00279    struct _DriFenceObject *fo;
00280    drmFence fence;
00281    drmBOList *boList;
00282    struct drm_i915_execbuffer ea;
00283    int ret = 0;
00284 
00285    driBOValidateUserList(batch->list);
00286    boList = driGetdrmBOList(batch->list);
00287 
00288 #if 0 /* ZZZ JB Allways run */
00289    if (!(intel->numClipRects == 0 && !ignore_cliprects)) {
00290 #else
00291    if (1) {
00292 #endif
00293       ret = i915_execbuf(batch, used, ignore_cliprects, boList, &ea);
00294    } else {
00295       driPutdrmBOList(batch->list);
00296       fo = NULL;
00297       goto out;
00298    }
00299    driPutdrmBOList(batch->list);
00300    if (ret)
00301       abort();
00302 
00303    if (ea.fence_arg.error != 0) {
00304 
00305       /*
00306        * The hardware has been idled by the kernel.
00307        * Don't fence the driBOs.
00308        */
00309 
00310       if (batch->last_fence)
00311          driFenceUnReference(&batch->last_fence);
00312 #if 0 /* ZZZ JB: no _mesa_* funcs in gallium */
00313       _mesa_printf("fence error\n");
00314 #endif
00315       batch->last_fence = NULL;
00316       fo = NULL;
00317       goto out;
00318    }
00319 
00320    fence.handle = ea.fence_arg.handle;
00321    fence.fence_class = ea.fence_arg.fence_class;
00322    fence.type = ea.fence_arg.type;
00323    fence.flags = ea.fence_arg.flags;
00324    fence.signaled = ea.fence_arg.signaled;
00325 
00326    fo = driBOFenceUserList(batch->device->fenceMgr, batch->list,
00327                            "SuperFence", &fence);
00328 
00329    if (driFenceType(fo) & DRM_I915_FENCE_TYPE_RW) {
00330       if (batch->last_fence)
00331          driFenceUnReference(&batch->last_fence);
00332       /*
00333        * FIXME: Context last fence??
00334        */
00335       batch->last_fence = fo;
00336       driFenceReference(fo);
00337    }
00338 out:
00339 #if 0 /* ZZZ JB: fix this */
00340    intel->vtbl.lost_hardware(intel);
00341 #else
00342    (void)intel;
00343 #endif
00344    return fo;
00345 }
static void i915_drm_copy_reply (const struct drm_bo_info_rep *rep, drmBO *buf)   [static]
Definition at line 170 of file intel_be_batchbuffer.c.
00171 {
00172    buf->handle = rep->handle;
00173    buf->flags = rep->flags;
00174    buf->size = rep->size;
00175    buf->offset = rep->offset;
00176    buf->mapHandle = rep->arg_handle;
00177    buf->proposedFlags = rep->proposed_flags;
00178    buf->start = rep->buffer_start;
00179    buf->fenceFlags = rep->fence_flags;
00180    buf->replyFlags = rep->rep_flags;
00181    buf->pageAlignment = rep->page_alignment;
00182 }
static int i915_execbuf (struct intel_be_batchbuffer *batch,
                         unsigned int used,
                         boolean ignore_cliprects,
                         drmBOList *list,
                         struct drm_i915_execbuffer *ea)   [static]
Definition at line 185 of file intel_be_batchbuffer.c.
References _drmBONode::arg0, _drmBONode::arg1, _drmBONode::bo_arg, _drmBONode::buf, intel_be_batchbuffer::device, intel_be_device::fd, _drmBOList::list, intel_be_batchbuffer::nr_relocs, intel_be_batchbuffer::poolOffset, and intel_be_batchbuffer::reloc.
00190 {
00191 //   struct intel_be_context *intel = batch->intel;
00192    drmBONode *node;
00193    drmMMListHead *l;
00194    struct drm_i915_op_arg *arg, *first;
00195    struct drm_bo_op_req *req;
00196    struct drm_bo_info_rep *rep;
00197    uint64_t *prevNext = NULL;
00198    drmBO *buf;
00199    int ret = 0;
00200    uint32_t count = 0;
00201 
00202    first = NULL;
00203    for (l = list->list.next; l != &list->list; l = l->next) {
00204       node = DRMLISTENTRY(drmBONode, l, head);
00205 
00206       arg = &node->bo_arg;
00207       req = &arg->d.req;
00208 
00209       if (!first)
00210          first = arg;
00211 
00212       if (prevNext)
00213          *prevNext = (unsigned long)arg;
00214 
00215       prevNext = &arg->next;
00216       req->bo_req.handle = node->buf->handle;
00217       req->op = drm_bo_validate;
00218       req->bo_req.flags = node->arg0;
00219       req->bo_req.mask = node->arg1;
00220       req->bo_req.hint |= 0;
00221       count++;
00222    }
00223 
00224    memset(ea, 0, sizeof(*ea));
00225    ea->num_buffers = count;
00226    ea->batch.start = batch->poolOffset;
00227    ea->batch.used = used;
00228 #if 0 /* ZZZ JB: no cliprects used */
00229    ea->batch.cliprects = intel->pClipRects;
00230    ea->batch.num_cliprects = ignore_cliprects ? 0 : intel->numClipRects;
00231    ea->batch.DR1 = 0;
00232    ea->batch.DR4 = 0;((((GLuint) intel->drawX) & 0xffff) |
00233                       (((GLuint) intel->drawY) << 16));
00234 #else
00235    ea->batch.cliprects = NULL;
00236    ea->batch.num_cliprects = 0;
00237    ea->batch.DR1 = 0;
00238    ea->batch.DR4 = 0;
00239 #endif
00240    ea->fence_arg.flags = DRM_I915_FENCE_FLAG_FLUSHED;
00241    ea->ops_list = (unsigned long) first;
00242    first->reloc_ptr = (unsigned long) batch->reloc;
00243    batch->reloc[0] = batch->nr_relocs;
00244 
00245    //return -EFAULT;
00246    do {
00247       ret = drmCommandWriteRead(batch->device->fd, DRM_I915_EXECBUFFER, ea,
00248                                 sizeof(*ea));
00249    } while (ret == -EAGAIN);
00250 
00251    if (ret != 0)
00252       return ret;
00253 
00254    for (l = list->list.next; l != &list->list; l = l->next) {
00255       node = DRMLISTENTRY(drmBONode, l, head);
00256       arg = &node->bo_arg;
00257       rep = &arg->d.rep.bo_info;
00258 
00259       if (!arg->handled) {
00260          return -EFAULT;
00261       }
00262       if (arg->d.rep.ret)
00263          return arg->d.rep.ret;
00264 
00265       buf = node->buf;
00266       i915_drm_copy_reply(rep, buf);
00267    }
00268    return 0;
00269 }
struct intel_be_batchbuffer * intel_be_batchbuffer_alloc (struct intel_be_context *intel)   [read]
Definition at line 87 of file intel_be_batchbuffer.c.
References intel_be_device::batchPool, intel_be_batchbuffer::buffer, intel_be_context::device, intel_be_batchbuffer::device, driBOCreateList(), driGenBuffers(), intel_be_batchbuffer::intel, intel_be_batchbuffer_reset(), intel_be_batchbuffer::last_fence, intel_be_batchbuffer::list, and intel_be_batchbuffer::reloc.
00088 {
00089    struct intel_be_batchbuffer *batch = calloc(sizeof(*batch), 1);
00090 
00091    batch->intel = intel;
00092    batch->device = intel->device;
00093 
00094    driGenBuffers(intel->device->batchPool, "batchbuffer", 1,
00095                  &batch->buffer, 4096,
00096                  DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE, 0);
00097    batch->last_fence = NULL;
00098    batch->list = driBOCreateList(20);
00099    batch->reloc = NULL;
00100    intel_be_batchbuffer_reset(batch);
00101    return batch;
00102 }
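The listing above covers allocation only. As a hedged orientation aid, not taken from the source file, a typical batchbuffer lifetime ties this function together with the flush, finish and free entry points documented further down this page; "ctx" is assumed to be an already-initialized struct intel_be_context:

   struct intel_be_batchbuffer *batch;

   batch = intel_be_batchbuffer_alloc(ctx);   /* ctx: assumed intel_be_context */

   /* ... emit commands into batch->base and record buffer references
    * with intel_be_offset_relocation() ... */

   intel_be_batchbuffer_finish(batch);        /* flush and block on the fence */
   intel_be_batchbuffer_free(batch);          /* release buffer, list and relocs */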
void intel_be_batchbuffer_finish (struct intel_be_batchbuffer *batch)
Definition at line 412 of file intel_be_batchbuffer.c.
References driFenceFinish(), driFenceType(), driFenceUnReference(), FALSE, and intel_be_batchbuffer_flush().
00413 {
00414    struct _DriFenceObject *fence = intel_be_batchbuffer_flush(batch);
00415    driFenceFinish(fence, driFenceType(fence), FALSE);
00416    driFenceUnReference(&fence);
00417 }
struct _DriFenceObject * intel_be_batchbuffer_flush (struct intel_be_batchbuffer *batch)   [read]
Definition at line 349 of file intel_be_batchbuffer.c.
References intel_be_batchbuffer::base, intel_be_batchbuffer::buffer, do_flush_locked(), driBOUnmap(), driFenceReference(), FALSE, intel_be_batchbuffer::flags, intel_be_context::hardware_lock, intel_be_context::hardware_locked, intel_be_context::hardware_unlock, intel_be_batchbuffer::intel, INTEL_BATCH_CLIPRECTS, intel_be_batchbuffer_reset(), intel_be_batchbuffer::last_fence, i915_batchbuffer::map, MI_BATCH_BUFFER_END, and i915_batchbuffer::ptr.
00350 {
00351    struct intel_be_context *intel = batch->intel;
00352    unsigned int used = batch->base.ptr - batch->base.map;
00353    boolean was_locked = batch->intel->hardware_locked(intel);
00354    struct _DriFenceObject *fence;
00355 
00356    if (used == 0) {
00357       driFenceReference(batch->last_fence);
00358       return batch->last_fence;
00359    }
00360 
00361    /* Add the MI_BATCH_BUFFER_END.  Always add an MI_FLUSH - this is a
00362     * performance drain that we would like to avoid.
00363     */
00364 #if 0 /* ZZZ JB: what should we do here? */
00365    if (used & 4) {
00366       ((int *) batch->base.ptr)[0] = intel->vtbl.flush_cmd();
00367       ((int *) batch->base.ptr)[1] = 0;
00368       ((int *) batch->base.ptr)[2] = MI_BATCH_BUFFER_END;
00369       used += 12;
00370    }
00371    else {
00372       ((int *) batch->base.ptr)[0] = intel->vtbl.flush_cmd();
00373       ((int *) batch->base.ptr)[1] = MI_BATCH_BUFFER_END;
00374       used += 8;
00375    }
00376 #else
00377    if (used & 4) {
00378       ((int *) batch->base.ptr)[0] = ((0<<29)|(4<<23)); // MI_FLUSH;
00379       ((int *) batch->base.ptr)[1] = 0;
00380       ((int *) batch->base.ptr)[2] = (0xA<<23); // MI_BATCH_BUFFER_END;
00381       used += 12;
00382    }
00383    else {
00384       ((int *) batch->base.ptr)[0] = ((0<<29)|(4<<23)); // MI_FLUSH;
00385       ((int *) batch->base.ptr)[1] = (0xA<<23); // MI_BATCH_BUFFER_END;
00386       used += 8;
00387    }
00388 #endif
00389    driBOUnmap(batch->buffer);
00390    batch->base.ptr = NULL;
00391    batch->base.map = NULL;
00392 
00393    /* TODO: Just pass the relocation list and dma buffer up to the
00394     * kernel.
00395     */
00396    if (!was_locked)
00397       intel->hardware_lock(intel);
00398 
00399    fence = do_flush_locked(batch, used, !(batch->flags & INTEL_BATCH_CLIPRECTS),
00400                            FALSE);
00401 
00402    if (!was_locked)
00403       intel->hardware_unlock(intel);
00404 
00405    /* Reset the buffer:
00406     */
00407    intel_be_batchbuffer_reset(batch);
00408    return fence;
00409 }
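The `used & 4` test above is an alignment check: `used` is always a multiple of 4, and the finished batch must end on an 8-byte boundary, so a zero (NOOP) dword is inserted between MI_FLUSH and MI_BATCH_BUFFER_END whenever the current size is only dword-aligned. A minimal illustrative sketch of that terminator logic, not part of the driver, using the opcode values hard-coded in the listing:

   /* Append MI_FLUSH + MI_BATCH_BUFFER_END so the total length ends up
    * 8-byte aligned.  'ptr' is where batch->base.ptr points, 'used' is
    * the number of bytes already emitted.
    */
   static unsigned terminate_batch(uint32_t *ptr, unsigned used)
   {
      ptr[0] = (0 << 29) | (4 << 23);     /* MI_FLUSH */
      if (used & 4) {                     /* only dword-aligned: pad one NOOP */
         ptr[1] = 0;
         ptr[2] = 0xA << 23;              /* MI_BATCH_BUFFER_END */
         return used + 12;
      }
      ptr[1] = 0xA << 23;                 /* MI_BATCH_BUFFER_END */
      return used + 8;
   }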
void intel_be_batchbuffer_free (struct intel_be_batchbuffer *batch)
Definition at line 105 of file intel_be_batchbuffer.c.
References intel_be_batchbuffer::base, intel_be_batchbuffer::buffer, driBOFreeList(), driBOUnmap(), driBOUnReference(), driFenceFinish(), driFenceUnReference(), FALSE, intel_be_batchbuffer::last_fence, intel_be_batchbuffer::list, i915_batchbuffer::map, and intel_be_batchbuffer::reloc.
00106 {
00107    if (batch->last_fence) {
00108       driFenceFinish(batch->last_fence,
00109                      DRM_FENCE_TYPE_EXE, FALSE);
00110       driFenceUnReference(&batch->last_fence);
00111    }
00112    if (batch->base.map) {
00113       driBOUnmap(batch->buffer);
00114       batch->base.map = NULL;
00115    }
00116    driBOUnReference(batch->buffer);
00117    driBOFreeList(batch->list);
00118    if (batch->reloc)
00119       free(batch->reloc);
00120    batch->buffer = NULL;
00121    free(batch);
00122 }
void intel_be_batchbuffer_reset (struct intel_be_batchbuffer *batch)
Definition at line 21 of file intel_be_batchbuffer.c.
References i915_batchbuffer::actual_size, assert, intel_be_batchbuffer::base, BATCH_RESERVED, _drmBONode::bo_arg, intel_be_batchbuffer::buffer, intel_be_batchbuffer::dest_location, intel_be_batchbuffer::device, intel_be_batchbuffer::dirty_state, driBOAddListItem(), driBOData(), driBOKernel(), driBOMap(), driBOPoolOffset(), driBOResetList(), driBOUnrefUserList(), driReadLockKernelBO(), driReadUnlockKernelBO(), intel_be_batchbuffer::drmBOVirtual, intel_be_batchbuffer::flags, intel_be_batchbuffer::id, INTEL_DEFAULT_RELOCS, INTEL_MAX_RELOCS, intel_realloc_relocs(), intel_be_batchbuffer::list, i915_batchbuffer::map, intel_be_device::max_batch_size, intel_be_batchbuffer::node, intel_be_batchbuffer::nr_relocs, intel_be_batchbuffer::poolOffset, i915_batchbuffer::ptr, intel_be_batchbuffer::reloc, intel_be_batchbuffer::reloc_size, and i915_batchbuffer::size.
00022 {
00023    /*
00024     * Get a new, free batchbuffer.
00025     */
00026    drmBO *bo;
00027    struct drm_bo_info_req *req;
00028 
00029    driBOUnrefUserList(batch->list);
00030    driBOResetList(batch->list);
00031 
00032    /* base.size is the size available to the i915simple driver */
00033    batch->base.size = batch->device->max_batch_size - BATCH_RESERVED;
00034    batch->base.actual_size = batch->device->max_batch_size;
00035    driBOData(batch->buffer, batch->base.actual_size, NULL, NULL, 0);
00036 
00037    /*
00038     * Add the batchbuffer to the validate list.
00039     */
00040 
00041    driBOAddListItem(batch->list, batch->buffer,
00042                     DRM_BO_FLAG_EXE | DRM_BO_FLAG_MEM_TT,
00043                     DRM_BO_FLAG_EXE | DRM_BO_MASK_MEM,
00044                     &batch->dest_location, &batch->node);
00045 
00046    req = &batch->node->bo_arg.d.req.bo_req;
00047 
00048    /*
00049     * Set up information needed for us to make relocations
00050     * relative to the underlying drm buffer objects.
00051     */
00052 
00053    driReadLockKernelBO();
00054    bo = driBOKernel(batch->buffer);
00055    req->presumed_offset = (uint64_t) bo->offset;
00056    req->hint = DRM_BO_HINT_PRESUMED_OFFSET;
00057    batch->drmBOVirtual = (uint8_t *) bo->virtual;
00058    driReadUnlockKernelBO();
00059 
00060    /*
00061     * Adjust the relocation buffer size.
00062     */
00063 
00064    if (batch->reloc_size > INTEL_MAX_RELOCS ||
00065        batch->reloc == NULL)
00066       intel_realloc_relocs(batch, INTEL_DEFAULT_RELOCS);
00067 
00068    assert(batch->reloc != NULL);
00069    batch->reloc[0] = 0; /* No relocs yet. */
00070    batch->reloc[1] = 1; /* Reloc type 1 */
00071    batch->reloc[2] = 0; /* Only a single relocation list. */
00072    batch->reloc[3] = 0; /* Only a single relocation list. */
00073 
00074    batch->base.map = driBOMap(batch->buffer, DRM_BO_FLAG_WRITE, 0);
00075    batch->poolOffset = driBOPoolOffset(batch->buffer);
00076    batch->base.ptr = batch->base.map;
00077    batch->dirty_state = ~0;
00078    batch->nr_relocs = 0;
00079    batch->flags = 0;
00080    batch->id = 0;//batch->intel->intelScreen->batch_id++;
00081 }
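For orientation, the four header dwords initialized here, together with the per-relocation records appended by intel_be_offset_relocation() below, give the relocation buffer the shape sketched in the following comment. This is a summary read off the listings on this page and hedged accordingly; the authoritative layout (I915_RELOC_HEADER, I915_RELOC0_STRIDE) lives in the i915 DRM headers:

   /*
    * Assumed shape of batch->reloc, as inferred from the listings above:
    *
    *   reloc[0]   number of relocations (rewritten with nr_relocs by
    *              i915_execbuf() at submit time)
    *   reloc[1]   relocation type (1)
    *   reloc[2]   chained relocation list (0 here: only a single list)
    *   reloc[3]   chained relocation list (0 here: only a single list)
    *
    *   then one record per relocation, I915_RELOC0_STRIDE dwords apart:
    *   rec[0]     byte offset into the batch of the dword to patch
    *   rec[1]     pre_add: byte offset added to the target buffer's address
    *   rec[2]     index of the target buffer in the validate list
    *   rec[3]     destination location returned by driBOAddListItem()
    */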
void intel_be_offset_relocation (struct intel_be_batchbuffer *batch,
                                 unsigned pre_add,
                                 struct _DriBufferObject *driBO,
                                 uint64_t val_flags,
                                 uint64_t val_mask)
Definition at line 125 of file intel_be_batchbuffer.c.
References intel_be_batchbuffer::base, _drmBONode::bo_arg, intel_be_batchbuffer::dest_location, driBOAddListItem(), driBOKernel(), driBOPoolOffset(), driReadLockKernelBO(), driReadUnlockKernelBO(), intel_be_batchbuffer::drmBOVirtual, i915_batchbuffer_dword(), intel_realloc_relocs(), intel_be_batchbuffer::list, intel_be_batchbuffer::nr_relocs, i915_batchbuffer::ptr, intel_be_batchbuffer::reloc, and intel_be_batchbuffer::reloc_size.
00130 {
00131    int itemLoc;
00132    struct _drmBONode *node;
00133    uint32_t *reloc;
00134    struct drm_bo_info_req *req;
00135 
00136    driBOAddListItem(batch->list, driBO, val_flags, val_mask,
00137                     &itemLoc, &node);
00138    req = &node->bo_arg.d.req.bo_req;
00139 
00140    if (!(req->hint & DRM_BO_HINT_PRESUMED_OFFSET)) {
00141 
00142       /*
00143        * Stop other threads from tampering with the underlying
00144        * drmBO while we're reading its offset.
00145        */
00146 
00147       driReadLockKernelBO();
00148       req->presumed_offset = (uint64_t) driBOKernel(driBO)->offset;
00149       driReadUnlockKernelBO();
00150       req->hint = DRM_BO_HINT_PRESUMED_OFFSET;
00151    }
00152 
00153    pre_add += driBOPoolOffset(driBO);
00154 
00155    if (batch->nr_relocs == batch->reloc_size)
00156       intel_realloc_relocs(batch, batch->reloc_size * 2);
00157 
00158    reloc = batch->reloc +
00159       (I915_RELOC_HEADER + batch->nr_relocs * I915_RELOC0_STRIDE);
00160 
00161    reloc[0] = ((uint8_t *)batch->base.ptr - batch->drmBOVirtual);
00162    i915_batchbuffer_dword(&batch->base, req->presumed_offset + pre_add);
00163    reloc[1] = pre_add;
00164    reloc[2] = itemLoc;
00165    reloc[3] = batch->dest_location;
00166    batch->nr_relocs++;
00167 }
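A hedged usage sketch, not taken from the source file: the caller emits the command header itself and then lets intel_be_offset_relocation() emit the address dword, recording where in the batch it was written so the kernel can patch it if the presumed offset turns out to be stale. The command value and the validation flags below are illustrative assumptions only:

   /* emit a command whose next dword is an address inside driBO */
   i915_batchbuffer_dword(&batch->base, some_state_command);   /* assumed command */

   /* record the relocation and emit the presumed address dword */
   intel_be_offset_relocation(batch,
                              byte_offset_into_bo,                     /* pre_add */
                              driBO,                                   /* target buffer */
                              DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,   /* val_flags (assumed) */
                              DRM_BO_MASK_MEM | DRM_BO_FLAG_READ);     /* val_mask (assumed) */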
static void intel_realloc_relocs (struct intel_be_batchbuffer *batch, int num_relocs)   [static]
Definition at line 10 of file intel_be_batchbuffer.c.
References intel_be_batchbuffer::reloc, and intel_be_batchbuffer::reloc_size.
00011 {
00012    unsigned long size = num_relocs * I915_RELOC0_STRIDE + I915_RELOC_HEADER;
00013 
00014    size *= sizeof(uint32_t);
00015    batch->reloc = realloc(batch->reloc, size);
00016    batch->reloc_size = num_relocs;
00017 }