00001
00002 #include "intel_be_batchbuffer.h"
00003 #include "intel_be_context.h"
00004 #include "intel_be_device.h"
00005 #include <errno.h>
00006
00007 #include "xf86drm.h"
00008
00009 static void
00010 intel_realloc_relocs(struct intel_be_batchbuffer *batch, int num_relocs)
00011 {
00012 unsigned long size = num_relocs * I915_RELOC0_STRIDE + I915_RELOC_HEADER;
00013
00014 size *= sizeof(uint32_t);
00015 batch->reloc = realloc(batch->reloc, size);
00016 batch->reloc_size = num_relocs;
00017 }
00018
00019
/*
 * Prepare the batchbuffer for a new batch of commands.
 *
 * Drops the previous validation list, (re)allocates the backing buffer
 * object, records the kernel's presumed offset for it, resets the
 * relocation header and maps the buffer for writing.  Called from
 * intel_be_batchbuffer_alloc() and again after every flush.
 */
void
intel_be_batchbuffer_reset(struct intel_be_batchbuffer *batch)
{
   drmBO *bo;
   struct drm_bo_info_req *req;

   /* Drop the references held on behalf of the previous batch and start
    * with an empty validation list.
    */
   driBOUnrefUserList(batch->list);
   driBOResetList(batch->list);

   /* Usable size excludes BATCH_RESERVED bytes at the tail — presumably
    * room for the flush / MI_BATCH_BUFFER_END words appended by
    * intel_be_batchbuffer_flush(); confirm against BATCH_RESERVED's value.
    */
   batch->base.size = batch->device->max_batch_size - BATCH_RESERVED;
   batch->base.actual_size = batch->device->max_batch_size;
   driBOData(batch->buffer, batch->base.actual_size, NULL, NULL, 0);

   /* The batchbuffer itself goes on the validation list first; remember
    * its list position (dest_location) and node for the relocations and
    * the presumed-offset setup below.
    */
   driBOAddListItem(batch->list, batch->buffer,
                    DRM_BO_FLAG_EXE | DRM_BO_FLAG_MEM_TT,
                    DRM_BO_FLAG_EXE | DRM_BO_MASK_MEM,
                    &batch->dest_location, &batch->node);

   req = &batch->node->bo_arg.d.req.bo_req;

   /* Snapshot the kernel's current offset and CPU mapping for the buffer
    * and tell the kernel we are relying on it (PRESUMED_OFFSET), so it
    * can skip relocation fixups if the buffer does not move.  The kernel
    * BO is only valid while the read lock is held.
    */
   driReadLockKernelBO();
   bo = driBOKernel(batch->buffer);
   req->presumed_offset = (uint64_t) bo->offset;
   req->hint = DRM_BO_HINT_PRESUMED_OFFSET;
   batch->drmBOVirtual = (uint8_t *) bo->virtual;
   driReadUnlockKernelBO();

   /* Shrink an oversized relocation buffer back down to the default,
    * and make sure one exists at all on the first call.
    */
   if (batch->reloc_size > INTEL_MAX_RELOCS ||
       batch->reloc == NULL)
      intel_realloc_relocs(batch, INTEL_DEFAULT_RELOCS);

   assert(batch->reloc != NULL);
   batch->reloc[0] = 0; /* Reloc count; overwritten with nr_relocs in i915_execbuf(). */
   batch->reloc[1] = 1; /* Remaining header words per I915_RELOC_HEADER layout — */
   batch->reloc[2] = 0; /* semantics defined by the kernel reloc ABI; see the    */
   batch->reloc[3] = 0; /* i915 DRM header for the exact meaning.                */

   batch->base.map = driBOMap(batch->buffer, DRM_BO_FLAG_WRITE, 0);
   batch->poolOffset = driBOPoolOffset(batch->buffer);
   batch->base.ptr = batch->base.map;
   batch->dirty_state = ~0;
   batch->nr_relocs = 0;
   batch->flags = 0;
   batch->id = 0;
}
00082
00083
00084
00085
00086 struct intel_be_batchbuffer *
00087 intel_be_batchbuffer_alloc(struct intel_be_context *intel)
00088 {
00089 struct intel_be_batchbuffer *batch = calloc(sizeof(*batch), 1);
00090
00091 batch->intel = intel;
00092 batch->device = intel->device;
00093
00094 driGenBuffers(intel->device->batchPool, "batchbuffer", 1,
00095 &batch->buffer, 4096,
00096 DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE, 0);
00097 batch->last_fence = NULL;
00098 batch->list = driBOCreateList(20);
00099 batch->reloc = NULL;
00100 intel_be_batchbuffer_reset(batch);
00101 return batch;
00102 }
00103
00104 void
00105 intel_be_batchbuffer_free(struct intel_be_batchbuffer *batch)
00106 {
00107 if (batch->last_fence) {
00108 driFenceFinish(batch->last_fence,
00109 DRM_FENCE_TYPE_EXE, FALSE);
00110 driFenceUnReference(&batch->last_fence);
00111 }
00112 if (batch->base.map) {
00113 driBOUnmap(batch->buffer);
00114 batch->base.map = NULL;
00115 }
00116 driBOUnReference(batch->buffer);
00117 driBOFreeList(batch->list);
00118 if (batch->reloc)
00119 free(batch->reloc);
00120 batch->buffer = NULL;
00121 free(batch);
00122 }
00123
/*
 * Emit a relocated buffer address into the batch.
 *
 * Writes the buffer's presumed GPU offset (+ pre_add) as the next batch
 * dword and records a relocation entry so the kernel can patch the
 * dword if the buffer ends up validated at a different offset.
 *
 * pre_add:  byte offset inside driBO added to the relocated address.
 * val_flags / val_mask:  placement flags and mask used when the buffer
 *                        is validated at execbuffer time.
 */
void
intel_be_offset_relocation(struct intel_be_batchbuffer *batch,
                           unsigned pre_add,
                           struct _DriBufferObject *driBO,
                           uint64_t val_flags,
                           uint64_t val_mask)
{
   int itemLoc;
   struct _drmBONode *node;
   uint32_t *reloc;
   struct drm_bo_info_req *req;

   driBOAddListItem(batch->list, driBO, val_flags, val_mask,
                    &itemLoc, &node);
   req = &node->bo_arg.d.req.bo_req;

   if (!(req->hint & DRM_BO_HINT_PRESUMED_OFFSET)) {

      /* First relocation against this buffer in this batch: snapshot the
       * kernel's current offset so the dword emitted below is most likely
       * already correct, sparing the kernel a fixup pass.
       */
      driReadLockKernelBO();
      req->presumed_offset = (uint64_t) driBOKernel(driBO)->offset;
      driReadUnlockKernelBO();
      req->hint = DRM_BO_HINT_PRESUMED_OFFSET;
   }

   /* Buffers may be sub-allocated from a pool; fold the sub-allocation
    * offset into the address delta.
    */
   pre_add += driBOPoolOffset(driBO);

   /* Grow the relocation array geometrically when full. */
   if (batch->nr_relocs == batch->reloc_size)
      intel_realloc_relocs(batch, batch->reloc_size * 2);

   reloc = batch->reloc +
      (I915_RELOC_HEADER + batch->nr_relocs * I915_RELOC0_STRIDE);

   /* reloc[0] is the dword's byte offset within the batch mapping — it
    * must be computed BEFORE i915_batchbuffer_dword() advances
    * batch->base.ptr.  Assumes base.ptr points into the same mapping as
    * drmBOVirtual (bo->virtual) — TODO confirm for pooled buffers.
    */
   reloc[0] = ((uint8_t *)batch->base.ptr - batch->drmBOVirtual);
   i915_batchbuffer_dword(&batch->base, req->presumed_offset + pre_add);
   reloc[1] = pre_add;
   reloc[2] = itemLoc;            /* Validate-list index of the target buffer. */
   reloc[3] = batch->dest_location; /* List index of the batchbuffer itself.   */
   batch->nr_relocs++;
}
00168
00169 static void
00170 i915_drm_copy_reply(const struct drm_bo_info_rep * rep, drmBO * buf)
00171 {
00172 buf->handle = rep->handle;
00173 buf->flags = rep->flags;
00174 buf->size = rep->size;
00175 buf->offset = rep->offset;
00176 buf->mapHandle = rep->arg_handle;
00177 buf->proposedFlags = rep->proposed_flags;
00178 buf->start = rep->buffer_start;
00179 buf->fenceFlags = rep->fence_flags;
00180 buf->replyFlags = rep->rep_flags;
00181 buf->pageAlignment = rep->page_alignment;
00182 }
00183
/*
 * Submit the batch to the kernel via the DRM_I915_EXECBUFFER ioctl.
 *
 * Walks the validation list building the chained drm_i915_op_arg
 * validate requests, fills in the execbuffer argument, performs the
 * ioctl (retrying on EAGAIN), and copies the kernel's per-buffer
 * replies back into the userspace buffer objects.
 *
 * Returns 0 on success, a negative errno-style code on failure.  On
 * success *ea holds the kernel's reply (including the fence argument).
 * ignore_cliprects is currently unused: the cliprect path is compiled
 * out below.
 */
static int
i915_execbuf(struct intel_be_batchbuffer *batch,
             unsigned int used,
             boolean ignore_cliprects,
             drmBOList *list,
             struct drm_i915_execbuffer *ea)
{

   drmBONode *node;
   drmMMListHead *l;
   struct drm_i915_op_arg *arg, *first;
   struct drm_bo_op_req *req;
   struct drm_bo_info_rep *rep;
   uint64_t *prevNext = NULL;
   drmBO *buf;
   int ret = 0;
   uint32_t count = 0;

   /* Build the singly-linked chain of validate requests the kernel
    * walks: each node's 'next' points at the following node's arg.
    */
   first = NULL;
   for (l = list->list.next; l != &list->list; l = l->next) {
      node = DRMLISTENTRY(drmBONode, l, head);

      arg = &node->bo_arg;
      req = &arg->d.req;

      if (!first)
         first = arg;

      if (prevNext)
         *prevNext = (unsigned long)arg;

      prevNext = &arg->next;
      req->bo_req.handle = node->buf->handle;
      req->op = drm_bo_validate;
      req->bo_req.flags = node->arg0;
      req->bo_req.mask = node->arg1;
      req->bo_req.hint |= 0; /* NOTE(review): no-op — presumably a placeholder
                              * for extra hint bits; existing hints kept. */
      count++;
   }

   memset(ea, 0, sizeof(*ea));
   ea->num_buffers = count;
   ea->batch.start = batch->poolOffset;
   ea->batch.used = used;
#if 0
   ea->batch.cliprects = intel->pClipRects;
   ea->batch.num_cliprects = ignore_cliprects ? 0 : intel->numClipRects;
   ea->batch.DR1 = 0;
   ea->batch.DR4 = 0;((((GLuint) intel->drawX) & 0xffff) |
                      (((GLuint) intel->drawY) << 16));
#else
   /* Cliprect handling disabled; submit with no cliprects. */
   ea->batch.cliprects = NULL;
   ea->batch.num_cliprects = 0;
   ea->batch.DR1 = 0;
   ea->batch.DR4 = 0;
#endif
   ea->fence_arg.flags = DRM_I915_FENCE_FLAG_FLUSHED;
   ea->ops_list = (unsigned long) first;
   /* NOTE(review): if the list were empty, 'first' would be NULL here and
    * this would fault — in practice the batchbuffer itself is always on
    * the list (added in intel_be_batchbuffer_reset()).
    */
   first->reloc_ptr = (unsigned long) batch->reloc;
   batch->reloc[0] = batch->nr_relocs; /* Header word: number of relocations. */

   /* The kernel may ask us to retry (e.g. on signal/contention). */
   do {
      ret = drmCommandWriteRead(batch->device->fd, DRM_I915_EXECBUFFER, ea,
                                sizeof(*ea));
   } while (ret == -EAGAIN);

   if (ret != 0)
      return ret;

   /* Propagate the kernel's post-validate buffer state back to the
    * userspace drmBOs; bail on the first unhandled or failed entry.
    */
   for (l = list->list.next; l != &list->list; l = l->next) {
      node = DRMLISTENTRY(drmBONode, l, head);
      arg = &node->bo_arg;
      rep = &arg->d.rep.bo_info;

      if (!arg->handled) {
         return -EFAULT;
      }
      if (arg->d.rep.ret)
         return arg->d.rep.ret;

      buf = node->buf;
      i915_drm_copy_reply(rep, buf);
   }
   return 0;
}
00270
00271
00272
/*
 * Submit the batch with the hardware lock held and fence the buffers.
 *
 * Validates the user list, calls i915_execbuf(), and on success wraps
 * the kernel fence in a _DriFenceObject covering every buffer on the
 * list.  Returns the fence (caller owns a reference) or NULL if the
 * kernel reported a fencing error.  Aborts on execbuffer failure.
 * allow_unlock is currently unused.
 */
static struct _DriFenceObject *
do_flush_locked(struct intel_be_batchbuffer *batch,
                unsigned int used,
                boolean ignore_cliprects, boolean allow_unlock)
{
   struct intel_be_context *intel = batch->intel;
   struct _DriFenceObject *fo;
   drmFence fence;
   drmBOList *boList;
   struct drm_i915_execbuffer ea;
   int ret = 0;

   driBOValidateUserList(batch->list);
   boList = driGetdrmBOList(batch->list);

#if 0
   if (!(intel->numClipRects == 0 && !ignore_cliprects)) {
#else
   /* Cliprect skipping disabled: always submit. */
   if (1) {
#endif
      ret = i915_execbuf(batch, used, ignore_cliprects, boList, &ea);
   } else {
      driPutdrmBOList(batch->list);
      fo = NULL;
      goto out;
   }
   driPutdrmBOList(batch->list);
   if (ret)
      abort();

   if (ea.fence_arg.error != 0) {

      /* The kernel executed the batch but could not create a fence for
       * it; drop our stale fence and return NULL so the caller knows no
       * fence covers this submission.
       */
      if (batch->last_fence)
         driFenceUnReference(&batch->last_fence);
#if 0
      _mesa_printf("fence error\n");
#endif
      batch->last_fence = NULL;
      fo = NULL;
      goto out;
   }

   /* Re-wrap the raw kernel fence handle as a _DriFenceObject that
    * fences every buffer on this batch's list.
    */
   fence.handle = ea.fence_arg.handle;
   fence.fence_class = ea.fence_arg.fence_class;
   fence.type = ea.fence_arg.type;
   fence.flags = ea.fence_arg.flags;
   fence.signaled = ea.fence_arg.signaled;

   fo = driBOFenceUserList(batch->device->fenceMgr, batch->list,
                           "SuperFence", &fence);

   /* Remember read/write fences so intel_be_batchbuffer_free() can wait
    * for outstanding rendering before teardown.
    */
   if (driFenceType(fo) & DRM_I915_FENCE_TYPE_RW) {
      if (batch->last_fence)
         driFenceUnReference(&batch->last_fence);

      batch->last_fence = fo;
      driFenceReference(fo);
   }
out:
#if 0
   intel->vtbl.lost_hardware(intel);
#else
   (void)intel; /* Silence unused-variable warning while the hook is disabled. */
#endif
   return fo;
}
00346
00347
/*
 * Terminate and submit the current batch, returning a fence for it.
 *
 * Appends a flush command and MI_BATCH_BUFFER_END (padding to an 8-byte
 * boundary), unmaps the buffer, submits under the hardware lock and
 * resets the batch for reuse.  The caller owns a reference on the
 * returned fence.
 */
struct _DriFenceObject *
intel_be_batchbuffer_flush(struct intel_be_batchbuffer *batch)
{
   struct intel_be_context *intel = batch->intel;
   unsigned int used = batch->base.ptr - batch->base.map;
   boolean was_locked = batch->intel->hardware_locked(intel);
   struct _DriFenceObject *fence;

   /* Nothing emitted: hand back another reference to the previous fence.
    * NOTE(review): if last_fence is NULL here (first flush with an empty
    * batch) this passes NULL to driFenceReference — confirm it tolerates
    * that.
    */
   if (used == 0) {
      driFenceReference(batch->last_fence);
      return batch->last_fence;
   }

   /* Terminate the batch, keeping the total size a multiple of 8 bytes.
    * The literals mirror the disabled vtbl variant above:
    * ((0<<29)|(4<<23)) is the flush command and (0xA<<23) is
    * MI_BATCH_BUFFER_END.
    */
#if 0
   if (used & 4) {
      ((int *) batch->base.ptr)[0] = intel->vtbl.flush_cmd();
      ((int *) batch->base.ptr)[1] = 0;
      ((int *) batch->base.ptr)[2] = MI_BATCH_BUFFER_END;
      used += 12;
   }
   else {
      ((int *) batch->base.ptr)[0] = intel->vtbl.flush_cmd();
      ((int *) batch->base.ptr)[1] = MI_BATCH_BUFFER_END;
      used += 8;
   }
#else
   if (used & 4) {
      ((int *) batch->base.ptr)[0] = ((0<<29)|(4<<23));
      ((int *) batch->base.ptr)[1] = 0;
      ((int *) batch->base.ptr)[2] = (0xA<<23);
      used += 12;
   }
   else {
      ((int *) batch->base.ptr)[0] = ((0<<29)|(4<<23));
      ((int *) batch->base.ptr)[1] = (0xA<<23);
      used += 8;
   }
#endif
   /* The buffer must be unmapped before the kernel validates it. */
   driBOUnmap(batch->buffer);
   batch->base.ptr = NULL;
   batch->base.map = NULL;

   /* Submission requires the hardware lock; take it only if the caller
    * did not already hold it, and restore the previous state afterwards.
    */
   if (!was_locked)
      intel->hardware_lock(intel);

   fence = do_flush_locked(batch, used, !(batch->flags & INTEL_BATCH_CLIPRECTS),
                           FALSE);

   if (!was_locked)
      intel->hardware_unlock(intel);

   /* Start a fresh batch so the caller can keep emitting immediately. */
   intel_be_batchbuffer_reset(batch);
   return fence;
}
00410
00411 void
00412 intel_be_batchbuffer_finish(struct intel_be_batchbuffer *batch)
00413 {
00414 struct _DriFenceObject *fence = intel_be_batchbuffer_flush(batch);
00415 driFenceFinish(fence, driFenceType(fence), FALSE);
00416 driFenceUnReference(&fence);
00417 }
00418
#if 0
/*
 * Currently compiled out.  Copies a pre-formed block of command dwords
 * into the batch, relying on intel_batchbuffer_require_space() (not
 * visible in this file) to make room first.  bytes must be a multiple
 * of 4.
 */
void
intel_be_batchbuffer_data(struct intel_be_batchbuffer *batch,
                          const void *data, unsigned int bytes, unsigned int flags)
{
   assert((bytes & 3) == 0);
   intel_batchbuffer_require_space(batch, bytes, flags);
   memcpy(batch->base.ptr, data, bytes);
   batch->base.ptr += bytes;
}
#endif