drivers/gpu/drm/i915/intel_ringbuffer.c
1 /*
2  * Copyright © 2008-2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Zou Nan hai <nanhai.zou@intel.com>
26  *    Xiang Haihao <haihao.xiang@intel.com>
27  *
28  */
29
30 #include "drmP.h"
31 #include "drm.h"
32 #include "i915_drv.h"
33 #include "i915_drm.h"
34 #include "i915_trace.h"
35 #include "intel_drv.h"
36
37 /*
38  * 965+ support PIPE_CONTROL commands, which provide finer grained control
39  * over cache flushing.
40  */
41 struct pipe_control {
42         struct drm_i915_gem_object *obj;
43         volatile u32 *cpu_page;
44         u32 gtt_offset;
45 };
46
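/*
 * Bytes of free space between the ring tail (where the CPU emits commands)
 * and the head (where the GPU consumes them).  A small gap is kept so that
 * the tail never fully catches up with the head, since head == tail is
 * interpreted as an empty ring.
 */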
47 static inline int ring_space(struct intel_ring_buffer *ring)
48 {
49         int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
50         if (space < 0)
51                 space += ring->size;
52         return space;
53 }
54
55 static int
56 render_ring_flush(struct intel_ring_buffer *ring,
57                   u32   invalidate_domains,
58                   u32   flush_domains)
59 {
60         struct drm_device *dev = ring->dev;
61         u32 cmd;
62         int ret;
63
64         /*
65          * read/write caches:
66          *
67          * I915_GEM_DOMAIN_RENDER is always invalidated, but is
68          * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
69          * also flushed at 2d versus 3d pipeline switches.
70          *
71          * read-only caches:
72          *
73          * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
74          * MI_READ_FLUSH is set, and is always flushed on 965.
75          *
76          * I915_GEM_DOMAIN_COMMAND may not exist?
77          *
78          * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
79          * invalidated when MI_EXE_FLUSH is set.
80          *
81          * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
82          * invalidated with every MI_FLUSH.
83          *
84          * TLBs:
85          *
86          * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
87          * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
88          * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
89          * are flushed at any MI_FLUSH.
90          */
91
92         cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
93         if ((invalidate_domains|flush_domains) &
94             I915_GEM_DOMAIN_RENDER)
95                 cmd &= ~MI_NO_WRITE_FLUSH;
96         if (INTEL_INFO(dev)->gen < 4) {
97                 /*
98                  * On the 965, the sampler cache always gets flushed
99                  * and this bit is reserved.
100                  */
101                 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
102                         cmd |= MI_READ_FLUSH;
103         }
104         if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
105                 cmd |= MI_EXE_FLUSH;
106
107         if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
108             (IS_G4X(dev) || IS_GEN5(dev)))
109                 cmd |= MI_INVALIDATE_ISP;
110
111         ret = intel_ring_begin(ring, 2);
112         if (ret)
113                 return ret;
114
115         intel_ring_emit(ring, cmd);
116         intel_ring_emit(ring, MI_NOOP);
117         intel_ring_advance(ring);
118
119         return 0;
120 }
121
122 /**
123  * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
124  * implementing two workarounds on gen6.  From section 1.4.7.1
125  * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
126  *
127  * [DevSNB-C+{W/A}] Before any depth stall flush (including those
128  * produced by non-pipelined state commands), software needs to first
129  * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
130  * 0.
131  *
132  * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
133  * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
134  *
135  * And the workaround for these two requires this workaround first:
136  *
137  * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
138  * BEFORE the pipe-control with a post-sync op and no write-cache
139  * flushes.
140  *
141  * And this last workaround is tricky because of the requirements on
142  * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
143  * volume 2 part 1:
144  *
145  *     "1 of the following must also be set:
146  *      - Render Target Cache Flush Enable ([12] of DW1)
147  *      - Depth Cache Flush Enable ([0] of DW1)
148  *      - Stall at Pixel Scoreboard ([1] of DW1)
149  *      - Depth Stall ([13] of DW1)
150  *      - Post-Sync Operation ([13] of DW1)
151  *      - Notify Enable ([8] of DW1)"
152  *
153  * The cache flushes require the workaround flush that triggered this
154  * one, so we can't use it.  Depth stall would trigger the same.
155  * Post-sync nonzero is what triggered this second workaround, so we
156  * can't use that one either.  Notify enable is IRQs, which aren't
157  * really our business.  That leaves only stall at scoreboard.
158  */
159 static int
160 intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
161 {
162         struct pipe_control *pc = ring->private;
163         u32 scratch_addr = pc->gtt_offset + 128;
164         int ret;
165
166
167         ret = intel_ring_begin(ring, 6);
168         if (ret)
169                 return ret;
170
171         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
172         intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
173                         PIPE_CONTROL_STALL_AT_SCOREBOARD);
174         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
175         intel_ring_emit(ring, 0); /* low dword */
176         intel_ring_emit(ring, 0); /* high dword */
177         intel_ring_emit(ring, MI_NOOP);
178         intel_ring_advance(ring);
179
180         ret = intel_ring_begin(ring, 6);
181         if (ret)
182                 return ret;
183
184         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
185         intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
186         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
187         intel_ring_emit(ring, 0);
188         intel_ring_emit(ring, 0);
189         intel_ring_emit(ring, MI_NOOP);
190         intel_ring_advance(ring);
191
192         return 0;
193 }
194
195 static int
196 gen6_render_ring_flush(struct intel_ring_buffer *ring,
197                          u32 invalidate_domains, u32 flush_domains)
198 {
199         u32 flags = 0;
200         struct pipe_control *pc = ring->private;
201         u32 scratch_addr = pc->gtt_offset + 128;
202         int ret;
203
204         /* Force SNB workarounds for PIPE_CONTROL flushes */
205         ret = intel_emit_post_sync_nonzero_flush(ring);
            if (ret)
                    return ret;
206
207         /* Just flush everything.  Experiments have shown that reducing the
208          * number of bits based on the write domains has little performance
209          * impact.
210          */
211         flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
212         flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
213         flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
214         flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
215         flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
216         flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
217         flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
218
219         ret = intel_ring_begin(ring, 6);
220         if (ret)
221                 return ret;
222
223         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
224         intel_ring_emit(ring, flags);
225         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
226         intel_ring_emit(ring, 0); /* lower dword */
227                 intel_ring_emit(ring, 0); /* upper dword */
228         intel_ring_emit(ring, MI_NOOP);
229         intel_ring_advance(ring);
230
231         return 0;
232 }
233
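/* Default tail update: write the new tail straight into the ring's TAIL register. */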
234 static void ring_write_tail(struct intel_ring_buffer *ring,
235                             u32 value)
236 {
237         drm_i915_private_t *dev_priv = ring->dev->dev_private;
238         I915_WRITE_TAIL(ring, value);
239 }
240
241 u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
242 {
243         drm_i915_private_t *dev_priv = ring->dev->dev_private;
244         u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
245                         RING_ACTHD(ring->mmio_base) : ACTHD;
246
247         return I915_READ(acthd_reg);
248 }
249
250 static int init_ring_common(struct intel_ring_buffer *ring)
251 {
252         drm_i915_private_t *dev_priv = ring->dev->dev_private;
253         struct drm_i915_gem_object *obj = ring->obj;
254         u32 head;
255
256         /* Stop the ring if it's running. */
257         I915_WRITE_CTL(ring, 0);
258         I915_WRITE_HEAD(ring, 0);
259         ring->write_tail(ring, 0);
260
261         /* Initialize the ring. */
262         I915_WRITE_START(ring, obj->gtt_offset);
263         head = I915_READ_HEAD(ring) & HEAD_ADDR;
264
265         /* G45 ring initialization fails to reset head to zero */
266         if (head != 0) {
267                 DRM_DEBUG_KMS("%s head not reset to zero "
268                               "ctl %08x head %08x tail %08x start %08x\n",
269                               ring->name,
270                               I915_READ_CTL(ring),
271                               I915_READ_HEAD(ring),
272                               I915_READ_TAIL(ring),
273                               I915_READ_START(ring));
274
275                 I915_WRITE_HEAD(ring, 0);
276
277                 if (I915_READ_HEAD(ring) & HEAD_ADDR) {
278                         DRM_ERROR("failed to set %s head to zero "
279                                   "ctl %08x head %08x tail %08x start %08x\n",
280                                   ring->name,
281                                   I915_READ_CTL(ring),
282                                   I915_READ_HEAD(ring),
283                                   I915_READ_TAIL(ring),
284                                   I915_READ_START(ring));
285                 }
286         }
287
288         I915_WRITE_CTL(ring,
289                         ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
290                         | RING_VALID);
291
292         /* If the head is still not zero, the ring is dead */
293         if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
294             I915_READ_START(ring) != obj->gtt_offset ||
295             (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
296                 DRM_ERROR("%s initialization failed "
297                                 "ctl %08x head %08x tail %08x start %08x\n",
298                                 ring->name,
299                                 I915_READ_CTL(ring),
300                                 I915_READ_HEAD(ring),
301                                 I915_READ_TAIL(ring),
302                                 I915_READ_START(ring));
303                 return -EIO;
304         }
305
306         if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
307                 i915_kernel_lost_context(ring->dev);
308         else {
309                 ring->head = I915_READ_HEAD(ring);
310                 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
311                 ring->space = ring_space(ring);
312         }
313
314         return 0;
315 }
316
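/*
 * Allocate, pin and CPU-map a 4KiB scratch object for PIPE_CONTROL writes
 * (seqno storage and workaround flushes on gen5+).
 */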
317 static int
318 init_pipe_control(struct intel_ring_buffer *ring)
319 {
320         struct pipe_control *pc;
321         struct drm_i915_gem_object *obj;
322         int ret;
323
324         if (ring->private)
325                 return 0;
326
327         pc = kmalloc(sizeof(*pc), GFP_KERNEL);
328         if (!pc)
329                 return -ENOMEM;
330
331         obj = i915_gem_alloc_object(ring->dev, 4096);
332         if (obj == NULL) {
333                 DRM_ERROR("Failed to allocate seqno page\n");
334                 ret = -ENOMEM;
335                 goto err;
336         }
337
338         i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
339
340         ret = i915_gem_object_pin(obj, 4096, true);
341         if (ret)
342                 goto err_unref;
343
344         pc->gtt_offset = obj->gtt_offset;
345         pc->cpu_page = kmap(obj->pages[0]);
346         if (pc->cpu_page == NULL) {
                    ret = -ENOMEM;
347                 goto err_unpin;
            }
348
349         pc->obj = obj;
350         ring->private = pc;
351         return 0;
352
353 err_unpin:
354         i915_gem_object_unpin(obj);
355 err_unref:
356         drm_gem_object_unreference(&obj->base);
357 err:
358         kfree(pc);
359         return ret;
360 }
361
362 static void
363 cleanup_pipe_control(struct intel_ring_buffer *ring)
364 {
365         struct pipe_control *pc = ring->private;
366         struct drm_i915_gem_object *obj;
367
368         if (!ring->private)
369                 return;
370
371         obj = pc->obj;
372         kunmap(obj->pages[0]);
373         i915_gem_object_unpin(obj);
374         drm_gem_object_unreference(&obj->base);
375
376         kfree(pc);
377         ring->private = NULL;
378 }
379
380 static int init_render_ring(struct intel_ring_buffer *ring)
381 {
382         struct drm_device *dev = ring->dev;
383         struct drm_i915_private *dev_priv = dev->dev_private;
384         int ret = init_ring_common(ring);
385
386         if (INTEL_INFO(dev)->gen > 3) {
387                 int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
388                 I915_WRITE(MI_MODE, mode);
389                 if (IS_GEN7(dev))
390                         I915_WRITE(GFX_MODE_GEN7,
391                                    GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
392                                    GFX_MODE_ENABLE(GFX_REPLAY_MODE));
393         }
394
395         if (INTEL_INFO(dev)->gen >= 5) {
396                 ret = init_pipe_control(ring);
397                 if (ret)
398                         return ret;
399         }
400
401         if (INTEL_INFO(dev)->gen >= 6) {
402                 I915_WRITE(INSTPM,
403                            INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
404         }
405
406         return ret;
407 }
408
409 static void render_ring_cleanup(struct intel_ring_buffer *ring)
410 {
411         if (!ring->private)
412                 return;
413
414         cleanup_pipe_control(ring);
415 }
416
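/*
 * Emit a semaphore mailbox update: store the new seqno into the mailbox
 * register at @mmio_offset so that another ring can wait on it.
 */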
417 static void
418 update_mboxes(struct intel_ring_buffer *ring,
419             u32 seqno,
420             u32 mmio_offset)
421 {
422         intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
423                               MI_SEMAPHORE_GLOBAL_GTT |
424                               MI_SEMAPHORE_REGISTER |
425                               MI_SEMAPHORE_UPDATE);
426         intel_ring_emit(ring, seqno);
427         intel_ring_emit(ring, mmio_offset);
428 }
429
430 /**
431  * gen6_add_request - Update the semaphore mailbox registers
432  * 
433  * @ring - ring that is adding a request
434  * @seqno - return seqno stuck into the ring
435  *
436  * Update the mailbox registers in the *other* rings with the current seqno.
437  * This acts like a signal in the canonical semaphore.
438  */
439 static int
440 gen6_add_request(struct intel_ring_buffer *ring,
441                  u32 *seqno)
442 {
443         u32 mbox1_reg;
444         u32 mbox2_reg;
445         int ret;
446
447         ret = intel_ring_begin(ring, 10);
448         if (ret)
449                 return ret;
450
451         mbox1_reg = ring->signal_mbox[0];
452         mbox2_reg = ring->signal_mbox[1];
453
454         *seqno = i915_gem_next_request_seqno(ring);
455
456         update_mboxes(ring, *seqno, mbox1_reg);
457         update_mboxes(ring, *seqno, mbox2_reg);
458         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
459         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
460         intel_ring_emit(ring, *seqno);
461         intel_ring_emit(ring, MI_USER_INTERRUPT);
462         intel_ring_advance(ring);
463
464         return 0;
465 }
466
467 /**
468  * intel_ring_sync - sync the waiter to the signaller on seqno
469  *
470  * @waiter - ring that is waiting
471  * @signaller - ring which has, or will signal
472  * @seqno - seqno which the waiter will block on
473  */
474 static int
475 intel_ring_sync(struct intel_ring_buffer *waiter,
476                 struct intel_ring_buffer *signaller,
477                 int ring,
478                 u32 seqno)
479 {
480         int ret;
481         u32 dw1 = MI_SEMAPHORE_MBOX |
482                   MI_SEMAPHORE_COMPARE |
483                   MI_SEMAPHORE_REGISTER;
484
485         ret = intel_ring_begin(waiter, 4);
486         if (ret)
487                 return ret;
488
489         intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]);
490         intel_ring_emit(waiter, seqno);
491         intel_ring_emit(waiter, 0);
492         intel_ring_emit(waiter, MI_NOOP);
493         intel_ring_advance(waiter);
494
495         return 0;
496 }
497
498 /* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */
499 int
500 render_ring_sync_to(struct intel_ring_buffer *waiter,
501                     struct intel_ring_buffer *signaller,
502                     u32 seqno)
503 {
504         WARN_ON(signaller->semaphore_register[RCS] == MI_SEMAPHORE_SYNC_INVALID);
505         return intel_ring_sync(waiter,
506                                signaller,
507                                RCS,
508                                seqno);
509 }
510
511 /* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */
512 int
513 gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
514                       struct intel_ring_buffer *signaller,
515                       u32 seqno)
516 {
517         WARN_ON(signaller->semaphore_register[VCS] == MI_SEMAPHORE_SYNC_INVALID);
518         return intel_ring_sync(waiter,
519                                signaller,
520                                VCS,
521                                seqno);
522 }
523
524 /* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */
525 int
526 gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
527                       struct intel_ring_buffer *signaller,
528                       u32 seqno)
529 {
530         WARN_ON(signaller->semaphore_register[BCS] == MI_SEMAPHORE_SYNC_INVALID);
531         return intel_ring_sync(waiter,
532                                signaller,
533                                BCS,
534                                seqno);
535 }
536
537
538
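/*
 * Emit a depth-stalling PIPE_CONTROL qword write to the given scratch
 * address; pc_render_add_request() uses a chain of these to flush its
 * earlier writes out to memory before raising the interrupt.
 */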
539 #define PIPE_CONTROL_FLUSH(ring__, addr__)                                      \
540 do {                                                                    \
541         intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |                \
542                  PIPE_CONTROL_DEPTH_STALL);                             \
543         intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);                    \
544         intel_ring_emit(ring__, 0);                                                     \
545         intel_ring_emit(ring__, 0);                                                     \
546 } while (0)
547
548 static int
549 pc_render_add_request(struct intel_ring_buffer *ring,
550                       u32 *result)
551 {
552         u32 seqno = i915_gem_next_request_seqno(ring);
553         struct pipe_control *pc = ring->private;
554         u32 scratch_addr = pc->gtt_offset + 128;
555         int ret;
556
557         /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
558          * incoherent with writes to memory, i.e. completely fubar,
559          * so we need to use PIPE_NOTIFY instead.
560          *
561          * However, we also need to workaround the qword write
562          * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
563          * memory before requesting an interrupt.
564          */
565         ret = intel_ring_begin(ring, 32);
566         if (ret)
567                 return ret;
568
569         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
570                         PIPE_CONTROL_WRITE_FLUSH |
571                         PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
572         intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
573         intel_ring_emit(ring, seqno);
574         intel_ring_emit(ring, 0);
575         PIPE_CONTROL_FLUSH(ring, scratch_addr);
576         scratch_addr += 128; /* write to separate cachelines */
577         PIPE_CONTROL_FLUSH(ring, scratch_addr);
578         scratch_addr += 128;
579         PIPE_CONTROL_FLUSH(ring, scratch_addr);
580         scratch_addr += 128;
581         PIPE_CONTROL_FLUSH(ring, scratch_addr);
582         scratch_addr += 128;
583         PIPE_CONTROL_FLUSH(ring, scratch_addr);
584         scratch_addr += 128;
585         PIPE_CONTROL_FLUSH(ring, scratch_addr);
586
587         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
588                         PIPE_CONTROL_WRITE_FLUSH |
589                         PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
590                         PIPE_CONTROL_NOTIFY);
591         intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
592         intel_ring_emit(ring, seqno);
593         intel_ring_emit(ring, 0);
594         intel_ring_advance(ring);
595
596         *result = seqno;
597         return 0;
598 }
599
600 static int
601 render_ring_add_request(struct intel_ring_buffer *ring,
602                         u32 *result)
603 {
604         u32 seqno = i915_gem_next_request_seqno(ring);
605         int ret;
606
607         ret = intel_ring_begin(ring, 4);
608         if (ret)
609                 return ret;
610
611         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
612         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
613         intel_ring_emit(ring, seqno);
614         intel_ring_emit(ring, MI_USER_INTERRUPT);
615         intel_ring_advance(ring);
616
617         *result = seqno;
618         return 0;
619 }
620
621 static u32
622 gen6_ring_get_seqno(struct intel_ring_buffer *ring)
623 {
624         struct drm_device *dev = ring->dev;
625
626         /* Workaround to force correct ordering between irq and seqno writes on
627          * ivb (and maybe also on snb) by reading from a CS register (like
628          * ACTHD) before reading the status page. */
629         if (IS_GEN6(dev) || IS_GEN7(dev))
630                 intel_ring_get_active_head(ring);
631         return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
632 }
633
634 static u32
635 ring_get_seqno(struct intel_ring_buffer *ring)
636 {
637         return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
638 }
639
640 static u32
641 pc_render_get_seqno(struct intel_ring_buffer *ring)
642 {
643         struct pipe_control *pc = ring->private;
644         return pc->cpu_page[0];
645 }
646
647 static void
648 ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
649 {
650         dev_priv->gt_irq_mask &= ~mask;
651         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
652         POSTING_READ(GTIMR);
653 }
654
655 static void
656 ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
657 {
658         dev_priv->gt_irq_mask |= mask;
659         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
660         POSTING_READ(GTIMR);
661 }
662
663 static void
664 i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
665 {
666         dev_priv->irq_mask &= ~mask;
667         I915_WRITE(IMR, dev_priv->irq_mask);
668         POSTING_READ(IMR);
669 }
670
671 static void
672 i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
673 {
674         dev_priv->irq_mask |= mask;
675         I915_WRITE(IMR, dev_priv->irq_mask);
676         POSTING_READ(IMR);
677 }
678
679 static bool
680 render_ring_get_irq(struct intel_ring_buffer *ring)
681 {
682         struct drm_device *dev = ring->dev;
683         drm_i915_private_t *dev_priv = dev->dev_private;
684
685         if (!dev->irq_enabled)
686                 return false;
687
688         spin_lock(&ring->irq_lock);
689         if (ring->irq_refcount++ == 0) {
690                 if (HAS_PCH_SPLIT(dev))
691                         ironlake_enable_irq(dev_priv,
692                                             GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
693                 else
694                         i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
695         }
696         spin_unlock(&ring->irq_lock);
697
698         return true;
699 }
700
701 static void
702 render_ring_put_irq(struct intel_ring_buffer *ring)
703 {
704         struct drm_device *dev = ring->dev;
705         drm_i915_private_t *dev_priv = dev->dev_private;
706
707         spin_lock(&ring->irq_lock);
708         if (--ring->irq_refcount == 0) {
709                 if (HAS_PCH_SPLIT(dev))
710                         ironlake_disable_irq(dev_priv,
711                                              GT_USER_INTERRUPT |
712                                              GT_PIPE_NOTIFY);
713                 else
714                         i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
715         }
716         spin_unlock(&ring->irq_lock);
717 }
718
719 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
720 {
721         struct drm_device *dev = ring->dev;
722         drm_i915_private_t *dev_priv = ring->dev->dev_private;
723         u32 mmio = 0;
724
725         /* The ring status page addresses are no longer next to the rest of
726          * the ring registers as of gen7.
727          */
728         if (IS_GEN7(dev)) {
729                 switch (ring->id) {
730                 case RCS:
731                         mmio = RENDER_HWS_PGA_GEN7;
732                         break;
733                 case BCS:
734                         mmio = BLT_HWS_PGA_GEN7;
735                         break;
736                 case VCS:
737                         mmio = BSD_HWS_PGA_GEN7;
738                         break;
739                 }
740         } else if (IS_GEN6(ring->dev)) {
741                 mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
742         } else {
743                 mmio = RING_HWS_PGA(ring->mmio_base);
744         }
745
746         I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
747         POSTING_READ(mmio);
748 }
749
750 static int
751 bsd_ring_flush(struct intel_ring_buffer *ring,
752                u32     invalidate_domains,
753                u32     flush_domains)
754 {
755         int ret;
756
757         ret = intel_ring_begin(ring, 2);
758         if (ret)
759                 return ret;
760
761         intel_ring_emit(ring, MI_FLUSH);
762         intel_ring_emit(ring, MI_NOOP);
763         intel_ring_advance(ring);
764         return 0;
765 }
766
767 static int
768 ring_add_request(struct intel_ring_buffer *ring,
769                  u32 *result)
770 {
771         u32 seqno;
772         int ret;
773
774         ret = intel_ring_begin(ring, 4);
775         if (ret)
776                 return ret;
777
778         seqno = i915_gem_next_request_seqno(ring);
779
780         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
781         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
782         intel_ring_emit(ring, seqno);
783         intel_ring_emit(ring, MI_USER_INTERRUPT);
784         intel_ring_advance(ring);
785
786         *result = seqno;
787         return 0;
788 }
789
790 static bool
791 gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
792 {
793         struct drm_device *dev = ring->dev;
794         drm_i915_private_t *dev_priv = dev->dev_private;
795
796         if (!dev->irq_enabled)
797                 return false;
798
799         /* It looks like we need to prevent the gt from suspending while waiting
800          * for a notify irq, otherwise irqs seem to get lost on at least the
801          * blt/bsd rings on ivb. */
802         gen6_gt_force_wake_get(dev_priv);
803
804         spin_lock(&ring->irq_lock);
805         if (ring->irq_refcount++ == 0) {
806                 ring->irq_mask &= ~rflag;
807                 I915_WRITE_IMR(ring, ring->irq_mask);
808                 ironlake_enable_irq(dev_priv, gflag);
809         }
810         spin_unlock(&ring->irq_lock);
811
812         return true;
813 }
814
815 static void
816 gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
817 {
818         struct drm_device *dev = ring->dev;
819         drm_i915_private_t *dev_priv = dev->dev_private;
820
821         spin_lock(&ring->irq_lock);
822         if (--ring->irq_refcount == 0) {
823                 ring->irq_mask |= rflag;
824                 I915_WRITE_IMR(ring, ring->irq_mask);
825                 ironlake_disable_irq(dev_priv, gflag);
826         }
827         spin_unlock(&ring->irq_lock);
828
829         gen6_gt_force_wake_put(dev_priv);
830 }
831
832 static bool
833 bsd_ring_get_irq(struct intel_ring_buffer *ring)
834 {
835         struct drm_device *dev = ring->dev;
836         drm_i915_private_t *dev_priv = dev->dev_private;
837
838         if (!dev->irq_enabled)
839                 return false;
840
841         spin_lock(&ring->irq_lock);
842         if (ring->irq_refcount++ == 0) {
843                 if (IS_G4X(dev))
844                         i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
845                 else
846                         ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
847         }
848         spin_unlock(&ring->irq_lock);
849
850         return true;
851 }
852 static void
853 bsd_ring_put_irq(struct intel_ring_buffer *ring)
854 {
855         struct drm_device *dev = ring->dev;
856         drm_i915_private_t *dev_priv = dev->dev_private;
857
858         spin_lock(&ring->irq_lock);
859         if (--ring->irq_refcount == 0) {
860                 if (IS_G4X(dev))
861                         i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
862                 else
863                         ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
864         }
865         spin_unlock(&ring->irq_lock);
866 }
867
868 static int
869 ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
870 {
871         int ret;
872
873         ret = intel_ring_begin(ring, 2);
874         if (ret)
875                 return ret;
876
877         intel_ring_emit(ring,
878                         MI_BATCH_BUFFER_START | (2 << 6) |
879                         MI_BATCH_NON_SECURE_I965);
880         intel_ring_emit(ring, offset);
881         intel_ring_advance(ring);
882
883         return 0;
884 }
885
886 static int
887 render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
888                                 u32 offset, u32 len)
889 {
890         struct drm_device *dev = ring->dev;
891         int ret;
892
893         if (IS_I830(dev) || IS_845G(dev)) {
894                 ret = intel_ring_begin(ring, 4);
895                 if (ret)
896                         return ret;
897
898                 intel_ring_emit(ring, MI_BATCH_BUFFER);
899                 intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
900                 intel_ring_emit(ring, offset + len - 8);
901                 intel_ring_emit(ring, 0);
902         } else {
903                 ret = intel_ring_begin(ring, 2);
904                 if (ret)
905                         return ret;
906
907                 if (INTEL_INFO(dev)->gen >= 4) {
908                         intel_ring_emit(ring,
909                                         MI_BATCH_BUFFER_START | (2 << 6) |
910                                         MI_BATCH_NON_SECURE_I965);
911                         intel_ring_emit(ring, offset);
912                 } else {
913                         intel_ring_emit(ring,
914                                         MI_BATCH_BUFFER_START | (2 << 6));
915                         intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
916                 }
917         }
918         intel_ring_advance(ring);
919
920         return 0;
921 }
922
923 static void cleanup_status_page(struct intel_ring_buffer *ring)
924 {
925         drm_i915_private_t *dev_priv = ring->dev->dev_private;
926         struct drm_i915_gem_object *obj;
927
928         obj = ring->status_page.obj;
929         if (obj == NULL)
930                 return;
931
932         kunmap(obj->pages[0]);
933         i915_gem_object_unpin(obj);
934         drm_gem_object_unreference(&obj->base);
935         ring->status_page.obj = NULL;
936
937         memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
938 }
939
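/*
 * Allocate and pin a page for the hardware status page, map it for CPU
 * access and point the ring's HWS register at it.
 */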
940 static int init_status_page(struct intel_ring_buffer *ring)
941 {
942         struct drm_device *dev = ring->dev;
943         drm_i915_private_t *dev_priv = dev->dev_private;
944         struct drm_i915_gem_object *obj;
945         int ret;
946
947         obj = i915_gem_alloc_object(dev, 4096);
948         if (obj == NULL) {
949                 DRM_ERROR("Failed to allocate status page\n");
950                 ret = -ENOMEM;
951                 goto err;
952         }
953
954         i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
955
956         ret = i915_gem_object_pin(obj, 4096, true);
957         if (ret != 0)
958                 goto err_unref;
960
961         ring->status_page.gfx_addr = obj->gtt_offset;
962         ring->status_page.page_addr = kmap(obj->pages[0]);
963         if (ring->status_page.page_addr == NULL) {
964                 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
965                 goto err_unpin;
966         }
967         ring->status_page.obj = obj;
968         memset(ring->status_page.page_addr, 0, PAGE_SIZE);
969
970         intel_ring_setup_status_page(ring);
971         DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
972                         ring->name, ring->status_page.gfx_addr);
973
974         return 0;
975
976 err_unpin:
977         i915_gem_object_unpin(obj);
978 err_unref:
979         drm_gem_object_unreference(&obj->base);
980 err:
981         return ret;
982 }
983
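/*
 * Common ring setup: initialise the bookkeeping lists, create the status
 * page if the hardware needs one, allocate and pin the ring object, map it
 * through the GTT aperture and finally run the ring-specific init hook.
 */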
984 int intel_init_ring_buffer(struct drm_device *dev,
985                            struct intel_ring_buffer *ring)
986 {
987         struct drm_i915_gem_object *obj;
988         int ret;
989
990         ring->dev = dev;
991         INIT_LIST_HEAD(&ring->active_list);
992         INIT_LIST_HEAD(&ring->request_list);
993         INIT_LIST_HEAD(&ring->gpu_write_list);
994
995         init_waitqueue_head(&ring->irq_queue);
996         spin_lock_init(&ring->irq_lock);
997         ring->irq_mask = ~0;
998
999         if (I915_NEED_GFX_HWS(dev)) {
1000                 ret = init_status_page(ring);
1001                 if (ret)
1002                         return ret;
1003         }
1004
1005         obj = i915_gem_alloc_object(dev, ring->size);
1006         if (obj == NULL) {
1007                 DRM_ERROR("Failed to allocate ringbuffer\n");
1008                 ret = -ENOMEM;
1009                 goto err_hws;
1010         }
1011
1012         ring->obj = obj;
1013
1014         ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
1015         if (ret)
1016                 goto err_unref;
1017
1018         ring->map.size = ring->size;
1019         ring->map.offset = dev->agp->base + obj->gtt_offset;
1020         ring->map.type = 0;
1021         ring->map.flags = 0;
1022         ring->map.mtrr = 0;
1023
1024         drm_core_ioremap_wc(&ring->map, dev);
1025         if (ring->map.handle == NULL) {
1026                 DRM_ERROR("Failed to map ringbuffer.\n");
1027                 ret = -EINVAL;
1028                 goto err_unpin;
1029         }
1030
1031         ring->virtual_start = ring->map.handle;
1032         ret = ring->init(ring);
1033         if (ret)
1034                 goto err_unmap;
1035
1036         /* Workaround an erratum on the i830 which causes a hang if
1037          * the TAIL pointer points to within the last 2 cachelines
1038          * of the buffer.
1039          */
1040         ring->effective_size = ring->size;
1041         if (IS_I830(ring->dev) || IS_845G(ring->dev))
1042                 ring->effective_size -= 128;
1043
1044         return 0;
1045
1046 err_unmap:
1047         drm_core_ioremapfree(&ring->map, dev);
1048 err_unpin:
1049         i915_gem_object_unpin(obj);
1050 err_unref:
1051         drm_gem_object_unreference(&obj->base);
1052         ring->obj = NULL;
1053 err_hws:
1054         cleanup_status_page(ring);
1055         return ret;
1056 }
1057
1058 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
1059 {
1060         struct drm_i915_private *dev_priv;
1061         int ret;
1062
1063         if (ring->obj == NULL)
1064                 return;
1065
1066         /* Disable the ring buffer. The ring must be idle at this point */
1067         dev_priv = ring->dev->dev_private;
1068         ret = intel_wait_ring_idle(ring);
1069         if (ret)
1070                 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
1071                           ring->name, ret);
1072
1073         I915_WRITE_CTL(ring, 0);
1074
1075         drm_core_ioremapfree(&ring->map, ring->dev);
1076
1077         i915_gem_object_unpin(ring->obj);
1078         drm_gem_object_unreference(&ring->obj->base);
1079         ring->obj = NULL;
1080
1081         if (ring->cleanup)
1082                 ring->cleanup(ring);
1083
1084         cleanup_status_page(ring);
1085 }
1086
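/*
 * Pad the rest of the ring with MI_NOOPs and wrap the tail back to the
 * start, waiting for the GPU to free up the remaining space if necessary.
 */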
1087 static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
1088 {
1089         unsigned int *virt;
1090         int rem = ring->size - ring->tail;
1091
1092         if (ring->space < rem) {
1093                 int ret = intel_wait_ring_buffer(ring, rem);
1094                 if (ret)
1095                         return ret;
1096         }
1097
1098         virt = (unsigned int *)(ring->virtual_start + ring->tail);
1099         rem /= 8;
1100         while (rem--) {
1101                 *virt++ = MI_NOOP;
1102                 *virt++ = MI_NOOP;
1103         }
1104
1105         ring->tail = 0;
1106         ring->space = ring_space(ring);
1107
1108         return 0;
1109 }
1110
1111 static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
1112 {
1113         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1114         bool was_interruptible;
1115         int ret;
1116
1117         /* XXX As we have not yet audited all the paths to check that
1118          * they are ready for ERESTARTSYS from intel_ring_begin, do not
1119          * allow us to be interruptible by a signal.
1120          */
1121         was_interruptible = dev_priv->mm.interruptible;
1122         dev_priv->mm.interruptible = false;
1123
1124         ret = i915_wait_request(ring, seqno, true);
1125
1126         dev_priv->mm.interruptible = was_interruptible;
1127
1128         return ret;
1129 }
1130
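/*
 * Try to make at least @n bytes available by retiring completed requests,
 * and failing that by waiting on the oldest request whose completion would
 * free enough space.
 */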
1131 static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
1132 {
1133         struct drm_i915_gem_request *request;
1134         u32 seqno = 0;
1135         int ret;
1136
1137         i915_gem_retire_requests_ring(ring);
1138
1139         if (ring->last_retired_head != -1) {
1140                 ring->head = ring->last_retired_head;
1141                 ring->last_retired_head = -1;
1142                 ring->space = ring_space(ring);
1143                 if (ring->space >= n)
1144                         return 0;
1145         }
1146
1147         list_for_each_entry(request, &ring->request_list, list) {
1148                 int space;
1149
1150                 if (request->tail == -1)
1151                         continue;
1152
1153                 space = request->tail - (ring->tail + 8);
1154                 if (space < 0)
1155                         space += ring->size;
1156                 if (space >= n) {
1157                         seqno = request->seqno;
1158                         break;
1159                 }
1160
1161                 /* Consume this request in case we need more space than
1162                  * is available and so need to prevent a race between
1163                  * updating last_retired_head and direct reads of
1164                  * I915_RING_HEAD. It also provides a nice sanity check.
1165                  */
1166                 request->tail = -1;
1167         }
1168
1169         if (seqno == 0)
1170                 return -ENOSPC;
1171
1172         ret = intel_ring_wait_seqno(ring, seqno);
1173         if (ret)
1174                 return ret;
1175
1176         if (WARN_ON(ring->last_retired_head == -1))
1177                 return -ENOSPC;
1178
1179         ring->head = ring->last_retired_head;
1180         ring->last_retired_head = -1;
1181         ring->space = ring_space(ring);
1182         if (WARN_ON(ring->space < n))
1183                 return -ENOSPC;
1184
1185         return 0;
1186 }
1187
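/*
 * Wait for @n bytes of ring space: first try to reclaim space from retired
 * requests, then poll the hardware head until enough space appears or the
 * wait times out.
 */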
1188 int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
1189 {
1190         struct drm_device *dev = ring->dev;
1191         struct drm_i915_private *dev_priv = dev->dev_private;
1192         unsigned long end;
1193         int ret;
1194
1195         ret = intel_ring_wait_request(ring, n);
1196         if (ret != -ENOSPC)
1197                 return ret;
1198
1199         trace_i915_ring_wait_begin(ring);
1200         if (drm_core_check_feature(dev, DRIVER_GEM))
1201                 /* With GEM the hangcheck timer should kick us out of the loop,
1202                  * leaving it early runs the risk of corrupting GEM state (due
1203                  * to running on almost untested codepaths). But on resume
1204                  * timers don't work yet, so prevent a complete hang in that
1205                  * case by choosing an insanely large timeout. */
1206                 end = jiffies + 60 * HZ;
1207         else
1208                 end = jiffies + 3 * HZ;
1209
1210         do {
1211                 ring->head = I915_READ_HEAD(ring);
1212                 ring->space = ring_space(ring);
1213                 if (ring->space >= n) {
1214                         trace_i915_ring_wait_end(ring);
1215                         return 0;
1216                 }
1217
1218                 if (dev->primary->master) {
1219                         struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1220                         if (master_priv->sarea_priv)
1221                                 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1222                 }
1223
1224                 msleep(1);
1225                 if (atomic_read(&dev_priv->mm.wedged))
1226                         return -EAGAIN;
1227         } while (!time_after(jiffies, end));
1228         trace_i915_ring_wait_end(ring);
1229         return -EBUSY;
1230 }
1231
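/*
 * Reserve space for @num_dwords dwords of commands, wrapping the ring
 * and/or waiting for space as required.  Pair with intel_ring_advance()
 * once the commands have been emitted.
 */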
1232 int intel_ring_begin(struct intel_ring_buffer *ring,
1233                      int num_dwords)
1234 {
1235         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1236         int n = 4*num_dwords;
1237         int ret;
1238
1239         if (unlikely(atomic_read(&dev_priv->mm.wedged)))
1240                 return -EIO;
1241
1242         if (unlikely(ring->tail + n > ring->effective_size)) {
1243                 ret = intel_wrap_ring_buffer(ring);
1244                 if (unlikely(ret))
1245                         return ret;
1246         }
1247
1248         if (unlikely(ring->space < n)) {
1249                 ret = intel_wait_ring_buffer(ring, n);
1250                 if (unlikely(ret))
1251                         return ret;
1252         }
1253
1254         ring->space -= n;
1255         return 0;
1256 }
1257
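/*
 * Publish the new tail pointer (masked to the ring size) so the GPU starts
 * executing the commands emitted since intel_ring_begin().
 */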
1258 void intel_ring_advance(struct intel_ring_buffer *ring)
1259 {
1260         ring->tail &= ring->size - 1;
1261         ring->write_tail(ring, ring->tail);
1262 }
1263
1264 static const struct intel_ring_buffer render_ring = {
1265         .name                   = "render ring",
1266         .id                     = RCS,
1267         .mmio_base              = RENDER_RING_BASE,
1268         .size                   = 32 * PAGE_SIZE,
1269         .init                   = init_render_ring,
1270         .write_tail             = ring_write_tail,
1271         .flush                  = render_ring_flush,
1272         .add_request            = render_ring_add_request,
1273         .get_seqno              = ring_get_seqno,
1274         .irq_get                = render_ring_get_irq,
1275         .irq_put                = render_ring_put_irq,
1276         .dispatch_execbuffer    = render_ring_dispatch_execbuffer,
1277         .cleanup                = render_ring_cleanup,
1278         .sync_to                = render_ring_sync_to,
1279         .semaphore_register     = {MI_SEMAPHORE_SYNC_INVALID,
1280                                    MI_SEMAPHORE_SYNC_RV,
1281                                    MI_SEMAPHORE_SYNC_RB},
1282         .signal_mbox            = {GEN6_VRSYNC, GEN6_BRSYNC},
1283 };
1284
1285 /* ring buffer for bit-stream decoder */
1286
1287 static const struct intel_ring_buffer bsd_ring = {
1288         .name                   = "bsd ring",
1289         .id                     = VCS,
1290         .mmio_base              = BSD_RING_BASE,
1291         .size                   = 32 * PAGE_SIZE,
1292         .init                   = init_ring_common,
1293         .write_tail             = ring_write_tail,
1294         .flush                  = bsd_ring_flush,
1295         .add_request            = ring_add_request,
1296         .get_seqno              = ring_get_seqno,
1297         .irq_get                = bsd_ring_get_irq,
1298         .irq_put                = bsd_ring_put_irq,
1299         .dispatch_execbuffer    = ring_dispatch_execbuffer,
1300 };
1301
1302
1303 static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
1304                                      u32 value)
1305 {
1306         drm_i915_private_t *dev_priv = ring->dev->dev_private;
1307
1308         /* Every tail move must follow the sequence below */
1309         I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1310                 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
1311                 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
1312         I915_WRITE(GEN6_BSD_RNCID, 0x0);
1313
1314         if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
1315                       GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
1316                      50))
1317                 DRM_ERROR("timed out waiting for IDLE Indicator\n");
1318
1319         I915_WRITE_TAIL(ring, value);
1320         I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1321                 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
1322                 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
1323 }
1324
1325 static int gen6_ring_flush(struct intel_ring_buffer *ring,
1326                            u32 invalidate, u32 flush)
1327 {
1328         uint32_t cmd;
1329         int ret;
1330
1331         ret = intel_ring_begin(ring, 4);
1332         if (ret)
1333                 return ret;
1334
1335         cmd = MI_FLUSH_DW;
1336         if (invalidate & I915_GEM_GPU_DOMAINS)
1337                 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
1338         intel_ring_emit(ring, cmd);
1339         intel_ring_emit(ring, 0);
1340         intel_ring_emit(ring, 0);
1341         intel_ring_emit(ring, MI_NOOP);
1342         intel_ring_advance(ring);
1343         return 0;
1344 }
1345
1346 static int
1347 gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1348                               u32 offset, u32 len)
1349 {
1350         int ret;
1351
1352         ret = intel_ring_begin(ring, 2);
1353         if (ret)
1354                 return ret;
1355
1356         intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
1357         /* bit0-7 is the length on GEN6+ */
1358         intel_ring_emit(ring, offset);
1359         intel_ring_advance(ring);
1360
1361         return 0;
1362 }
1363
1364 static bool
1365 gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
1366 {
1367         return gen6_ring_get_irq(ring,
1368                                  GT_USER_INTERRUPT,
1369                                  GEN6_RENDER_USER_INTERRUPT);
1370 }
1371
1372 static void
1373 gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
1374 {
1375         return gen6_ring_put_irq(ring,
1376                                  GT_USER_INTERRUPT,
1377                                  GEN6_RENDER_USER_INTERRUPT);
1378 }
1379
1380 static bool
1381 gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
1382 {
1383         return gen6_ring_get_irq(ring,
1384                                  GT_GEN6_BSD_USER_INTERRUPT,
1385                                  GEN6_BSD_USER_INTERRUPT);
1386 }
1387
1388 static void
1389 gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
1390 {
1391         return gen6_ring_put_irq(ring,
1392                                  GT_GEN6_BSD_USER_INTERRUPT,
1393                                  GEN6_BSD_USER_INTERRUPT);
1394 }
1395
1396 /* ring buffer for Video Codec for Gen6+ */
1397 static const struct intel_ring_buffer gen6_bsd_ring = {
1398         .name                   = "gen6 bsd ring",
1399         .id                     = VCS,
1400         .mmio_base              = GEN6_BSD_RING_BASE,
1401         .size                   = 32 * PAGE_SIZE,
1402         .init                   = init_ring_common,
1403         .write_tail             = gen6_bsd_ring_write_tail,
1404         .flush                  = gen6_ring_flush,
1405         .add_request            = gen6_add_request,
1406         .get_seqno              = gen6_ring_get_seqno,
1407         .irq_get                = gen6_bsd_ring_get_irq,
1408         .irq_put                = gen6_bsd_ring_put_irq,
1409         .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
1410         .sync_to                = gen6_bsd_ring_sync_to,
1411         .semaphore_register     = {MI_SEMAPHORE_SYNC_VR,
1412                                    MI_SEMAPHORE_SYNC_INVALID,
1413                                    MI_SEMAPHORE_SYNC_VB},
1414         .signal_mbox            = {GEN6_RVSYNC, GEN6_BVSYNC},
1415 };
1416
1417 /* Blitter support (SandyBridge+) */
1418
1419 static bool
1420 blt_ring_get_irq(struct intel_ring_buffer *ring)
1421 {
1422         return gen6_ring_get_irq(ring,
1423                                  GT_BLT_USER_INTERRUPT,
1424                                  GEN6_BLITTER_USER_INTERRUPT);
1425 }
1426
1427 static void
1428 blt_ring_put_irq(struct intel_ring_buffer *ring)
1429 {
1430         gen6_ring_put_irq(ring,
1431                           GT_BLT_USER_INTERRUPT,
1432                           GEN6_BLITTER_USER_INTERRUPT);
1433 }
1434
1435 static int blt_ring_flush(struct intel_ring_buffer *ring,
1436                           u32 invalidate, u32 flush)
1437 {
1438         uint32_t cmd;
1439         int ret;
1440
1441         ret = intel_ring_begin(ring, 4);
1442         if (ret)
1443                 return ret;
1444
1445         cmd = MI_FLUSH_DW;
1446         if (invalidate & I915_GEM_DOMAIN_RENDER)
1447                 cmd |= MI_INVALIDATE_TLB;
1448         intel_ring_emit(ring, cmd);
1449         intel_ring_emit(ring, 0);
1450         intel_ring_emit(ring, 0);
1451         intel_ring_emit(ring, MI_NOOP);
1452         intel_ring_advance(ring);
1453         return 0;
1454 }
1455
1456 static const struct intel_ring_buffer gen6_blt_ring = {
1457         .name                   = "blt ring",
1458         .id                     = BCS,
1459         .mmio_base              = BLT_RING_BASE,
1460         .size                   = 32 * PAGE_SIZE,
1461         .init                   = init_ring_common,
1462         .write_tail             = ring_write_tail,
1463         .flush                  = blt_ring_flush,
1464         .add_request            = gen6_add_request,
1465         .get_seqno              = gen6_ring_get_seqno,
1466         .irq_get                = blt_ring_get_irq,
1467         .irq_put                = blt_ring_put_irq,
1468         .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
1469         .sync_to                = gen6_blt_ring_sync_to,
1470         .semaphore_register     = {MI_SEMAPHORE_SYNC_BR,
1471                                    MI_SEMAPHORE_SYNC_BV,
1472                                    MI_SEMAPHORE_SYNC_INVALID},
1473         .signal_mbox            = {GEN6_RBSYNC, GEN6_VBSYNC},
1474 };
1475
1476 int intel_init_render_ring_buffer(struct drm_device *dev)
1477 {
1478         drm_i915_private_t *dev_priv = dev->dev_private;
1479         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1480
1481         *ring = render_ring;
1482         if (INTEL_INFO(dev)->gen >= 6) {
1483                 ring->add_request = gen6_add_request;
1484                 ring->flush = gen6_render_ring_flush;
1485                 ring->irq_get = gen6_render_ring_get_irq;
1486                 ring->irq_put = gen6_render_ring_put_irq;
1487                 ring->get_seqno = gen6_ring_get_seqno;
1488         } else if (IS_GEN5(dev)) {
1489                 ring->add_request = pc_render_add_request;
1490                 ring->get_seqno = pc_render_get_seqno;
1491         }
1492
1493         if (!I915_NEED_GFX_HWS(dev)) {
1494                 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1495                 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1496         }
1497
1498         return intel_init_ring_buffer(dev, ring);
1499 }
1500
1501 int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1502 {
1503         drm_i915_private_t *dev_priv = dev->dev_private;
1504         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1505
1506         *ring = render_ring;
1507         if (INTEL_INFO(dev)->gen >= 6) {
1508                 ring->add_request = gen6_add_request;
1509                 ring->irq_get = gen6_render_ring_get_irq;
1510                 ring->irq_put = gen6_render_ring_put_irq;
1511         } else if (IS_GEN5(dev)) {
1512                 ring->add_request = pc_render_add_request;
1513                 ring->get_seqno = pc_render_get_seqno;
1514         }
1515
1516         if (!I915_NEED_GFX_HWS(dev))
1517                 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1518
1519         ring->dev = dev;
1520         INIT_LIST_HEAD(&ring->active_list);
1521         INIT_LIST_HEAD(&ring->request_list);
1522         INIT_LIST_HEAD(&ring->gpu_write_list);
1523
1524         ring->size = size;
1525         ring->effective_size = ring->size;
1526         if (IS_I830(ring->dev))
1527                 ring->effective_size -= 128;
1528
1529         ring->map.offset = start;
1530         ring->map.size = size;
1531         ring->map.type = 0;
1532         ring->map.flags = 0;
1533         ring->map.mtrr = 0;
1534
1535         drm_core_ioremap_wc(&ring->map, dev);
1536         if (ring->map.handle == NULL) {
1537                 DRM_ERROR("can not ioremap virtual address for"
1538                           " ring buffer\n");
1539                 return -ENOMEM;
1540         }
1541
1542         ring->virtual_start = (void __force __iomem *)ring->map.handle;
1543         return 0;
1544 }
1545
1546 int intel_init_bsd_ring_buffer(struct drm_device *dev)
1547 {
1548         drm_i915_private_t *dev_priv = dev->dev_private;
1549         struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
1550
1551         if (IS_GEN6(dev) || IS_GEN7(dev))
1552                 *ring = gen6_bsd_ring;
1553         else
1554                 *ring = bsd_ring;
1555
1556         return intel_init_ring_buffer(dev, ring);
1557 }
1558
1559 int intel_init_blt_ring_buffer(struct drm_device *dev)
1560 {
1561         drm_i915_private_t *dev_priv = dev->dev_private;
1562         struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
1563
1564         *ring = gen6_blt_ring;
1565
1566         return intel_init_ring_buffer(dev, ring);
1567 }