/*
 * dev/pci/drm/i915/gt/intel_ring.h
 *
 * NOTE(static analysis): scan-build reports "Value stored to 'head' during
 * its initialization is never read" at the READ_ONCE() in
 * assert_ring_tail_valid() — 'head' is only consumed by GEM_BUG_ON(),
 * which compiles to ((void)0) when debugging is disabled.
 */
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */
| 6 | |
| 7 | #ifndef INTEL_RING_H | 
| 8 | #define INTEL_RING_H | 
| 9 | |
| 10 | #include "i915_gem.h" /* GEM_BUG_ON */ | 
| 11 | #include "i915_request.h" | 
| 12 | #include "intel_ring_types.h" | 
| 13 | |
| 14 | struct intel_engine_cs; | 
| 15 | |
| 16 | struct intel_ring * | 
| 17 | intel_engine_create_ring(struct intel_engine_cs *engine, int size); | 
| 18 | |
| 19 | u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords); | 
| 20 | int intel_ring_cacheline_align(struct i915_request *rq); | 
| 21 | |
| 22 | unsigned int intel_ring_update_space(struct intel_ring *ring); | 
| 23 | |
| 24 | void __intel_ring_pin(struct intel_ring *ring); | 
| 25 | int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww); | 
| 26 | void intel_ring_unpin(struct intel_ring *ring); | 
| 27 | void intel_ring_reset(struct intel_ring *ring, u32 tail); | 
| 28 | |
| 29 | void intel_ring_free(struct kref *ref); | 
| 30 | |
| 31 | static inline struct intel_ring *intel_ring_get(struct intel_ring *ring) | 
| 32 | { | 
| 33 | kref_get(&ring->ref); | 
| 34 | return ring; | 
| 35 | } | 
| 36 | |
| 37 | static inline void intel_ring_put(struct intel_ring *ring) | 
| 38 | { | 
| 39 | kref_put(&ring->ref, intel_ring_free); | 
| 40 | } | 
| 41 | |
| 42 | static inline void intel_ring_advance(struct i915_request *rq, u32 *cs) | 
| 43 | { | 
| 44 | /* Dummy function. | 
| 45 | * | 
| 46 | * This serves as a placeholder in the code so that the reader | 
| 47 | * can compare against the preceding intel_ring_begin() and | 
| 48 | * check that the number of dwords emitted matches the space | 
| 49 | * reserved for the command packet (i.e. the value passed to | 
| 50 | * intel_ring_begin()). | 
| 51 | */ | 
| 52 | GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs)((void)0); | 
| 53 | } | 
| 54 | |
| 55 | static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos) | 
| 56 | { | 
| 57 | return pos & (ring->size - 1); | 
| 58 | } | 
| 59 | |
| 60 | static inline int intel_ring_direction(const struct intel_ring *ring, | 
| 61 | u32 next, u32 prev) | 
| 62 | { | 
| 63 | typecheck(typeof(ring->size), next)1; | 
| 64 | typecheck(typeof(ring->size), prev)1; | 
| 65 | return (next - prev) << ring->wrap; | 
| 66 | } | 
| 67 | |
| 68 | static inline bool_Bool | 
| 69 | intel_ring_offset_valid(const struct intel_ring *ring, | 
| 70 | unsigned int pos) | 
| 71 | { | 
| 72 | if (pos & -ring->size) /* must be strictly within the ring */ | 
| 73 | return false0; | 
| 74 | |
| 75 | if (!IS_ALIGNED(pos, 8)(((pos) & ((8) - 1)) == 0)) /* must be qword aligned */ | 
| 76 | return false0; | 
| 77 | |
| 78 | return true1; | 
| 79 | } | 
| 80 | |
| 81 | static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr) | 
| 82 | { | 
| 83 | /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */ | 
| 84 | u32 offset = addr - rq->ring->vaddr; | 
| 85 | GEM_BUG_ON(offset > rq->ring->size)((void)0); | 
| 86 | return intel_ring_wrap(rq->ring, offset); | 
| 87 | } | 
| 88 | |
| 89 | static inline void | 
| 90 | assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail) | 
| 91 | { | 
| 92 | unsigned int head = READ_ONCE(ring->head)({ typeof(ring->head) __tmp = *(volatile typeof(ring->head ) *)&(ring->head); membar_datadep_consumer(); __tmp; } ); | 
| Value stored to 'head' during its initialization is never read | |
| 93 | |
| 94 | GEM_BUG_ON(!intel_ring_offset_valid(ring, tail))((void)0); | 
| 95 | |
| 96 | /* | 
| 97 | * "Ring Buffer Use" | 
| 98 | * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 | 
| 99 | * Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5 | 
| 100 | * Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5 | 
| 101 | * "If the Ring Buffer Head Pointer and the Tail Pointer are on the | 
| 102 | * same cacheline, the Head Pointer must not be greater than the Tail | 
| 103 | * Pointer." | 
| 104 | * | 
| 105 | * We use ring->head as the last known location of the actual RING_HEAD, | 
| 106 | * it may have advanced but in the worst case it is equally the same | 
| 107 | * as ring->head and so we should never program RING_TAIL to advance | 
| 108 | * into the same cacheline as ring->head. | 
| 109 | */ | 
| 110 | #define cacheline(a) round_down(a, CACHELINE_BYTES)(((a) / (64)) * (64)) | 
| 111 | GEM_BUG_ON(cacheline(tail) == cacheline(head) && tail < head)((void)0); | 
| 112 | #undef cacheline | 
| 113 | } | 
| 114 | |
| 115 | static inline unsigned int | 
| 116 | intel_ring_set_tail(struct intel_ring *ring, unsigned int tail) | 
| 117 | { | 
| 118 | /* Whilst writes to the tail are strictly order, there is no | 
| 119 | * serialisation between readers and the writers. The tail may be | 
| 120 | * read by i915_request_retire() just as it is being updated | 
| 121 | * by execlists, as although the breadcrumb is complete, the context | 
| 122 | * switch hasn't been seen. | 
| 123 | */ | 
| 124 | assert_ring_tail_valid(ring, tail); | 
| 125 | ring->tail = tail; | 
| 126 | return tail; | 
| 127 | } | 
| 128 | |
| 129 | static inline unsigned int | 
| 130 | __intel_ring_space(unsigned int head, unsigned int tail, unsigned int size) | 
| 131 | { | 
| 132 | /* | 
| 133 | * "If the Ring Buffer Head Pointer and the Tail Pointer are on the | 
| 134 | * same cacheline, the Head Pointer must not be greater than the Tail | 
| 135 | * Pointer." | 
| 136 | */ | 
| 137 | GEM_BUG_ON(!is_power_of_2(size))((void)0); | 
| 138 | return (head - tail - CACHELINE_BYTES64) & (size - 1); | 
| 139 | } | 
| 140 | |
| 141 | #endif /* INTEL_RING_H */ |