1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
6 #include <net/pkt_sched.h>
8 /* save one operation at the end for additional operation at list change */
9 #define TSNEP_MAX_GCL_NUM (TSNEP_GCL_COUNT - 1)
/* Validate a taprio offload request against the limits of the tsnep gate
 * control hardware. Returns 0 if the list is acceptable; the negative
 * errno returns for each failed check are elided in this view.
 */
11 static int tsnep_validate_gcl(struct tc_taprio_qopt_offload *qopt)
/* a cycle time is mandatory */
16 if (!qopt->cycle_time)
/* list must fit into hardware; one slot is reserved for list changes
 * (see TSNEP_MAX_GCL_NUM definition above)
 */
18 if (qopt->num_entries > TSNEP_MAX_GCL_NUM)
21 for (i = 0; i < qopt->num_entries; i++) {
/* only plain "set gates" operations are supported */
22 if (qopt->entries[i].command != TC_TAPRIO_CMD_SET_GATES)
/* gate mask must not contain bits outside the supported queues */
24 if (qopt->entries[i].gate_mask & ~TSNEP_GCL_MASK)
/* hardware requires a minimum interval per operation */
26 if (qopt->entries[i].interval < TSNEP_GCL_MIN_INTERVAL)
28 cycle_time += qopt->entries[i].interval;
/* entry intervals must add up exactly to the cycle time */
30 if (qopt->cycle_time != cycle_time)
/* an extension as long as the cycle itself is rejected */
32 if (qopt->cycle_time_extension >= qopt->cycle_time)
/* Write one gate control list operation (properties word + interval word)
 * both to the CPU-side shadow copy in @gcl and to the hardware list memory
 * at slot @index; optionally flushes the posted write (flush path elided).
 */
38 static void tsnep_write_gcl_operation(struct tsnep_gcl *gcl, int index,
39 u32 properties, u32 interval, bool flush)
/* MMIO address of the operation slot within the list memory */
41 void __iomem *addr = gcl->addr +
42 sizeof(struct tsnep_gcl_operation) * index;
/* keep a shadow of what was written, for later read-back-free updates */
44 gcl->operation[index].properties = properties;
45 gcl->operation[index].interval = interval;
/* properties first, then interval, each a 32 bit register write */
47 iowrite32(properties, addr);
48 iowrite32(interval, addr + sizeof(u32));
51 /* flush write with read access */
/* Worst-case time needed to trigger a list change when the change falls
 * into the operation at @index; used by tsnep_write_gcl() to compute the
 * change limit. Sums operation intervals walking backwards through the
 * list (surrounding loop elided in this view), wrapping to the last
 * operation when the start of the list is passed.
 */
56 static u64 tsnep_change_duration(struct tsnep_gcl *gcl, int index)
61 /* change needs to be triggered one or two operations before start of
62 * new gate control list
63 * - change is triggered at start of operation (minimum one operation)
64 * - operation with adjusted interval is inserted on demand to exactly
65 * meet the start of the new gate control list (optional)
67 * additionally properties are read directly after start of previous
70 * therefore, three operations needs to be considered for the limit
75 duration += gcl->operation[index].interval;
/* wrap around: continue with the last operation of the list */
79 index = gcl->count - 1;
/* Program a complete gate control list from the taprio request @qopt into
 * @gcl (shadow copy and hardware list memory) and precompute the change
 * limit needed to switch to this list later.
 */
87 static void tsnep_write_gcl(struct tsnep_gcl *gcl,
88 struct tc_taprio_qopt_offload *qopt)
/* copy timing parameters; base_time is converted from ktime to ns */
95 gcl->base_time = ktime_to_ns(qopt->base_time);
96 gcl->cycle_time = qopt->cycle_time;
97 gcl->cycle_time_extension = qopt->cycle_time_extension;
99 for (i = 0; i < qopt->num_entries; i++) {
/* properties word carries the gate mask for this interval */
100 properties = qopt->entries[i].gate_mask;
/* final entry is flagged so hardware wraps back to the list start */
101 if (i == (qopt->num_entries - 1))
102 properties |= TSNEP_GCL_LAST;
104 tsnep_write_gcl_operation(gcl, i, properties,
105 qopt->entries[i].interval, true);
107 gcl->count = qopt->num_entries;
109 /* calculate change limit; i.e., the time needed between enable and
110 * start of new gate control list
113 /* case 1: extend cycle time for change
114 * - change duration of last operation
115 * - cycle time extension
117 extend = tsnep_change_duration(gcl, gcl->count - 1);
118 extend += gcl->cycle_time_extension;
120 /* case 2: cut cycle time for change
121 * - maximum change duration
124 for (i = 0; i < gcl->count; i++)
125 cut = max(cut, tsnep_change_duration(gcl, i));
127 /* use maximum, because the actual case (extend or cut) can be
128 * determined only after limit is known (chicken-and-egg problem)
130 gcl->change_limit = max(extend, cut);
/* Return the first cycle start time of @gcl that lies strictly after
 * @limit; if base_time is already past the limit it is returned as-is.
 */
133 static u64 tsnep_gcl_start_after(struct tsnep_gcl *gcl, u64 limit)
135 u64 start = gcl->base_time;
138 if (start <= limit) {
/* number of whole cycles between base_time and limit, then step
 * one cycle further to get past the limit
 */
139 n = div64_u64(limit - start, gcl->cycle_time);
140 start += (n + 1) * gcl->cycle_time;
/* Return the last cycle start time of @gcl before @limit.
 * NOTE(review): the conditional structure around the final subtraction is
 * elided in this view — presumably it steps back one cycle when the
 * computed start landed at/after the limit; confirm against full source.
 */
146 static u64 tsnep_gcl_start_before(struct tsnep_gcl *gcl, u64 limit)
148 u64 start = gcl->base_time;
/* advance by whole cycles up to the limit */
151 n = div64_u64(limit - start, gcl->cycle_time);
152 start += n * gcl->cycle_time;
/* step back one full cycle */
154 start -= gcl->cycle_time;
/* Arm the change to the new list at absolute time @change within the
 * currently running list @gcl. The hardware triggers the change at the
 * start of the operation before @index, so that operation's interval is
 * subtracted from the change time. Returns the adjusted change time
 * (return statement elided in this view).
 */
159 static u64 tsnep_set_gcl_change(struct tsnep_gcl *gcl, int index, u64 change,
162 /* previous operation triggers change and properties are evaluated at
/* wrap to the last operation when index reaches the list start */
166 index = gcl->count - 1;
169 change -= gcl->operation[index].interval;
171 /* optionally change to new list with additional operation in between */
173 void __iomem *addr = gcl->addr +
174 sizeof(struct tsnep_gcl_operation) * index;
/* mark the trigger operation so the inserted operation is executed */
176 gcl->operation[index].properties |= TSNEP_GCL_INSERT;
177 iowrite32(gcl->operation[index].properties, addr);
/* Undo list-change markers in @gcl: any operation carrying flags beyond
 * the gate mask and LAST bit (i.e., INSERT/CHANGE set for a pending list
 * change) is stripped back to its plain form, in shadow and in hardware.
 * Called when an armed change must be discarded (e.g., enable timeout).
 */
183 static void tsnep_clean_gcl(struct tsnep_gcl *gcl)
/* bits that are allowed to remain set on a normal operation */
186 u32 mask = TSNEP_GCL_LAST | TSNEP_GCL_MASK;
189 /* search for insert operation and reset properties */
190 for (i = 0; i < gcl->count; i++) {
191 if (gcl->operation[i].properties & ~mask) {
193 sizeof(struct tsnep_gcl_operation) * i;
/* clear the extra flags and write back to hardware */
195 gcl->operation[i].properties &= mask;
196 iowrite32(gcl->operation[i].properties, addr);
/* Insert an extra operation with gate state copied from operation @ref and
 * the given @interval into the reserved last slot of the list, flagged so
 * the hardware changes to the new list right after executing it. Returns
 * the adjusted change time from tsnep_set_gcl_change().
 */
203 static u64 tsnep_insert_gcl_operation(struct tsnep_gcl *gcl, int ref,
204 u64 change, u32 interval)
/* reuse the gate mask of the reference operation */
208 properties = gcl->operation[ref].properties & TSNEP_GCL_MASK;
209 /* change to new list directly after inserted operation */
210 properties |= TSNEP_GCL_CHANGE;
212 /* last operation of list is reserved to insert operation */
213 tsnep_write_gcl_operation(gcl, TSNEP_GCL_COUNT - 1, properties,
/* chain the inserted operation after @ref and arm the change */
216 return tsnep_set_gcl_change(gcl, ref, change, true);
/* Change lists by extending the running cycle: the last operation of @gcl
 * is replayed via the insert slot with its interval enlarged by
 * @extension, so the new list starts exactly at @start. Returns the
 * armed change time.
 */
219 static u64 tsnep_extend_gcl(struct tsnep_gcl *gcl, u64 start, u32 extension)
/* base the inserted operation on the last regular operation */
221 int ref = gcl->count - 1;
222 u32 interval = gcl->operation[ref].interval + extension;
/* inserted operation replaces the last one, so start one interval earlier */
224 start -= gcl->operation[ref].interval;
226 return tsnep_insert_gcl_operation(gcl, ref, start, interval);
/* Change lists by cutting the running cycle of @gcl short so the new list
 * starts at @start + @cycle_time: walk the operations until the shortened
 * cycle time is reached and either arm the change exactly at an operation
 * boundary or insert a shortened operation. Returns the armed change
 * time. (Loop-exit and declaration lines elided in this view.)
 */
229 static u64 tsnep_cut_gcl(struct tsnep_gcl *gcl, u64 start, u64 cycle_time)
234 /* find operation which shall be cutted */
235 for (i = 0; i < gcl->count; i++) {
236 u64 sum_tmp = sum + gcl->operation[i].interval;
239 /* sum up operations as long as cycle time is not exceeded */
240 if (sum_tmp > cycle_time)
243 /* remaining interval must be big enough for hardware */
244 interval = cycle_time - sum_tmp;
245 if (interval > 0 && interval < TSNEP_GCL_MIN_INTERVAL)
/* exact hit on an operation boundary */
250 if (sum == cycle_time) {
251 /* no need to cut operation itself or whole cycle
252 * => change exactly at operation
254 return tsnep_set_gcl_change(gcl, i, start + sum, false);
/* otherwise insert a shortened operation to meet the cut point */
256 return tsnep_insert_gcl_operation(gcl, i, start + sum,
/* Program start of the new gate control list @gcl and, if a list @curr is
 * currently active, arm the change away from it (extend or cut its last
 * cycle). Writes change and start times to hardware; returns 0 on
 * success (error-return lines elided in this view).
 */
260 static int tsnep_enable_gcl(struct tsnep_adapter *adapter,
261 struct tsnep_gcl *gcl, struct tsnep_gcl *curr)
267 /* estimate timeout limit after timeout enable, actually timeout limit
268 * in hardware will be earlier than estimate so we are on the safe side
270 tsnep_get_system_time(adapter, &system_time);
271 timeout = system_time + TSNEP_GC_TIMEOUT;
/* earliest feasible start: timeout plus time needed to change lists */
274 limit = timeout + curr->change_limit;
278 gcl->start_time = tsnep_gcl_start_after(gcl, limit);
280 /* gate control time register is only 32bit => time shall be in the near
281 * future (no driver support for far future implemented)
283 if ((gcl->start_time - system_time) >= U32_MAX)
287 /* change gate control list */
/* last cycle start of the running list before the new start time */
291 last = tsnep_gcl_start_before(curr, gcl->start_time);
/* new list starts exactly one cycle after "last" => plain cut */
292 if ((last + curr->cycle_time) == gcl->start_time)
293 change = tsnep_cut_gcl(curr, last,
294 gcl->start_time - last);
/* gap fits into the extension window (or is below the minimum
 * hardware interval) => extend the running cycle instead
 */
295 else if (((gcl->start_time - last) <=
296 curr->cycle_time_extension) ||
297 ((gcl->start_time - last) <= TSNEP_GCL_MIN_INTERVAL))
298 change = tsnep_extend_gcl(curr, last,
299 gcl->start_time - last);
/* otherwise cut the running cycle short */
301 change = tsnep_cut_gcl(curr, last,
302 gcl->start_time - last);
/* change time in the past would be ignored by hardware */
304 WARN_ON(change <= timeout);
/* only the low 32 bits fit into the change register */
306 iowrite32(change & 0xFFFFFFFF, adapter->addr + TSNEP_GC_CHANGE);
308 /* start gate control list */
309 WARN_ON(gcl->start_time <= timeout);
311 iowrite32(gcl->start_time & 0xFFFFFFFF,
312 adapter->addr + TSNEP_GC_TIME);
/* Handle a taprio qdisc offload request: validate the gate control list,
 * write it into the inactive hardware list (A or B), and enable it —
 * either directly or as a timed change away from the active list. A
 * retry path (goto target elided in this view) handles the case where the
 * enable timeout fired before the switch completed. Returns 0 on success
 * or a negative errno.
 */
318 static int tsnep_taprio(struct tsnep_adapter *adapter,
319 struct tc_taprio_qopt_offload *qopt)
321 struct tsnep_gcl *gcl;
322 struct tsnep_gcl *curr;
/* offload only possible if hardware supports gate control */
325 if (!adapter->gate_control)
329 /* disable gate control if active */
330 mutex_lock(&adapter->gate_control_lock);
332 if (adapter->gate_control_active) {
333 iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
334 adapter->gate_control_active = false;
337 mutex_unlock(&adapter->gate_control_lock);
342 retval = tsnep_validate_gcl(qopt);
346 mutex_lock(&adapter->gate_control_lock);
/* write new list into the currently unused slot (A or B) */
348 gcl = &adapter->gcl[adapter->next_gcl];
349 tsnep_write_gcl(gcl, qopt);
351 /* select current gate control list if active */
352 if (adapter->gate_control_active) {
353 if (adapter->next_gcl == 0)
354 curr = &adapter->gcl[1];
356 curr = &adapter->gcl[0];
362 /* start timeout which discards late enable, this helps ensuring
363 * that start/change time are in the future at enable
365 iowrite8(TSNEP_GC_ENABLE_TIMEOUT, adapter->addr + TSNEP_GC);
367 retval = tsnep_enable_gcl(adapter, gcl, curr);
/* drop the lock on the error path (elided) */
369 mutex_unlock(&adapter->gate_control_lock);
374 /* enable gate control list */
/* select list A or B depending on which slot was written */
375 if (adapter->next_gcl == 0)
376 iowrite8(TSNEP_GC_ENABLE_A, adapter->addr + TSNEP_GC);
378 iowrite8(TSNEP_GC_ENABLE_B, adapter->addr + TSNEP_GC);
380 /* done if timeout did not happen */
381 if (!(ioread32(adapter->addr + TSNEP_GC) &
382 TSNEP_GC_TIMEOUT_SIGNAL))
385 /* timeout is acknowledged with any enable */
386 iowrite8(TSNEP_GC_ENABLE_A, adapter->addr + TSNEP_GC);
/* undo any armed change in the still-running list before retrying */
389 tsnep_clean_gcl(curr);
391 /* retry because of timeout */
/* success: new list active, flip slot for the next request */
394 adapter->gate_control_active = true;
396 if (adapter->next_gcl == 0)
397 adapter->next_gcl = 1;
399 adapter->next_gcl = 0;
401 mutex_unlock(&adapter->gate_control_lock);
/* ndo_setup_tc entry point: dispatch TC offload requests to the matching
 * handler; only taprio is handled in the visible part of this switch.
 */
406 int tsnep_tc_setup(struct net_device *netdev, enum tc_setup_type type,
409 struct tsnep_adapter *adapter = netdev_priv(netdev);
412 case TC_SETUP_QDISC_TAPRIO:
413 return tsnep_taprio(adapter, type_data);
/* Initialize gate control support: put the gate control block into a
 * known disabled state with all gates open and record the MMIO addresses
 * of the two hardware list memories (A and B).
 */
419 int tsnep_tc_init(struct tsnep_adapter *adapter)
/* nothing to do if hardware has no gate control support */
421 if (!adapter->gate_control)
425 iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
426 iowrite32(TSNEP_GC_OPEN | TSNEP_GC_NEXT_OPEN, adapter->addr + TSNEP_GC);
428 adapter->gcl[0].addr = adapter->addr + TSNEP_GCL_A;
429 adapter->gcl[1].addr = adapter->addr + TSNEP_GCL_B;
434 void tsnep_tc_cleanup(struct tsnep_adapter *adapter)
436 if (!adapter->gate_control)
439 if (adapter->gate_control_active) {
440 iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
441 adapter->gate_control_active = false;