// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2020 Intel Corporation. All rights rsvd. */

#include <linux/sched/task.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include "idxd.h"
#include "perfmon.h"

static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
                            char *buf);

static cpumask_t                perfmon_dsa_cpu_mask;
static bool                     cpuhp_set_up;
static enum cpuhp_state         cpuhp_slot;

/*
 * perf userspace reads this attribute to determine which cpus to open
 * counters on.  It's connected to perfmon_dsa_cpu_mask, which is
 * maintained by the cpu hotplug handlers.
 */
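/*
 * For example (the device name is illustrative; the first DSA device
 * typically enumerates as dsa0):
 *
 *   $ cat /sys/bus/event_source/devices/dsa0/cpumask
 */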
static DEVICE_ATTR_RO(cpumask);

static struct attribute *perfmon_cpumask_attrs[] = {
        &dev_attr_cpumask.attr,
        NULL,
};

static struct attribute_group cpumask_attr_group = {
        .attrs = perfmon_cpumask_attrs,
};

/*
 * These attributes specify the bits in the config word that the perf
 * syscall uses to pass the event ids and categories to perfmon.
 */
DEFINE_PERFMON_FORMAT_ATTR(event_category, "config:0-3");
DEFINE_PERFMON_FORMAT_ATTR(event, "config:4-31");

/*
 * These attributes specify the bits in the config1 word that the perf
 * syscall uses to pass filter data to perfmon.
 */
DEFINE_PERFMON_FORMAT_ATTR(filter_wq, "config1:0-31");
DEFINE_PERFMON_FORMAT_ATTR(filter_tc, "config1:32-39");
DEFINE_PERFMON_FORMAT_ATTR(filter_pgsz, "config1:40-43");
DEFINE_PERFMON_FORMAT_ATTR(filter_sz, "config1:44-51");
DEFINE_PERFMON_FORMAT_ATTR(filter_eng, "config1:52-59");

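/*
 * Together these let perf encode an event and filters, e.g.
 * (device name and values are illustrative):
 *
 *   perf stat -e dsa0/event_category=0x1,event=0x1,filter_wq=0x1/
 *
 * The filter attributes occupy PERFMON_FILTERS_MAX consecutive slots
 * of perfmon_format_attrs[] below, starting at index
 * PERFMON_FILTERS_START; filters the hardware doesn't support are
 * compacted out of the array at init time (see skip_filter()).
 */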
#define PERFMON_FILTERS_START   2
#define PERFMON_FILTERS_MAX     5

static struct attribute *perfmon_format_attrs[] = {
        &format_attr_idxd_event_category.attr,
        &format_attr_idxd_event.attr,
        &format_attr_idxd_filter_wq.attr,
        &format_attr_idxd_filter_tc.attr,
        &format_attr_idxd_filter_pgsz.attr,
        &format_attr_idxd_filter_sz.attr,
        &format_attr_idxd_filter_eng.attr,
        NULL,
};

static struct attribute_group perfmon_format_attr_group = {
        .name = "format",
        .attrs = perfmon_format_attrs,
};

static const struct attribute_group *perfmon_attr_groups[] = {
        &perfmon_format_attr_group,
        &cpumask_attr_group,
        NULL,
};

static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        return cpumap_print_to_pagebuf(true, buf, &perfmon_dsa_cpu_mask);
}

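/* an event is ours if it was created against this idxd PMU instance */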
static bool is_idxd_event(struct idxd_pmu *idxd_pmu, struct perf_event *event)
{
        return &idxd_pmu->pmu == event->pmu;
}

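/*
 * Collect @leader (and, if @do_grp, its sibling events) into the PMU's
 * event_list, assigning provisional counter indexes.  Returns the new
 * number of collected events, or -EINVAL if the group needs more
 * counters than the PMU provides.
 */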
static int perfmon_collect_events(struct idxd_pmu *idxd_pmu,
                                  struct perf_event *leader,
                                  bool do_grp)
{
        struct perf_event *event;
        int n, max_count;

        max_count = idxd_pmu->n_counters;
        n = idxd_pmu->n_events;

        if (n >= max_count)
                return -EINVAL;

        if (is_idxd_event(idxd_pmu, leader)) {
                idxd_pmu->event_list[n] = leader;
                idxd_pmu->event_list[n]->hw.idx = n;
                n++;
        }

        if (!do_grp)
                return n;

        for_each_sibling_event(event, leader) {
                if (!is_idxd_event(idxd_pmu, event) ||
                    event->state <= PERF_EVENT_STATE_OFF)
                        continue;

                if (n >= max_count)
                        return -EINVAL;

                idxd_pmu->event_list[n] = event;
                idxd_pmu->event_list[n]->hw.idx = n;
                n++;
        }

        return n;
}

static void perfmon_assign_hw_event(struct idxd_pmu *idxd_pmu,
                                    struct perf_event *event, int idx)
{
        struct idxd_device *idxd = idxd_pmu->idxd;
        struct hw_perf_event *hwc = &event->hw;

        hwc->idx = idx;
        hwc->config_base = ioread64(CNTRCFG_REG(idxd, idx));
        hwc->event_base = ioread64(CNTRCFG_REG(idxd, idx));
}

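/* claim the first free counter in used_mask; -EINVAL if all are in use */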
static int perfmon_assign_event(struct idxd_pmu *idxd_pmu,
                                struct perf_event *event)
{
        int i;

        for (i = 0; i < IDXD_PMU_EVENT_MAX; i++)
                if (!test_and_set_bit(i, idxd_pmu->used_mask))
                        return i;

        return -EINVAL;
}

/*
 * Check whether there are enough counters for all the events in the
 * group to actually be scheduled at the same time.
 *
 * To do this, create a fake idxd_pmu object so the event collection
 * and assignment functions can be used without affecting the internal
 * state of the real idxd_pmu object.
 */
static int perfmon_validate_group(struct idxd_pmu *pmu,
                                  struct perf_event *event)
{
        struct perf_event *leader = event->group_leader;
        struct idxd_pmu *fake_pmu;
        int i, ret = 0, n, idx;

        fake_pmu = kzalloc(sizeof(*fake_pmu), GFP_KERNEL);
        if (!fake_pmu)
                return -ENOMEM;

        fake_pmu->pmu.name = pmu->pmu.name;
        fake_pmu->n_counters = pmu->n_counters;

        n = perfmon_collect_events(fake_pmu, leader, true);
        if (n < 0) {
                ret = n;
                goto out;
        }

        fake_pmu->n_events = n;
        n = perfmon_collect_events(fake_pmu, event, false);
        if (n < 0) {
                ret = n;
                goto out;
        }

        fake_pmu->n_events = n;

        for (i = 0; i < n; i++) {
                event = fake_pmu->event_list[i];

                idx = perfmon_assign_event(fake_pmu, event);
                if (idx < 0) {
                        ret = idx;
                        goto out;
                }
        }
out:
        kfree(fake_pmu);

        return ret;
}

static int perfmon_pmu_event_init(struct perf_event *event)
{
        struct idxd_device *idxd;
        int ret = 0;

        idxd = event_to_idxd(event);
        event->hw.idx = -1;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        /* sampling not supported */
        if (event->attr.sample_period)
                return -EINVAL;

        if (event->cpu < 0)
                return -EINVAL;

        if (event->pmu != &idxd->idxd_pmu->pmu)
                return -EINVAL;

        event->hw.event_base = ioread64(PERFMON_TABLE_OFFSET(idxd));
        event->cpu = idxd->idxd_pmu->cpu;
        event->hw.config = event->attr.config;

        /* non-group events have themselves as leader */
        if (event->group_leader != event)
                ret = perfmon_validate_group(idxd->idxd_pmu, event);

        return ret;
}

static inline u64 perfmon_pmu_read_counter(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct idxd_device *idxd;
        int cntr = hwc->idx;

        idxd = event_to_idxd(event);

        return ioread64(CNTRDATA_REG(idxd, cntr));
}

static void perfmon_pmu_event_update(struct perf_event *event)
{
        struct idxd_device *idxd = event_to_idxd(event);
        u64 prev_raw_count, new_raw_count, delta, p, n;
        int shift = 64 - idxd->idxd_pmu->counter_width;
        struct hw_perf_event *hwc = &event->hw;

        do {
                prev_raw_count = local64_read(&hwc->prev_count);
                new_raw_count = perfmon_pmu_read_counter(event);
        } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                        new_raw_count) != prev_raw_count);

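        /*
         * The hardware counter is counter_width bits wide: shift both
         * raw values up so the counter's MSB lands in bit 63, subtract,
         * then shift back down.  The result is the delta modulo
         * 2^counter_width, which stays correct across counter wraps.
         */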
        n = (new_raw_count << shift);
        p = (prev_raw_count << shift);

        delta = ((n - p) >> shift);

        local64_add(delta, &event->count);
}

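/*
 * Handle a counter-overflow interrupt from the device: fold each
 * overflowed counter into its event's count, then acknowledge the
 * overflow by writing its OVFSTATUS bit back.
 */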
void perfmon_counter_overflow(struct idxd_device *idxd)
{
        int i, n_counters, max_loop = OVERFLOW_SIZE;
        struct perf_event *event;
        unsigned long ovfstatus;

        n_counters = min(idxd->idxd_pmu->n_counters, OVERFLOW_SIZE);

        ovfstatus = ioread32(OVFSTATUS_REG(idxd));

        /*
         * While updating overflowed counters, other counters behind
         * them could overflow and be missed in a given pass.
         * Normally this could happen at most n_counters times, but in
         * theory a tiny counter width could result in continual
         * overflows and endless looping.  max_loop provides a
         * failsafe in that highly unlikely case.
         */
        while (ovfstatus && max_loop--) {
                /* Figure out which counter(s) overflowed */
                for_each_set_bit(i, &ovfstatus, n_counters) {
                        unsigned long ovfstatus_clear = 0;

                        /* Update event->count for overflowed counter */
                        event = idxd->idxd_pmu->event_list[i];
                        perfmon_pmu_event_update(event);
                        /* Writing 1 to OVFSTATUS bit clears it */
                        set_bit(i, &ovfstatus_clear);
                        iowrite32(ovfstatus_clear, OVFSTATUS_REG(idxd));
                }

                ovfstatus = ioread32(OVFSTATUS_REG(idxd));
        }

        /*
         * Should never happen.  If it does, it means one or more
         * counters looped around twice while this handler was running.
         */
        WARN_ON_ONCE(ovfstatus);
}

static inline void perfmon_reset_config(struct idxd_device *idxd)
{
        iowrite32(CONFIG_RESET, PERFRST_REG(idxd));
        iowrite32(0, OVFSTATUS_REG(idxd));
        iowrite32(0, PERFFRZ_REG(idxd));
}

static inline void perfmon_reset_counters(struct idxd_device *idxd)
{
        iowrite32(CNTR_RESET, PERFRST_REG(idxd));
}

static inline void perfmon_reset(struct idxd_device *idxd)
{
        perfmon_reset_config(idxd);
        perfmon_reset_counters(idxd);
}

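/*
 * Program the event/category encoding and any supported filters from
 * the perf attr config words into this counter's FLTCFG/CNTRCFG
 * registers, then enable the counter with overflow interrupts on.
 */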
static void perfmon_pmu_event_start(struct perf_event *event, int mode)
{
        u32 flt_wq, flt_tc, flt_pg_sz, flt_xfer_sz, flt_eng = 0;
        u64 cntr_cfg, cntrdata, event_enc, event_cat = 0;
        struct hw_perf_event *hwc = &event->hw;
        union filter_cfg flt_cfg;
        union event_cfg event_cfg;
        struct idxd_device *idxd;
        int cntr;

        idxd = event_to_idxd(event);

        event->hw.idx = hwc->idx;
        cntr = hwc->idx;

        /* Obtain event category and event value from user space */
        event_cfg.val = event->attr.config;
        flt_cfg.val = event->attr.config1;
        event_cat = event_cfg.event_cat;
        event_enc = event_cfg.event_enc;

        /* Obtain filter configuration from user space */
        flt_wq = flt_cfg.wq;
        flt_tc = flt_cfg.tc;
        flt_pg_sz = flt_cfg.pg_sz;
        flt_xfer_sz = flt_cfg.xfer_sz;
        flt_eng = flt_cfg.eng;

        if (flt_wq && test_bit(FLT_WQ, &idxd->idxd_pmu->supported_filters))
                iowrite32(flt_wq, FLTCFG_REG(idxd, cntr, FLT_WQ));
        if (flt_tc && test_bit(FLT_TC, &idxd->idxd_pmu->supported_filters))
                iowrite32(flt_tc, FLTCFG_REG(idxd, cntr, FLT_TC));
        if (flt_pg_sz && test_bit(FLT_PG_SZ, &idxd->idxd_pmu->supported_filters))
                iowrite32(flt_pg_sz, FLTCFG_REG(idxd, cntr, FLT_PG_SZ));
        if (flt_xfer_sz && test_bit(FLT_XFER_SZ, &idxd->idxd_pmu->supported_filters))
                iowrite32(flt_xfer_sz, FLTCFG_REG(idxd, cntr, FLT_XFER_SZ));
        if (flt_eng && test_bit(FLT_ENG, &idxd->idxd_pmu->supported_filters))
                iowrite32(flt_eng, FLTCFG_REG(idxd, cntr, FLT_ENG));

        /* Read the start value */
        cntrdata = ioread64(CNTRDATA_REG(idxd, cntr));
        local64_set(&event->hw.prev_count, cntrdata);

        /* Set counter to event/category */
        cntr_cfg = event_cat << CNTRCFG_CATEGORY_SHIFT;
        cntr_cfg |= event_enc << CNTRCFG_EVENT_SHIFT;
        /* Set interrupt on overflow and counter enable bits */
        cntr_cfg |= (CNTRCFG_IRQ_OVERFLOW | CNTRCFG_ENABLE);

        iowrite64(cntr_cfg, CNTRCFG_REG(idxd, cntr));
}

static void perfmon_pmu_event_stop(struct perf_event *event, int mode)
{
        struct hw_perf_event *hwc = &event->hw;
        struct idxd_device *idxd;
        int i, cntr = hwc->idx;
        u64 cntr_cfg;

        idxd = event_to_idxd(event);

        /* remove this event from event list */
        for (i = 0; i < idxd->idxd_pmu->n_events; i++) {
                if (event != idxd->idxd_pmu->event_list[i])
                        continue;

                for (++i; i < idxd->idxd_pmu->n_events; i++)
                        idxd->idxd_pmu->event_list[i - 1] = idxd->idxd_pmu->event_list[i];
                --idxd->idxd_pmu->n_events;
                break;
        }

        cntr_cfg = ioread64(CNTRCFG_REG(idxd, cntr));
        cntr_cfg &= ~CNTRCFG_ENABLE;
        iowrite64(cntr_cfg, CNTRCFG_REG(idxd, cntr));

        if (mode == PERF_EF_UPDATE)
                perfmon_pmu_event_update(event);

        event->hw.idx = -1;
        clear_bit(cntr, idxd->idxd_pmu->used_mask);
}

static void perfmon_pmu_event_del(struct perf_event *event, int mode)
{
        perfmon_pmu_event_stop(event, PERF_EF_UPDATE);
}

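/*
 * Add the event to the PMU: reserve a hardware counter for it and, if
 * PERF_EF_START is set, start counting immediately.
 */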
static int perfmon_pmu_event_add(struct perf_event *event, int flags)
{
        struct idxd_device *idxd = event_to_idxd(event);
        struct idxd_pmu *idxd_pmu = idxd->idxd_pmu;
        struct hw_perf_event *hwc = &event->hw;
        int idx, n;

        n = perfmon_collect_events(idxd_pmu, event, false);
        if (n < 0)
                return n;

        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
        if (!(flags & PERF_EF_START))
                hwc->state |= PERF_HES_ARCH;

        idx = perfmon_assign_event(idxd_pmu, event);
        if (idx < 0)
                return idx;

        perfmon_assign_hw_event(idxd_pmu, event, idx);

        if (flags & PERF_EF_START)
                perfmon_pmu_event_start(event, 0);

        idxd_pmu->n_events = n;

        return 0;
}

static void enable_perfmon_pmu(struct idxd_device *idxd)
{
        iowrite32(COUNTER_UNFREEZE, PERFFRZ_REG(idxd));
}

static void disable_perfmon_pmu(struct idxd_device *idxd)
{
        iowrite32(COUNTER_FREEZE, PERFFRZ_REG(idxd));
}

static void perfmon_pmu_enable(struct pmu *pmu)
{
        struct idxd_device *idxd = pmu_to_idxd(pmu);

        enable_perfmon_pmu(idxd);
}

static void perfmon_pmu_disable(struct pmu *pmu)
{
        struct idxd_device *idxd = pmu_to_idxd(pmu);

        disable_perfmon_pmu(idxd);
}

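/*
 * Drop unsupported filter i from perfmon_format_attrs[] by sliding the
 * remaining filter attributes (and the NULL terminator) down one slot.
 */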
static void skip_filter(int i)
{
        int j;

        for (j = i; j < PERFMON_FILTERS_MAX; j++)
                perfmon_format_attrs[PERFMON_FILTERS_START + j] =
                        perfmon_format_attrs[PERFMON_FILTERS_START + j + 1];
}

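/*
 * Trim unsupported filters out of the format attributes and wire up
 * the standard perf_event callbacks for this PMU instance.
 */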
static void idxd_pmu_init(struct idxd_pmu *idxd_pmu)
{
        int i;

        /*
         * Walk the filters from the top down so that compacting the
         * array for one unsupported filter doesn't shift entries under
         * indexes that haven't been visited yet.
         */
        for (i = PERFMON_FILTERS_MAX - 1; i >= 0; i--) {
                if (!test_bit(i, &idxd_pmu->supported_filters))
                        skip_filter(i);
        }

        idxd_pmu->pmu.name              = idxd_pmu->name;
        idxd_pmu->pmu.attr_groups       = perfmon_attr_groups;
        idxd_pmu->pmu.task_ctx_nr       = perf_invalid_context;
        idxd_pmu->pmu.event_init        = perfmon_pmu_event_init;
        idxd_pmu->pmu.pmu_enable        = perfmon_pmu_enable;
        idxd_pmu->pmu.pmu_disable       = perfmon_pmu_disable;
        idxd_pmu->pmu.add               = perfmon_pmu_event_add;
        idxd_pmu->pmu.del               = perfmon_pmu_event_del;
        idxd_pmu->pmu.start             = perfmon_pmu_event_start;
        idxd_pmu->pmu.stop              = perfmon_pmu_event_stop;
        idxd_pmu->pmu.read              = perfmon_pmu_event_update;
        idxd_pmu->pmu.capabilities      = PERF_PMU_CAP_NO_EXCLUDE;
        idxd_pmu->pmu.module            = THIS_MODULE;
}

void perfmon_pmu_remove(struct idxd_device *idxd)
{
        if (!idxd->idxd_pmu)
                return;

        cpuhp_state_remove_instance(cpuhp_slot, &idxd->idxd_pmu->cpuhp_node);
        perf_pmu_unregister(&idxd->idxd_pmu->pmu);
        kfree(idxd->idxd_pmu);
        idxd->idxd_pmu = NULL;
}

static int perf_event_cpu_online(unsigned int cpu, struct hlist_node *node)
{
        struct idxd_pmu *idxd_pmu;

        idxd_pmu = hlist_entry_safe(node, typeof(*idxd_pmu), cpuhp_node);

        /* select the first online CPU as the designated reader */
        if (cpumask_empty(&perfmon_dsa_cpu_mask)) {
                cpumask_set_cpu(cpu, &perfmon_dsa_cpu_mask);
                idxd_pmu->cpu = cpu;
        }

        return 0;
}

static int perf_event_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
        struct idxd_pmu *idxd_pmu;
        unsigned int target;

        idxd_pmu = hlist_entry_safe(node, typeof(*idxd_pmu), cpuhp_node);

        if (!cpumask_test_and_clear_cpu(cpu, &perfmon_dsa_cpu_mask))
                return 0;

        target = cpumask_any_but(cpu_online_mask, cpu);

        /* migrate events if there is a valid target */
        if (target < nr_cpu_ids)
                cpumask_set_cpu(target, &perfmon_dsa_cpu_mask);
        else
                target = -1;

        perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);

        return 0;
}

int perfmon_pmu_init(struct idxd_device *idxd)
{
        union idxd_perfcap perfcap;
        struct idxd_pmu *idxd_pmu;
        int rc = -ENODEV;

        /*
         * perfmon module initialization failed, nothing to do
         */
        if (!cpuhp_set_up)
                return -ENODEV;

        /*
         * If perfmon_offset is 0, perfmon is not supported on this
         * hardware (num_perf_counter is checked below, once PERFCAP
         * has been read).
         */
        if (idxd->perfmon_offset == 0)
                return -ENODEV;

        idxd_pmu = kzalloc(sizeof(*idxd_pmu), GFP_KERNEL);
        if (!idxd_pmu)
                return -ENOMEM;

        idxd_pmu->idxd = idxd;
        idxd->idxd_pmu = idxd_pmu;

        if (idxd->data->type == IDXD_TYPE_DSA) {
                rc = sprintf(idxd_pmu->name, "dsa%d", idxd->id);
                if (rc < 0)
                        goto free;
        } else if (idxd->data->type == IDXD_TYPE_IAX) {
                rc = sprintf(idxd_pmu->name, "iax%d", idxd->id);
                if (rc < 0)
                        goto free;
        } else {
                goto free;
        }

        perfmon_reset(idxd);

        perfcap.bits = ioread64(PERFCAP_REG(idxd));

        /*
         * If the total number of perf counters is 0, stop further
         * registration.  This is necessary to support running the
         * driver in a guest that has no pmon support.
         */
        if (perfcap.num_perf_counter == 0)
                goto free;

        /* A counter width of 0 means it can't count */
        if (perfcap.counter_width == 0)
                goto free;

        /* Overflow interrupt and counter freeze support must be available */
        if (!perfcap.overflow_interrupt || !perfcap.counter_freeze)
                goto free;

        /* Number of event categories cannot be 0 */
        if (perfcap.num_event_category == 0)
                goto free;

        /*
         * We don't support per-counter capabilities for now.
         */
        if (perfcap.cap_per_counter)
                goto free;

        idxd_pmu->n_event_categories = perfcap.num_event_category;
        idxd_pmu->supported_event_categories = perfcap.global_event_category;
        idxd_pmu->per_counter_caps_supported = perfcap.cap_per_counter;

        /* check filter capability.  If 0, then filters are not supported */
        idxd_pmu->supported_filters = perfcap.filter;
        if (perfcap.filter)
                idxd_pmu->n_filters = hweight8(perfcap.filter);

        /* Store the total number of counters and the counter width */
        idxd_pmu->n_counters = perfcap.num_perf_counter;
        idxd_pmu->counter_width = perfcap.counter_width;

        idxd_pmu_init(idxd_pmu);

        rc = perf_pmu_register(&idxd_pmu->pmu, idxd_pmu->name, -1);
        if (rc)
                goto free;

        rc = cpuhp_state_add_instance(cpuhp_slot, &idxd_pmu->cpuhp_node);
        if (rc) {
                perf_pmu_unregister(&idxd->idxd_pmu->pmu);
                goto free;
        }
out:
        return rc;
free:
        kfree(idxd_pmu);
        idxd->idxd_pmu = NULL;

        goto out;
}

void __init perfmon_init(void)
{
        int rc = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
                                         "driver/dma/idxd/perf:online",
                                         perf_event_cpu_online,
                                         perf_event_cpu_offline);
        if (WARN_ON(rc < 0))
                return;

        cpuhp_slot = rc;
        cpuhp_set_up = true;
}

void __exit perfmon_exit(void)
{
        if (cpuhp_set_up)
                cpuhp_remove_multi_state(cpuhp_slot);
}