// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Intel Corporation. All rights reserved. */
#include <linux/acpi.h>
#include <linux/xarray.h>
#include <linux/fw_table.h>
#include <linux/node.h>
#include <linux/overflow.h>
#include "cxlpci.h"
#include "cxlmem.h"
#include "core.h"
#include "cxl.h"

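/*
 * Per-DSMAS bookkeeping built while walking an endpoint's CDAT: the DPA
 * range and handle come from the DSMAS subtable itself, the access
 * coordinates are filled in from matching DSLBIS subtables, and the QoS
 * class is assigned later by the CXL root's qos_class() operation.
 */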
struct dsmas_entry {
        struct range dpa_range;
        u8 handle;
        struct access_coordinate coord;

        int entries;
        int qos_class;
};

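/*
 * cdat_table_parse(ACPI_CDAT_TYPE_DSMAS) callback: validate the Device
 * Scoped Memory Affinity Structure, allocate a dsmas_entry covering its
 * DPA range, and store it in the xarray indexed by the DSMAD handle.
 */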
static int cdat_dsmas_handler(union acpi_subtable_headers *header, void *arg,
                              const unsigned long end)
{
        struct acpi_cdat_header *hdr = &header->cdat;
        struct acpi_cdat_dsmas *dsmas;
        int size = sizeof(*hdr) + sizeof(*dsmas);
        struct xarray *dsmas_xa = arg;
        struct dsmas_entry *dent;
        u16 len;
        int rc;

        len = le16_to_cpu((__force __le16)hdr->length);
        if (len != size || (unsigned long)hdr + len > end) {
                pr_warn("Malformed DSMAS table length: (%u:%u)\n", size, len);
                return -EINVAL;
        }

        /* Skip common header */
        dsmas = (struct acpi_cdat_dsmas *)(hdr + 1);

        dent = kzalloc(sizeof(*dent), GFP_KERNEL);
        if (!dent)
                return -ENOMEM;

        dent->handle = dsmas->dsmad_handle;
        dent->dpa_range.start = le64_to_cpu((__force __le64)dsmas->dpa_base_address);
        dent->dpa_range.end = le64_to_cpu((__force __le64)dsmas->dpa_base_address) +
                              le64_to_cpu((__force __le64)dsmas->dpa_length) - 1;

        rc = xa_insert(dsmas_xa, dent->handle, dent, GFP_KERNEL);
        if (rc) {
                kfree(dent);
                return rc;
        }

        return 0;
}

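/*
 * Apply a single HMAT-style data type/value pair to the corresponding
 * latency or bandwidth field(s) of an access_coordinate. The ACCESS_*
 * types update both the read and write directions.
 */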
static void cxl_access_coordinate_set(struct access_coordinate *coord,
                                      int access, unsigned int val)
{
        switch (access) {
        case ACPI_HMAT_ACCESS_LATENCY:
                coord->read_latency = val;
                coord->write_latency = val;
                break;
        case ACPI_HMAT_READ_LATENCY:
                coord->read_latency = val;
                break;
        case ACPI_HMAT_WRITE_LATENCY:
                coord->write_latency = val;
                break;
        case ACPI_HMAT_ACCESS_BANDWIDTH:
                coord->read_bandwidth = val;
                coord->write_bandwidth = val;
                break;
        case ACPI_HMAT_READ_BANDWIDTH:
                coord->read_bandwidth = val;
                break;
        case ACPI_HMAT_WRITE_BANDWIDTH:
                coord->write_bandwidth = val;
                break;
        }
}

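/*
 * cdat_table_parse(ACPI_CDAT_TYPE_DSLBIS) callback: look up the
 * dsmas_entry referenced by the DSLBIS (Device Scoped Latency and
 * Bandwidth Information Structure) handle and record
 * entry_base_unit * entry[0] as the latency or bandwidth value for that
 * DPA range. Non-memory and unrecognized data types are skipped.
 */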
static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg,
                               const unsigned long end)
{
        struct acpi_cdat_header *hdr = &header->cdat;
        struct acpi_cdat_dslbis *dslbis;
        int size = sizeof(*hdr) + sizeof(*dslbis);
        struct xarray *dsmas_xa = arg;
        struct dsmas_entry *dent;
        __le64 le_base;
        __le16 le_val;
        u64 val;
        u16 len;
        int rc;

        len = le16_to_cpu((__force __le16)hdr->length);
        if (len != size || (unsigned long)hdr + len > end) {
                pr_warn("Malformed DSLBIS table length: (%u:%u)\n", size, len);
                return -EINVAL;
        }

        /* Skip common header */
        dslbis = (struct acpi_cdat_dslbis *)(hdr + 1);

        /* Skip unrecognized data type */
        if (dslbis->data_type > ACPI_HMAT_WRITE_BANDWIDTH)
                return 0;

        /* Not a memory type, skip */
        if ((dslbis->flags & ACPI_HMAT_MEMORY_HIERARCHY) != ACPI_HMAT_MEMORY)
                return 0;

        dent = xa_load(dsmas_xa, dslbis->handle);
        if (!dent) {
                pr_warn("No matching DSMAS entry for DSLBIS entry.\n");
                return 0;
        }

        le_base = (__force __le64)dslbis->entry_base_unit;
        le_val = (__force __le16)dslbis->entry[0];
        rc = check_mul_overflow(le64_to_cpu(le_base),
                                le16_to_cpu(le_val), &val);
        if (rc)
                pr_warn("DSLBIS value overflowed.\n");

        cxl_access_coordinate_set(&dent->coord, dslbis->data_type, val);

        return 0;
}

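/*
 * cdat_table_parse() returns the number of matching subtables found.
 * Normalize that to 0 on success, -ENOENT when nothing matched, or the
 * original negative error code.
 */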
static int cdat_table_parse_output(int rc)
{
        if (rc < 0)
                return rc;
        if (rc == 0)
                return -ENOENT;

        return 0;
}

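/*
 * Walk an endpoint's cached CDAT twice: first collect all DSMAS
 * subtables into @dsmas_xa, then annotate those entries with DSLBIS
 * latency and bandwidth data.
 */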
static int cxl_cdat_endpoint_process(struct cxl_port *port,
                                     struct xarray *dsmas_xa)
{
        int rc;

        rc = cdat_table_parse(ACPI_CDAT_TYPE_DSMAS, cdat_dsmas_handler,
                              dsmas_xa, port->cdat.table);
        rc = cdat_table_parse_output(rc);
        if (rc)
                return rc;

        rc = cdat_table_parse(ACPI_CDAT_TYPE_DSLBIS, cdat_dslbis_handler,
                              dsmas_xa, port->cdat.table);
        return cdat_table_parse_output(rc);
}

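/*
 * Fold the path performance from the endpoint up through the topology
 * into each DSMAS entry (latencies accumulate, bandwidths are capped by
 * the smaller value), then ask the CXL root's qos_class() operation for
 * a QoS class per entry. Returns -ENOENT if no entry received a valid
 * QoS class.
 */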
static int cxl_port_perf_data_calculate(struct cxl_port *port,
                                        struct xarray *dsmas_xa)
{
        struct access_coordinate c;
        struct dsmas_entry *dent;
        int valid_entries = 0;
        unsigned long index;
        int rc;

        rc = cxl_endpoint_get_perf_coordinates(port, &c);
        if (rc) {
                dev_dbg(&port->dev, "Failed to retrieve perf coordinates.\n");
                return rc;
        }

        struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);

        if (!cxl_root)
                return -ENODEV;

        if (!cxl_root->ops || !cxl_root->ops->qos_class)
                return -EOPNOTSUPP;

        xa_for_each(dsmas_xa, index, dent) {
                int qos_class;

                dent->coord.read_latency = dent->coord.read_latency +
                                           c.read_latency;
                dent->coord.write_latency = dent->coord.write_latency +
                                            c.write_latency;
                dent->coord.read_bandwidth = min_t(int, c.read_bandwidth,
                                                   dent->coord.read_bandwidth);
                dent->coord.write_bandwidth = min_t(int, c.write_bandwidth,
                                                    dent->coord.write_bandwidth);

                dent->entries = 1;
                rc = cxl_root->ops->qos_class(cxl_root, &dent->coord, 1,
                                              &qos_class);
                if (rc != 1)
                        continue;

                valid_entries++;
                dent->qos_class = qos_class;
        }

        if (!valid_entries)
                return -ENOENT;

        return 0;
}

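/* Publish a DSMAS entry's range, coordinates and QoS class in @dpa_perf */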
static void update_perf_entry(struct device *dev, struct dsmas_entry *dent,
                              struct cxl_dpa_perf *dpa_perf)
{
        dpa_perf->dpa_range = dent->dpa_range;
        dpa_perf->coord = dent->coord;
        dpa_perf->qos_class = dent->qos_class;
        dev_dbg(dev,
                "DSMAS: dpa: %#llx qos: %d read_bw: %d write_bw: %d read_lat: %d write_lat: %d\n",
                dent->dpa_range.start, dpa_perf->qos_class,
                dent->coord.read_bandwidth, dent->coord.write_bandwidth,
                dent->coord.read_latency, dent->coord.write_latency);
}

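/*
 * Attribute each DSMAS DPA range to the partition (volatile ram or
 * pmem) that fully contains it and record the resulting performance
 * data in the memdev state.
 */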
static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
                                     struct xarray *dsmas_xa)
{
        struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
        struct device *dev = cxlds->dev;
        struct range pmem_range = {
                .start = cxlds->pmem_res.start,
                .end = cxlds->pmem_res.end,
        };
        struct range ram_range = {
                .start = cxlds->ram_res.start,
                .end = cxlds->ram_res.end,
        };
        struct dsmas_entry *dent;
        unsigned long index;

        xa_for_each(dsmas_xa, index, dent) {
                if (resource_size(&cxlds->ram_res) &&
                    range_contains(&ram_range, &dent->dpa_range))
                        update_perf_entry(dev, dent, &mds->ram_perf);
                else if (resource_size(&cxlds->pmem_res) &&
                         range_contains(&pmem_range, &dent->dpa_range))
                        update_perf_entry(dev, dent, &mds->pmem_perf);
                else
                        dev_dbg(dev, "no partition for dsmas dpa: %#llx\n",
                                dent->dpa_range.start);
        }
}

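/*
 * device_for_each_child() callback: return 1 if @dev is a root decoder
 * whose valid qos_class matches the class being searched for.
 */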
static int match_cxlrd_qos_class(struct device *dev, void *data)
{
        int dev_qos_class = *(int *)data;
        struct cxl_root_decoder *cxlrd;

        if (!is_root_decoder(dev))
                return 0;

        cxlrd = to_cxl_root_decoder(dev);
        if (cxlrd->qos_class == CXL_QOS_CLASS_INVALID)
                return 0;

        if (cxlrd->qos_class == dev_qos_class)
                return 1;

        return 0;
}

static void reset_dpa_perf(struct cxl_dpa_perf *dpa_perf)
{
        *dpa_perf = (struct cxl_dpa_perf) {
                .qos_class = CXL_QOS_CLASS_INVALID,
        };
}

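/*
 * A partition's QoS class is only considered a match if it is valid and
 * at least one root decoder under @root_port advertises the same class.
 */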
static bool cxl_qos_match(struct cxl_port *root_port,
                          struct cxl_dpa_perf *dpa_perf)
{
        if (dpa_perf->qos_class == CXL_QOS_CLASS_INVALID)
                return false;

        if (!device_for_each_child(&root_port->dev, &dpa_perf->qos_class,
                                   match_cxlrd_qos_class))
                return false;

        return true;
}

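/*
 * device_for_each_child() callback: return 1 if @dev is a root decoder
 * that has the given host bridge among its targets.
 */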
static int match_cxlrd_hb(struct device *dev, void *data)
{
        struct device *host_bridge = data;
        struct cxl_switch_decoder *cxlsd;
        struct cxl_root_decoder *cxlrd;

        if (!is_root_decoder(dev))
                return 0;

        cxlrd = to_cxl_root_decoder(dev);
        cxlsd = &cxlrd->cxlsd;

        guard(rwsem_read)(&cxl_region_rwsem);
        for (int i = 0; i < cxlsd->nr_targets; i++) {
                if (host_bridge == cxlsd->target[i]->dport_dev)
                        return 1;
        }

        return 0;
}

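/*
 * Cross-check the QoS classes (QTG IDs) assigned to the memdev's
 * partitions against the root decoders: invalidate any partition whose
 * class has no matching root decoder, and invalidate both partitions if
 * the device's host bridge is not reachable from any root decoder.
 */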
static int cxl_qos_class_verify(struct cxl_memdev *cxlmd)
{
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
        struct cxl_port *root_port;
        int rc;

        struct cxl_root *cxl_root __free(put_cxl_root) =
                find_cxl_root(cxlmd->endpoint);

        if (!cxl_root)
                return -ENODEV;

        root_port = &cxl_root->port;

        /* Check that the QTG IDs are all sane between end device and root decoders */
        if (!cxl_qos_match(root_port, &mds->ram_perf))
                reset_dpa_perf(&mds->ram_perf);
        if (!cxl_qos_match(root_port, &mds->pmem_perf))
                reset_dpa_perf(&mds->pmem_perf);

        /* Check to make sure that the device's host bridge is under a root decoder */
        rc = device_for_each_child(&root_port->dev,
                                   cxlmd->endpoint->host_bridge, match_cxlrd_hb);
        if (!rc) {
                reset_dpa_perf(&mds->ram_perf);
                reset_dpa_perf(&mds->pmem_perf);
        }

        return rc;
}

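/* Free all dsmas_entry objects and tear down the xarray itself */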
static void discard_dsmas(struct xarray *xa)
{
        unsigned long index;
        void *ent;

        xa_for_each(xa, index, ent) {
                xa_erase(xa, index);
                kfree(ent);
        }
        xa_destroy(xa);
}
DEFINE_FREE(dsmas, struct xarray *, if (_T) discard_dsmas(_T))

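/**
 * cxl_endpoint_parse_cdat - parse an endpoint's CDAT and set QoS data
 * @port: endpoint port whose CDAT table has already been cached
 *
 * Collects DSMAS/DSLBIS data, combines it with the path performance to
 * the endpoint, assigns QoS classes, verifies them against the root
 * decoders, and publishes the per-partition performance for the memdev.
 * Failures are only reported via dev_dbg().
 */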
void cxl_endpoint_parse_cdat(struct cxl_port *port)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct xarray __dsmas_xa;
        struct xarray *dsmas_xa __free(dsmas) = &__dsmas_xa;
        int rc;

        xa_init(&__dsmas_xa);
        if (!port->cdat.table)
                return;

        rc = cxl_cdat_endpoint_process(port, dsmas_xa);
        if (rc < 0) {
                dev_dbg(&port->dev, "Failed to parse CDAT: %d\n", rc);
                return;
        }

        rc = cxl_port_perf_data_calculate(port, dsmas_xa);
        if (rc) {
                dev_dbg(&port->dev, "Failed to do perf coord calculations.\n");
                return;
        }

        cxl_memdev_set_qos_class(cxlds, dsmas_xa);
        cxl_qos_class_verify(cxlmd);
        cxl_memdev_update_perf(cxlmd);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_parse_cdat, CXL);

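/*
 * cdat_table_parse(ACPI_CDAT_TYPE_SSLBIS) callback: for each port-pair
 * entry of the Switch Scoped Latency and Bandwidth Information
 * Structure, resolve which downstream port the record describes (or all
 * of them for ANY_PORT) and apply entry_base_unit * latency_or_bandwidth
 * to the matching dport's switch coordinates.
 */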
static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
                               const unsigned long end)
{
        struct acpi_cdat_sslbis *sslbis;
        int size = sizeof(header->cdat) + sizeof(*sslbis);
        struct cxl_port *port = arg;
        struct device *dev = &port->dev;
        struct acpi_cdat_sslbe *entry;
        int remain, entries, i;
        u16 len;

        len = le16_to_cpu((__force __le16)header->cdat.length);
        remain = len - size;
        if (!remain || remain % sizeof(*entry) ||
            (unsigned long)header + len > end) {
                dev_warn(dev, "Malformed SSLBIS table length: (%u)\n", len);
                return -EINVAL;
        }

        /* Skip common header */
        sslbis = (struct acpi_cdat_sslbis *)((unsigned long)header +
                                             sizeof(header->cdat));

        /* Unrecognized data type, we can skip */
        if (sslbis->data_type > ACPI_HMAT_WRITE_BANDWIDTH)
                return 0;

        entries = remain / sizeof(*entry);
        /* The SSLB entries follow the common header and the fixed SSLBIS fields */
        entry = (struct acpi_cdat_sslbe *)((unsigned long)header + size);

        for (i = 0; i < entries; i++) {
                u16 x = le16_to_cpu((__force __le16)entry->portx_id);
                u16 y = le16_to_cpu((__force __le16)entry->porty_id);
                __le64 le_base;
                __le16 le_val;
                struct cxl_dport *dport;
                unsigned long index;
                u16 dsp_id;
                u64 val;

                switch (x) {
                case ACPI_CDAT_SSLBIS_US_PORT:
                        dsp_id = y;
                        break;
                case ACPI_CDAT_SSLBIS_ANY_PORT:
                        switch (y) {
                        case ACPI_CDAT_SSLBIS_US_PORT:
                                dsp_id = x;
                                break;
                        case ACPI_CDAT_SSLBIS_ANY_PORT:
                                dsp_id = ACPI_CDAT_SSLBIS_ANY_PORT;
                                break;
                        default:
                                dsp_id = y;
                                break;
                        }
                        break;
                default:
                        dsp_id = x;
                        break;
                }

                le_base = (__force __le64)sslbis->entry_base_unit;
                le_val = (__force __le16)entry->latency_or_bandwidth;

                if (check_mul_overflow(le64_to_cpu(le_base),
                                       le16_to_cpu(le_val), &val))
                        dev_warn(dev, "SSLBIS value overflowed!\n");

                xa_for_each(&port->dports, index, dport) {
                        if (dsp_id == ACPI_CDAT_SSLBIS_ANY_PORT ||
                            dsp_id == dport->port_id)
                                cxl_access_coordinate_set(&dport->sw_coord,
                                                          sslbis->data_type,
                                                          val);
                }

                entry++;
        }

        return 0;
}

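/**
 * cxl_switch_parse_cdat - parse a switch port's CDAT for SSLBIS data
 * @port: switch port whose CDAT table has already been cached
 *
 * Records per-downstream-port latency and bandwidth data; parse
 * failures are only reported via dev_dbg().
 */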
void cxl_switch_parse_cdat(struct cxl_port *port)
{
        int rc;

        if (!port->cdat.table)
                return;

        rc = cdat_table_parse(ACPI_CDAT_TYPE_SSLBIS, cdat_sslbis_handler,
                              port, port->cdat.table);
        rc = cdat_table_parse_output(rc);
        if (rc)
                dev_dbg(&port->dev, "Failed to parse SSLBIS: %d\n", rc);
}
EXPORT_SYMBOL_NS_GPL(cxl_switch_parse_cdat, CXL);

MODULE_IMPORT_NS(CXL);