2 # SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
3 """Convert directories of JSON events to C code."""
6 from functools import lru_cache
11 from typing import (Callable, Dict, Optional, Sequence, Set, Tuple)
# NOTE(review): several global initializers (_args, _event_tables,
# _metric_tables, _arch_std_events, _pending_events, _pending_metrics,
# _bcs, _metricgroups and the closing brackets of the attribute lists)
# are not visible in this fragment.
# Global command line arguments.
# List of regular event tables.
# List of event tables generated from "/sys" directories.
_sys_event_tables = []
# List of regular metric tables.
# List of metric tables generated from "/sys" directories.
_sys_metric_tables = []
# Mapping between sys event table names and sys metric table names.
_sys_event_table_to_metric_table_mapping = {}
# Map from an event name to an architecture standard
# JsonEvent. Architecture standard events are in json files in the top
# f'{_args.starting_dir}/{_args.arch}' directory.
# Events to write out when the table is closed
# Name of events table to be written out
_pending_events_tblname = None
# Metrics to write out when the table is closed
# Name of metrics table to be written out
_pending_metrics_tblname = None
# Global BigCString shared by all structures.
# Map from the name of a metric group to a description of the group.
# Order specific JsonEvent attributes will be visited.
_json_event_attributes = [
    # cmp_sevent related attributes.
    'name', 'topic', 'desc',
    # Seems useful, put it early.
    # Short things in alphabetical order.
    'compat', 'deprecated', 'perpkg', 'unit',
    # Longer things (the last won't be iterated over during decompress).
# Attributes that are in pmu_metric rather than pmu_event.
_json_metric_attributes = [
    'metric_name', 'metric_group', 'metric_expr', 'metric_threshold',
    'desc', 'long_desc', 'unit', 'compat', 'metricgroup_no_group',
    'default_metricgroup_name', 'aggr_mode', 'event_grouping'
# Attributes that are bools or enum int values, encoded as '0', '1',...
_json_enum_attributes = ['aggr_mode', 'deprecated', 'event_grouping', 'perpkg']
def removesuffix(s: str, suffix: str) -> str:
  """Remove the suffix from a string.

  The removesuffix function is added to str in Python 3.9. We aim for 3.6
  compatibility and so provide our own function here.
  """
  # Guard against an empty suffix: s.endswith('') is always True and
  # s[0:-0] == s[0:0] would wrongly return the empty string instead of s
  # (str.removesuffix returns s unchanged for an empty suffix).
  return s[0:-len(suffix)] if suffix and s.endswith(suffix) else s
def file_name_to_table_name(prefix: str, parents: Sequence[str],
                            dirname: str) -> str:
  """Generate a C table name from directory names.

  The name is the prefix followed by each parent directory and the final
  directory name, each introduced by '_'. '-' isn't valid in a C
  identifier, so it is mapped to '_' as well.
  """
  tblname = prefix
  # Each parent directory contributes a '_'-separated component.
  for parent in parents:
    tblname += '_' + parent
  tblname += '_' + dirname
  return tblname.replace('-', '_')
def c_len(s: str) -> int:
  """Return the length of s as a C string.

  This doesn't handle all escape characters properly. It first assumes
  all \\ are for escaping, it then adjusts as it will have over counted
  \\. The code uses \\000 rather than \\0 as a terminator as an adjacent
  number would be folded into a string of \\0 (ie. "\\0" + "5" doesn't
  equal a terminator followed by the number 5 but the escape of
  \\05). The code adjusts for \\000 but not properly for all octal, hex
  or unicode values.
  """
  try:
    utf = s.encode(encoding='utf-8',errors='strict')
  except UnicodeEncodeError:
    # Narrow exception rather than a bare 'except': a string that can't
    # be UTF-8 encoded can't be emitted into the generated C file, so
    # report which string is broken and propagate the error.
    print(f'broken string {s}')
    raise
  # Each lone backslash begins a two-source-char escape (one C char), a
  # double backslash was then over-adjusted (add one back), and a \000
  # terminator is four source chars for one C char (subtract two more).
  return len(utf) - utf.count(b'\\') + utf.count(b'\\\\') - (utf.count(b'\\000') * 2)
  """A class to hold many strings concatenated together.

  Generating a large number of stand-alone C strings creates a large
  number of relocations in position independent code. The BigCString
  is a helper for this case. It builds a single string which within it
  are all the other C strings (to avoid memory issues the string
  itself is held as a list of strings). The offsets within the big
  string are recorded and when stored to disk these don't need
  relocation. To reduce the size of the string further, identical
  strings are merged. If a longer string ends-with the same value as a
  shorter string, these entries are also merged.
  """
  # Pieces of the concatenated big string (joined at output time).
  big_string: Sequence[str]
  # Offset of each added string within the big string.
  offsets: Dict[str, int]
  # Order in which each string was first added; keeps output stable.
  insert_point: Dict[str, int]

  # NOTE(review): the 'class BigCString:' header, remaining attribute
  # declarations and the '__init__' definition line are not visible in
  # this fragment; the two assignments below are presumed to belong to
  # __init__.
  self.insert_number = 0;
  self.insert_point = {}
  def add(self, s: str, metric: bool) -> None:
    """Called to add to the big string."""
    # NOTE(review): the lines recording s into self.strings (and, for
    # metric=True, into self.metrics) are not visible in this fragment.
    if s not in self.strings:
      # Remember first-insertion order so later output is deterministic.
      self.insert_point[s] = self.insert_number
      self.insert_number += 1
  def compute(self) -> None:
    """Called once all strings are added to compute the string and offsets."""
    # NOTE(review): the initialization of folded_strings, the best_pos
    # bookkeeping inside the scan, and the big_string/offsets resets are
    # not visible in this fragment.

    # Determine if two strings can be folded, ie. let 1 string use the
    # end of another. First reverse all strings and sort them.
    sorted_reversed_strings = sorted([x[::-1] for x in self.strings])

    # Strings 'xyz' and 'yz' will now be [ 'zy', 'zyx' ]. Scan forward
    # for each string to see if there is a better candidate to fold it
    # into, in the example rather than using 'yz' we can use'xyz' at
    # an offset of 1. We record which string can be folded into which
    # in folded_strings, we don't need to record the offset as it is
    # trivially computed from the string lengths.
    for pos,s in enumerate(sorted_reversed_strings):
      for check_pos in range(pos + 1, len(sorted_reversed_strings)):
        if sorted_reversed_strings[check_pos].startswith(s):
          folded_strings[s[::-1]] = sorted_reversed_strings[best_pos][::-1]

    # Compute reverse mappings for debugging.
    fold_into_strings = collections.defaultdict(set)
    for key, val in folded_strings.items():
        fold_into_strings[val].add(key)

    # big_string_offset is the current location within the C string
    # being appended to - comments, etc. don't count. big_string is
    # the string contents represented as a list. Strings are immutable
    # in Python and so appending to one causes memory issues, while
    big_string_offset = 0

    # Sort key: metrics after events, then insertion order, then the
    # string itself — keeps generated output stable across runs.
    def string_cmp_key(s: str) -> Tuple[bool, int, str]:
      return (s in self.metrics, self.insert_point[s], s)

    # Emit all strings that aren't folded in a sorted manner.
    for s in sorted(self.strings, key=string_cmp_key):
      if s not in folded_strings:
        self.offsets[s] = big_string_offset
        self.big_string.append(f'/* offset={big_string_offset} */ "')
        self.big_string.append(s)
        self.big_string.append('"')
        if s in fold_into_strings:
          self.big_string.append(' /* also: ' + ', '.join(fold_into_strings[s]) + ' */')
        self.big_string.append('\n')
        big_string_offset += c_len(s)

    # Compute the offsets of the folded strings.
    for s in folded_strings.keys():
      assert s not in self.offsets
      folded_s = folded_strings[s]
      # A folded string ends at the same byte as its host string.
      self.offsets[s] = self.offsets[folded_s] + c_len(folded_s) - c_len(s)
  """Representation of an event loaded from a json file dictionary."""

  def __init__(self, jd: dict):
    """Constructor passed the dictionary of parsed json values."""
    # NOTE(review): this fragment is missing a number of interior lines
    # (guards, dictionary literals' opening/closing braces, else
    # branches); comments below document only what is visible.

    def llx(x: int) -> str:
      """Convert an int to a string similar to a printf modifier of %#llx."""
      return '0' if x == 0 else hex(x)

    def fixdesc(s: str) -> str:
      """Fix formatting issue for the desc string."""
      # Strip trailing '. ' / '.' then escape newlines, quotes and CRs so
      # the text can be embedded in a C string literal.
      return removesuffix(removesuffix(removesuffix(s, '. '),
                                       '. '), '.').replace('\n', '\\n').replace(
                                           '\"', '\\"').replace('\r', '\\r')

    def convert_aggr_mode(aggr_mode: str) -> Optional[str]:
      """Returns the aggr_mode_class enum value associated with the JSON string."""
      aggr_mode_to_enum = {
      return aggr_mode_to_enum[aggr_mode]

    def convert_metric_constraint(metric_constraint: str) -> Optional[str]:
      """Returns the metric_event_groups enum value associated with the JSON string."""
      if not metric_constraint:
      # Values are encoded as enum ordinals ('1'..'3'), see
      # _json_enum_attributes handling during decompress.
      metric_constraint_to_enum = {
          'NO_GROUP_EVENTS': '1',
          'NO_GROUP_EVENTS_NMI': '2',
          'NO_NMI_WATCHDOG': '2',
          'NO_GROUP_EVENTS_SMT': '3',
      return metric_constraint_to_enum[metric_constraint]

    def lookup_msr(num: str) -> Optional[str]:
      """Converts the msr number, or first in a list to the appropriate event field."""
          0x1A6: 'offcore_rsp=',
          0x1A7: 'offcore_rsp=',
      # MSRIndex may be a comma separated list; only the first is used.
      return msrmap[int(num.split(',', 1)[0], 0)]

    def real_event(name: str, event: str) -> Optional[str]:
      """Convert well known event names to an event string otherwise use the event argument."""
          'inst_retired.any': 'event=0xc0,period=2000003',
          'inst_retired.any_p': 'event=0xc0,period=2000003',
          'cpu_clk_unhalted.ref': 'event=0x0,umask=0x03,period=2000003',
          'cpu_clk_unhalted.thread': 'event=0x3c,period=2000003',
          'cpu_clk_unhalted.core': 'event=0x3c,period=2000003',
          'cpu_clk_unhalted.thread_any': 'event=0x3c,any=1,period=2000003',
      if name.lower() in fixed:
        return fixed[name.lower()]

    def unit_to_pmu(unit: str) -> Optional[str]:
      """Convert a JSON Unit to Linux PMU name."""
        return 'default_core'
      # Comment brought over from jevents.c:
      # it's not realistic to keep adding these, we need something more scalable ...
          'CBO': 'uncore_cbox',
          'QPI LL': 'uncore_qpi',
          'SBO': 'uncore_sbox',
          'iMPH-U': 'uncore_arb',
          'CPU-M-CF': 'cpum_cf',
          'CPU-M-SF': 'cpum_sf',
          'PAI-CRYPTO' : 'pai_crypto',
          'PAI-EXT' : 'pai_ext',
          'UPI LL': 'uncore_upi',
          'hisi_sicl,cpa': 'hisi_sicl,cpa',
          'hisi_sccl,ddrc': 'hisi_sccl,ddrc',
          'hisi_sccl,hha': 'hisi_sccl,hha',
          'hisi_sccl,l3c': 'hisi_sccl,l3c',
          'imx8_ddr': 'imx8_ddr',
          'cpu_core': 'cpu_core',
          'cpu_atom': 'cpu_atom',
          'ali_drw': 'ali_drw',
          'arm_cmn': 'arm_cmn',
      # Unknown units default to an 'uncore_' prefixed lower-case name.
      return table[unit] if unit in table else f'uncore_{unit.lower()}'

    # EventCode may be a comma separated list; only the first is used.
    if 'EventCode' in jd:
      eventcode = int(jd['EventCode'].split(',', 1)[0], 0)
      eventcode |= int(jd['ExtSel']) << 8
    configcode = int(jd['ConfigCode'], 0) if 'ConfigCode' in jd else None
    eventidcode = int(jd['EventidCode'], 0) if 'EventidCode' in jd else None
    self.name = jd['EventName'].lower() if 'EventName' in jd else None
    self.compat = jd.get('Compat')
    self.desc = fixdesc(jd.get('BriefDescription'))
    self.long_desc = fixdesc(jd.get('PublicDescription'))
    precise = jd.get('PEBS')
    msr = lookup_msr(jd.get('MSRIndex'))
    msrval = jd.get('MSRValue')
      extra_desc += ' Supports address when precise'
      extra_desc += ' Spec update: ' + jd['Errata']
    self.pmu = unit_to_pmu(jd.get('Unit'))
    filter = jd.get('Filter')
    self.unit = jd.get('ScaleUnit')
    self.perpkg = jd.get('PerPkg')
    self.aggr_mode = convert_aggr_mode(jd.get('AggregationMode'))
    self.deprecated = jd.get('Deprecated')
    self.metric_name = jd.get('MetricName')
    self.metric_group = jd.get('MetricGroup')
    self.metricgroup_no_group = jd.get('MetricgroupNoGroup')
    self.default_metricgroup_name = jd.get('DefaultMetricgroupName')
    self.event_grouping = convert_metric_constraint(jd.get('MetricConstraint'))
    self.metric_expr = None
    if 'MetricExpr' in jd:
      self.metric_expr = metric.ParsePerfJson(jd['MetricExpr']).Simplify()
    # Note, the metric formula for the threshold isn't parsed as the &
    # and > have incorrect precedence.
    self.metric_threshold = jd.get('MetricThreshold')

    arch_std = jd.get('ArchStdEvent')
    if precise and self.desc and '(Precise Event)' not in self.desc:
      extra_desc += ' (Must be precise)' if precise == '2' else (' (Precise '
    # The perf tool event string: prefer ConfigCode, then EventidCode,
    # then fall back to EventCode.
    if configcode is not None:
      event = f'config={llx(configcode)}'
    elif eventidcode is not None:
      event = f'eventid={llx(eventidcode)}'
      event = f'event={llx(eventcode)}'
        ('AnyThread', 'any='),
        ('PortMask', 'ch_mask='),
        ('CounterMask', 'cmask='),
        ('EdgeDetect', 'edge='),
        ('FCMask', 'fc_mask='),
        ('SampleAfterValue', 'period='),
        ('NodeType', 'type='),
        ('RdWrMask', 'rdwrmask='),
    for key, value in event_fields:
      # '0' valued fields add nothing to the event string.
      if key in jd and jd[key] != '0':
        event += ',' + value + jd[key]
      event += f',{filter}'
      event += f',{msr}{msrval}'
    if self.desc and extra_desc:
      self.desc += extra_desc
    if self.long_desc and extra_desc:
      self.long_desc += extra_desc
      if arch_std.lower() in _arch_std_events:
        event = _arch_std_events[arch_std.lower()].event
        # Copy from the architecture standard event to self for undefined fields.
        for attr, value in _arch_std_events[arch_std.lower()].__dict__.items():
          if hasattr(self, attr) and not getattr(self, attr):
            setattr(self, attr, value)
        raise argparse.ArgumentTypeError('Cannot find arch std event:', arch_std)

    self.event = real_event(self.name, event)
  def __repr__(self) -> str:
    """String representation primarily for debugging."""
    # NOTE(review): the accumulator initialization and the final return
    # are not visible in this fragment; each attribute is appended as a
    # tab-indented 'name = value,' line.
    for attr, value in self.__dict__.items():
        s += f'\t{attr} = {value},\n'
  def build_c_string(self, metric: bool) -> str:
    """Serialize the visited attributes as '\\000'-separated fields.

    NOTE(review): the accumulator initialization and the non-enum else
    branch / return are not visible in this fragment.
    """
    for attr in _json_metric_attributes if metric else _json_event_attributes:
      x = getattr(self, attr)
      if metric and x and attr == 'metric_expr':
        # Convert parsed metric expressions into a string. Slashes
        # must be doubled in the file.
        x = x.ToPerfJson().replace('\\', '\\\\')
      if metric and x and attr == 'metric_threshold':
        x = x.replace('\\', '\\\\')
      if attr in _json_enum_attributes:
        # Enum/bool attributes are stored as a single '0'..'9' digit.
        s += f'{x}\\000' if x else '\\000'
407 def to_c_string(self, metric: bool) -> str:
408 """Representation of the event as a C struct initializer."""
410 s = self.build_c_string(metric)
411 return f'{{ { _bcs.offsets[s] } }}, /* {s} */\n'
@lru_cache(maxsize=None)
def read_json_events(path: str, topic: str) -> Sequence[JsonEvent]:
  """Read json events from the specified file."""
  # NOTE(review): the enclosing 'try:' line, the re-raise, and the loop
  # headers iterating over the parsed events are not visible in this
  # fragment.
    events = json.load(open(path), object_hook=JsonEvent)
  except BaseException as err:
    print(f"Exception processing {path}")
  # Collect (pmu, name, expr) triples so metrics can be rewritten in
  # terms of each other; names containing '-' are skipped here.
  metrics: list[Tuple[str, str, metric.Expression]] = []
    if event.metric_name and '-' not in event.metric_name:
      metrics.append((event.pmu, event.metric_name, event.metric_expr))
  updates = metric.RewriteMetricsInTermsOfOthers(metrics)
    if event.metric_name in updates:
      # print(f'Updated {event.metric_name} from\n"{event.metric_expr}"\n'
      #       f'to\n"{updates[event.metric_name]}"')
      event.metric_expr = updates[event.metric_name]
def preprocess_arch_std_files(archpath: str) -> None:
  """Read in all architecture standard events."""
  global _arch_std_events
  for item in os.scandir(archpath):
    # Only top-level json files in the architecture directory hold
    # architecture standard events.
    if item.is_file() and item.name.endswith('.json'):
      for event in read_json_events(item.path, topic=''):
        # NOTE(review): the guard on event.name is not visible in this
        # fragment. Standard events are indexed by lower-cased name.
          _arch_std_events[event.name.lower()] = event
        if event.metric_name:
          _arch_std_events[event.metric_name.lower()] = event
def add_events_table_entries(item: os.DirEntry, topic: str) -> None:
  """Add contents of file to _pending_events table."""
  # NOTE(review): the guards distinguishing events (e.name) from
  # metrics (e.metric_name) are not visible in this fragment.
  for e in read_json_events(item.path, topic):
      _pending_events.append(e)
      _pending_metrics.append(e)
def print_pending_events() -> None:
  """Optionally close events table."""
  # NOTE(review): several lines are missing from this fragment: the
  # fix_none body, early return for no pending events, the else branch
  # selecting _event_tables, and the pmus/last_pmu bookkeeping.

  def event_cmp_key(j: JsonEvent) -> Tuple[str, str, bool, str, str]:
    def fix_none(s: Optional[str]) -> str:
    # Sort by pmu (with ',' mapped to '_'), then name, desc presence,
    # topic and metric name so the emitted table is deterministic.
    return (fix_none(j.pmu).replace(',','_'), fix_none(j.name), j.desc is not None, fix_none(j.topic),
            fix_none(j.metric_name))

  global _pending_events
  if not _pending_events:

  global _pending_events_tblname
  # Tables named '*_sys' come from /sys directories and are tracked
  # separately from regular event tables.
  if _pending_events_tblname.endswith('_sys'):
    global _sys_event_tables
    _sys_event_tables.append(_pending_events_tblname)
    _event_tables.append(_pending_events_tblname)

  for event in sorted(_pending_events, key=event_cmp_key):
    # A new pmu starts a new per-pmu compact event array.
    if event.pmu != last_pmu:
        _args.output_file.write('};\n')
      pmu_name = event.pmu.replace(',', '_')
      _args.output_file.write(
          f'static const struct compact_pmu_event {_pending_events_tblname}_{pmu_name}[] = {{\n')
      pmus.add((event.pmu, pmu_name))
    _args.output_file.write(event.to_c_string(metric=False))

  _args.output_file.write(f"""
const struct pmu_table_entry {_pending_events_tblname}[] = {{
  for (pmu, tbl_pmu) in sorted(pmus):
    pmu_name = f"{pmu}\\000"
    _args.output_file.write(f"""{{
     .entries = {_pending_events_tblname}_{tbl_pmu},
     .num_entries = ARRAY_SIZE({_pending_events_tblname}_{tbl_pmu}),
     .pmu_name = {{ {_bcs.offsets[pmu_name]} /* {pmu_name} */ }},
  _args.output_file.write('};\n\n')
def print_pending_metrics() -> None:
  """Optionally close metrics table."""
  # NOTE(review): as with print_pending_events, the fix_none body,
  # early return, else branch and pmus/last_pmu bookkeeping are not
  # visible in this fragment.

  def metric_cmp_key(j: JsonEvent) -> Tuple[bool, str, str]:
    def fix_none(s: Optional[str]) -> str:
    # Sort by desc presence, then pmu, then metric name for stable output.
    return (j.desc is not None, fix_none(j.pmu), fix_none(j.metric_name))

  global _pending_metrics
  if not _pending_metrics:

  global _pending_metrics_tblname
  if _pending_metrics_tblname.endswith('_sys'):
    global _sys_metric_tables
    _sys_metric_tables.append(_pending_metrics_tblname)
    _metric_tables.append(_pending_metrics_tblname)

  for metric in sorted(_pending_metrics, key=metric_cmp_key):
    # A new pmu starts a new per-pmu compact metric array.
    if metric.pmu != last_pmu:
        _args.output_file.write('};\n')
      pmu_name = metric.pmu.replace(',', '_')
      _args.output_file.write(
          f'static const struct compact_pmu_event {_pending_metrics_tblname}_{pmu_name}[] = {{\n')
      last_pmu = metric.pmu
      pmus.add((metric.pmu, pmu_name))
    _args.output_file.write(metric.to_c_string(metric=True))
  _pending_metrics = []

  _args.output_file.write(f"""
const struct pmu_table_entry {_pending_metrics_tblname}[] = {{
  for (pmu, tbl_pmu) in sorted(pmus):
    pmu_name = f"{pmu}\\000"
    _args.output_file.write(f"""{{
     .entries = {_pending_metrics_tblname}_{tbl_pmu},
     .num_entries = ARRAY_SIZE({_pending_metrics_tblname}_{tbl_pmu}),
     .pmu_name = {{ {_bcs.offsets[pmu_name]} /* {pmu_name} */ }},
  _args.output_file.write('};\n\n')
def get_topic(topic: str) -> str:
  """Convert a JSON file name into a topic string.

  Files whose names end in 'metrics.json' all share the fixed 'metrics'
  topic; any other file name has its '.json' suffix removed and '-'
  mapped to ' '.
  """
  if topic.endswith('metrics.json'):
    # Restored: without this return the branch was empty and metrics
    # files would fall through to the generic name handling.
    return 'metrics'
  return removesuffix(topic, '.json').replace('-', ' ')
def preprocess_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
  """First-pass walk callback: register strings with the BigCString.

  NOTE(review): the computation of 'level' and the early 'return'
  statements of the guard clauses are not visible in this fragment.
  """

  # base dir or too deep
  if level == 0 or level > 4:

  # Ignore other directories. If the file name does not have a .json
  # extension, ignore it. It could be a readme.txt for instance.
  if not item.is_file() or not item.name.endswith('.json'):

  if item.name == 'metricgroups.json':
    metricgroup_descriptions = json.load(open(item.path))
    for mgroup in metricgroup_descriptions:
      assert len(mgroup) > 1, parents
      # Both group name and description are stored '\000'-terminated in
      # the big string.
      description = f"{metricgroup_descriptions[mgroup]}\\000"
      mgroup = f"{mgroup}\\000"
      _bcs.add(mgroup, metric=True)
      _bcs.add(description, metric=True)
      _metricgroups[mgroup] = description

  topic = get_topic(item.name)
  for event in read_json_events(item.path, topic):
    pmu_name = f"{event.pmu}\\000"
    _bcs.add(pmu_name, metric=False)
    _bcs.add(event.build_c_string(metric=False), metric=False)
    if event.metric_name:
      _bcs.add(pmu_name, metric=True)
      _bcs.add(event.build_c_string(metric=True), metric=True)
def process_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
  """Process a JSON file during the main walk."""
  # NOTE(review): the is_leaf_dir body, 'level' computation and the
  # early 'return' statements of the guards are not visible in this
  # fragment.
  def is_leaf_dir(path: str) -> bool:
    for item in os.scandir(path):

  # model directory, reset topic
  if item.is_dir() and is_leaf_dir(item.path):
    # Entering a new model directory: flush any tables accumulated for
    # the previous model before renaming the pending tables.
    print_pending_events()
    print_pending_metrics()

    global _pending_events_tblname
    _pending_events_tblname = file_name_to_table_name('pmu_events_', parents, item.name)
    global _pending_metrics_tblname
    _pending_metrics_tblname = file_name_to_table_name('pmu_metrics_', parents, item.name)

    if item.name == 'sys':
      _sys_event_table_to_metric_table_mapping[_pending_events_tblname] = _pending_metrics_tblname

  # base dir or too deep
  if level == 0 or level > 4:

  # Ignore other directories. If the file name does not have a .json
  # extension, ignore it. It could be a readme.txt for instance.
  if not item.is_file() or not item.name.endswith('.json') or item.name == 'metricgroups.json':

  add_events_table_entries(item, get_topic(item.name))
def print_mapping_table(archs: Sequence[str]) -> None:
  """Read the mapfile and generate the struct from cpuid string to event table."""
  # NOTE(review): many lines of the emitted C text and of the csv loop
  # (first-row handling, else branches, closing braces) are not visible
  # in this fragment.
  _args.output_file.write("""
/* Struct used to make the PMU event table implementation opaque to callers. */
struct pmu_events_table {
        const struct pmu_table_entry *pmus;

/* Struct used to make the PMU metric table implementation opaque to callers. */
struct pmu_metrics_table {
        const struct pmu_table_entry *pmus;

 * Map a CPU to its table of PMU events. The CPU is identified by the
 * cpuid field, which is an arch-specific identifier for the CPU.
 * The identifier specified in tools/perf/pmu-events/arch/xxx/mapfile
 * must match the get_cpuid_str() in tools/perf/arch/xxx/util/header.c)
 * The cpuid can contain any character other than the comma.
struct pmu_events_map {
        struct pmu_events_table event_table;
        struct pmu_metrics_table metric_table;

 * Global table mapping each known CPU for the architecture to its
 * table of PMU events.
const struct pmu_events_map pmu_events_map[] = {
      _args.output_file.write("""{
\t.arch = "testarch",
\t.cpuid = "testcpu",
\t\t.pmus = pmu_events__test_soc_cpu,
\t\t.num_pmus = ARRAY_SIZE(pmu_events__test_soc_cpu),
\t\t.pmus = pmu_metrics__test_soc_cpu,
\t\t.num_pmus = ARRAY_SIZE(pmu_metrics__test_soc_cpu),
      with open(f'{_args.starting_dir}/{arch}/mapfile.csv') as csvfile:
        table = csv.reader(csvfile)
          # Skip the first row or any row beginning with #.
          if not first and len(row) > 0 and not row[0].startswith('#'):
            event_tblname = file_name_to_table_name('pmu_events_', [], row[2].replace('/', '_'))
            if event_tblname in _event_tables:
              event_size = f'ARRAY_SIZE({event_tblname})'
              event_tblname = 'NULL'
            metric_tblname = file_name_to_table_name('pmu_metrics_', [], row[2].replace('/', '_'))
            if metric_tblname in _metric_tables:
              metric_size = f'ARRAY_SIZE({metric_tblname})'
              metric_tblname = 'NULL'
            if event_size == '0' and metric_size == '0':
            cpuid = row[0].replace('\\', '\\\\')
            _args.output_file.write(f"""{{
\t.cpuid = "{cpuid}",
\t\t.pmus = {event_tblname},
\t\t.num_pmus = {event_size}
\t\t.pmus = {metric_tblname},
\t\t.num_pmus = {metric_size}
  _args.output_file.write("""{
\t.event_table = { 0, 0 },
\t.metric_table = { 0, 0 },
def print_system_mapping_table() -> None:
  """C struct mapping table array for tables from /sys directories."""
  # NOTE(review): this function also emits the decompress helpers and
  # the C iteration/search API; large portions of the emitted C text
  # (braces, returns, loop tails) are not visible in this fragment.
  _args.output_file.write("""
struct pmu_sys_events {
\tstruct pmu_events_table event_table;
\tstruct pmu_metrics_table metric_table;

static const struct pmu_sys_events pmu_sys_event_tables[] = {
  printed_metric_tables = []
  for tblname in _sys_event_tables:
    _args.output_file.write(f"""\t{{
\t\t.event_table = {{
\t\t\t.pmus = {tblname},
\t\t\t.num_pmus = ARRAY_SIZE({tblname})
    metric_tblname = _sys_event_table_to_metric_table_mapping[tblname]
    if metric_tblname in _sys_metric_tables:
      _args.output_file.write(f"""
\t\t.metric_table = {{
\t\t\t.pmus = {metric_tblname},
\t\t\t.num_pmus = ARRAY_SIZE({metric_tblname})
      printed_metric_tables.append(metric_tblname)
    _args.output_file.write(f"""
\t\t.name = \"{tblname}\",
  # Metric-only sys tables that weren't paired with an event table above.
  for tblname in _sys_metric_tables:
    if tblname in printed_metric_tables:
    _args.output_file.write(f"""\t{{
\t\t.metric_table = {{
\t\t\t.pmus = {tblname},
\t\t\t.num_pmus = ARRAY_SIZE({tblname})
\t\t.name = \"{tblname}\",
  _args.output_file.write("""\t{
\t\t.event_table = { 0, 0 },
\t\t.metric_table = { 0, 0 },

static void decompress_event(int offset, struct pmu_event *pe)
\tconst char *p = &big_c_string[offset];
  # Emit one assignment per attribute in the order build_c_string wrote
  # them; enum attributes are a single digit, others are C strings.
  for attr in _json_event_attributes:
    _args.output_file.write(f'\n\tpe->{attr} = ')
    if attr in _json_enum_attributes:
      _args.output_file.write("*p - '0';\n")
      _args.output_file.write("(*p == '\\0' ? NULL : p);\n")
    if attr == _json_event_attributes[-1]:
    if attr in _json_enum_attributes:
      _args.output_file.write('\tp++;')
      _args.output_file.write('\twhile (*p++);')
  _args.output_file.write("""}

static void decompress_metric(int offset, struct pmu_metric *pm)
\tconst char *p = &big_c_string[offset];
  for attr in _json_metric_attributes:
    _args.output_file.write(f'\n\tpm->{attr} = ')
    if attr in _json_enum_attributes:
      _args.output_file.write("*p - '0';\n")
      _args.output_file.write("(*p == '\\0' ? NULL : p);\n")
    if attr == _json_metric_attributes[-1]:
    if attr in _json_enum_attributes:
      _args.output_file.write('\tp++;')
      _args.output_file.write('\twhile (*p++);')
  _args.output_file.write("""}

static int pmu_events_table__for_each_event_pmu(const struct pmu_events_table *table,
                                                const struct pmu_table_entry *pmu,
                                                pmu_event_iter_fn fn,
	struct pmu_event pe = {
		.pmu = &big_c_string[pmu->pmu_name.offset],
	for (uint32_t i = 0; i < pmu->num_entries; i++) {
		decompress_event(pmu->entries[i].offset, &pe);
		ret = fn(&pe, table, data);

static int pmu_events_table__find_event_pmu(const struct pmu_events_table *table,
                                            const struct pmu_table_entry *pmu,
                                            pmu_event_iter_fn fn,
	struct pmu_event pe = {
		.pmu = &big_c_string[pmu->pmu_name.offset],
	int low = 0, high = pmu->num_entries - 1;
	while (low <= high) {
		int cmp, mid = (low + high) / 2;
		decompress_event(pmu->entries[mid].offset, &pe);
		if (!pe.name && !name)
		if (!pe.name && name) {
		if (pe.name && !name) {
		cmp = strcasecmp(pe.name, name);
		return fn ? fn(&pe, table, data) : 0;

int pmu_events_table__for_each_event(const struct pmu_events_table *table,
                                     struct perf_pmu *pmu,
                                     pmu_event_iter_fn fn,
	for (size_t i = 0; i < table->num_pmus; i++) {
		const struct pmu_table_entry *table_pmu = &table->pmus[i];
		const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
		if (pmu && !pmu__name_match(pmu, pmu_name))
		ret = pmu_events_table__for_each_event_pmu(table, table_pmu, fn, data);

int pmu_events_table__find_event(const struct pmu_events_table *table,
                                 struct perf_pmu *pmu,
                                 pmu_event_iter_fn fn,
	for (size_t i = 0; i < table->num_pmus; i++) {
		const struct pmu_table_entry *table_pmu = &table->pmus[i];
		const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
		if (!pmu__name_match(pmu, pmu_name))
		ret = pmu_events_table__find_event_pmu(table, table_pmu, name, fn, data);

size_t pmu_events_table__num_events(const struct pmu_events_table *table,
                                    struct perf_pmu *pmu)
	for (size_t i = 0; i < table->num_pmus; i++) {
		const struct pmu_table_entry *table_pmu = &table->pmus[i];
		const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
		if (pmu__name_match(pmu, pmu_name))
			count += table_pmu->num_entries;

static int pmu_metrics_table__for_each_metric_pmu(const struct pmu_metrics_table *table,
                                                  const struct pmu_table_entry *pmu,
                                                  pmu_metric_iter_fn fn,
	struct pmu_metric pm = {
		.pmu = &big_c_string[pmu->pmu_name.offset],
	for (uint32_t i = 0; i < pmu->num_entries; i++) {
		decompress_metric(pmu->entries[i].offset, &pm);
		ret = fn(&pm, table, data);

int pmu_metrics_table__for_each_metric(const struct pmu_metrics_table *table,
                                       pmu_metric_iter_fn fn,
	for (size_t i = 0; i < table->num_pmus; i++) {
		int ret = pmu_metrics_table__for_each_metric_pmu(table, &table->pmus[i],

static const struct pmu_events_map *map_for_pmu(struct perf_pmu *pmu)
		const struct pmu_events_map *map;
		struct perf_pmu *pmu;
		const struct pmu_events_map *map;
	static bool has_last_result, has_last_map_search;
	const struct pmu_events_map *map = NULL;
	if (has_last_result && last_result.pmu == pmu)
		return last_result.map;
	cpuid = perf_pmu__getcpuid(pmu);
	 * On some platforms which uses cpus map, cpuid can be NULL for
	 * PMUs other than CORE PMUs.
		goto out_update_last_result;
	if (has_last_map_search && !strcmp(last_map_search.cpuid, cpuid)) {
		map = last_map_search.map;
		map = &pmu_events_map[i++];
			if (!strcmp_cpuid_str(map->cpuid, cpuid))
		free(last_map_search.cpuid);
		last_map_search.cpuid = cpuid;
		last_map_search.map = map;
		has_last_map_search = true;
out_update_last_result:
	last_result.pmu = pmu;
	last_result.map = map;
	has_last_result = true;

const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu)
	const struct pmu_events_map *map = map_for_pmu(pmu);
			return &map->event_table;
	for (size_t i = 0; i < map->event_table.num_pmus; i++) {
		const struct pmu_table_entry *table_pmu = &map->event_table.pmus[i];
		const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
		if (pmu__name_match(pmu, pmu_name))
			return &map->event_table;

const struct pmu_metrics_table *perf_pmu__find_metrics_table(struct perf_pmu *pmu)
	const struct pmu_events_map *map = map_for_pmu(pmu);
			return &map->metric_table;
	for (size_t i = 0; i < map->metric_table.num_pmus; i++) {
		const struct pmu_table_entry *table_pmu = &map->metric_table.pmus[i];
		const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
		if (pmu__name_match(pmu, pmu_name))
			return &map->metric_table;

const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid)
	for (const struct pmu_events_map *tables = &pmu_events_map[0];
		if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
			return &tables->event_table;

const struct pmu_metrics_table *find_core_metrics_table(const char *arch, const char *cpuid)
	for (const struct pmu_events_map *tables = &pmu_events_map[0];
		if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
			return &tables->metric_table;

int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data)
	for (const struct pmu_events_map *tables = &pmu_events_map[0];
		int ret = pmu_events_table__for_each_event(&tables->event_table,
		                                           /*pmu=*/ NULL, fn, data);

int pmu_for_each_core_metric(pmu_metric_iter_fn fn, void *data)
	for (const struct pmu_events_map *tables = &pmu_events_map[0];
		int ret = pmu_metrics_table__for_each_metric(&tables->metric_table, fn, data);

const struct pmu_events_table *find_sys_events_table(const char *name)
	for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
		if (!strcmp(tables->name, name))
			return &tables->event_table;

int pmu_for_each_sys_event(pmu_event_iter_fn fn, void *data)
	for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
		int ret = pmu_events_table__for_each_event(&tables->event_table,
		                                           /*pmu=*/ NULL, fn, data);

int pmu_for_each_sys_metric(pmu_metric_iter_fn fn, void *data)
	for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
		int ret = pmu_metrics_table__for_each_metric(&tables->metric_table, fn, data);
def print_metricgroups() -> None:
  """Emit the metricgroup name/description lookup table and the C
  describe_metricgroup() binary-search accessor."""
  _args.output_file.write("""
static const int metricgroups[][2] = {
  # Pairs of big-string offsets: [0] group name, [1] its description.
  for mgroup in sorted(_metricgroups):
    description = _metricgroups[mgroup]
    _args.output_file.write(
        f'\t{{ {_bcs.offsets[mgroup]}, {_bcs.offsets[description]} }}, /* {mgroup} => {description} */\n'
  _args.output_file.write("""
const char *describe_metricgroup(const char *group)
	int low = 0, high = (int)ARRAY_SIZE(metricgroups) - 1;

	while (low <= high) {
		int mid = (low + high) / 2;
		const char *mgroup = &big_c_string[metricgroups[mid][0]];
		int cmp = strcmp(mgroup, group);
			return &big_c_string[metricgroups[mid][1]];
		} else if (cmp < 0) {
def dir_path(path: str) -> str:
  """Validate path is a directory for argparse.

  Returns the path unchanged when it names an existing directory,
  otherwise raises argparse.ArgumentTypeError so argparse reports a
  clean usage error.
  """
  if os.path.isdir(path):
    # Restored: without this return the function fell through to the
    # error even for valid directories.
    return path
  raise argparse.ArgumentTypeError(f'\'{path}\' is not a valid directory')
def ftw(path: str, parents: Sequence[str],
        action: Callable[[Sequence[str], os.DirEntry], None]) -> None:
  """Replicate the directory/file walking behavior of C's file tree walk."""
  # NOTE(review): the 'continue' skipping non-matching models and the
  # is_dir() guard before the recursive call are not visible in this
  # fragment. Entries are visited in sorted name order for determinism.
  for item in sorted(os.scandir(path), key=lambda e: e.name):
    if _args.model != 'all' and item.is_dir():
      # Check if the model matches one in _args.model.
      if len(parents) == _args.model.split(',')[0].count('/'):
        # We're testing the correct directory.
        item_path = '/'.join(parents) + ('/' if len(parents) > 0 else '') + item.name
        if 'test' not in item_path and item_path not in _args.model.split(','):
    action(parents, item)
      ftw(item.path, parents + [item.name], action)
# NOTE(review): the enclosing 'def main()' line and many statements
# (archs initialization, sort, the per-arch loops' headers, _bcs.compute
# call, the trailing main() invocation) are not visible in this
# fragment; the statements below are presumed to form main()'s body.
ap = argparse.ArgumentParser()
ap.add_argument('arch', help='Architecture name like x86')
ap.add_argument('model', help='''Select a model such as skylake to
reduce the code size. Normally set to "all". For architectures like
ARM64 with an implementor/model, the model must include the implementor
such as "arm/cortex-a34".''',
    help='Root of tree containing architecture directories containing json files'
    'output_file', type=argparse.FileType('w', encoding='utf-8'), nargs='?', default=sys.stdout)
_args = ap.parse_args()

_args.output_file.write("""
#include <pmu-events/pmu-events.h>
#include "util/header.h"
#include "util/pmu.h"

struct compact_pmu_event {

struct pmu_table_entry {
\tconst struct compact_pmu_event *entries;
\tuint32_t num_entries;
\tstruct compact_pmu_event pmu_name;

# Select the architecture directories to process ('all' takes every
# one; 'test' is always included).
for item in os.scandir(_args.starting_dir):
  if not item.is_dir():
  if item.name == _args.arch or _args.arch == 'all' or item.name == 'test':
    archs.append(item.name)

  raise IOError(f'Missing architecture directory \'{_args.arch}\'')

  arch_path = f'{_args.starting_dir}/{arch}'
  preprocess_arch_std_files(arch_path)
  ftw(arch_path, [], preprocess_one_file)

_args.output_file.write('static const char *const big_c_string =\n')
for s in _bcs.big_string:
  _args.output_file.write(s)
_args.output_file.write(';\n\n')
  arch_path = f'{_args.starting_dir}/{arch}'
  ftw(arch_path, [], process_one_file)
  print_pending_events()
  print_pending_metrics()

print_mapping_table(archs)
print_system_mapping_table()
print_metricgroups()

if __name__ == '__main__':