@@ -31,6 +31,7 @@
log_header_fmt = '=QQQ'
rec_header_fmt = '=QQII'
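+# Size in bytes of a record header; the rest of each record is the argument payload.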
+rec_header_fmt_len = struct.calcsize(rec_header_fmt)
class SimpleException(Exception):
pass
@@ -43,35 +44,6 @@ def read_header(fobj, hfmt):
raise SimpleException('Error reading header. Wrong filetype provided?')
return struct.unpack(hfmt, hdr)
-def get_record(event_mapping, event_id_to_name, rechdr, fobj):
- """Deserialize a trace record from a file into a tuple
- (name, timestamp, pid, arg1, ..., arg6)."""
- event_id, timestamp_ns, length, pid = rechdr
- if event_id != dropped_event_id:
- name = event_id_to_name[event_id]
- try:
- event = event_mapping[name]
- except KeyError as e:
- raise SimpleException(
- f'{e} event is logged but is not declared in the trace events'
- 'file, try using trace-events-all instead.'
- )
-
- rec = (name, timestamp_ns, pid)
- for type, name in event.args:
- if is_string(type):
- l = fobj.read(4)
- (len,) = struct.unpack('=L', l)
- s = fobj.read(len)
- rec = rec + (s,)
- else:
- (value,) = struct.unpack('=Q', fobj.read(8))
- rec = rec + (value,)
- else:
- (dropped_count,) = struct.unpack('=Q', fobj.read(8))
- rec = ("dropped", timestamp_ns, pid, dropped_count)
- return rec
-
def get_mapping(fobj):
(event_id, ) = struct.unpack('=Q', fobj.read(8))
(len, ) = struct.unpack('=L', fobj.read(4))
@@ -79,10 +51,11 @@ def get_mapping(fobj):
return (event_id, name)
-def read_record(event_mapping, event_id_to_name, fobj):
- """Deserialize a trace record from a file into a tuple (event_num, timestamp, pid, arg1, ..., arg6)."""
- rechdr = read_header(fobj, rec_header_fmt)
- return get_record(event_mapping, event_id_to_name, rechdr, fobj)
+def read_record(fobj):
+    """Deserialize a trace record from a file into a tuple (event_id, timestamp, pid, args_payload)."""
+ event_id, timestamp_ns, record_length, record_pid = read_header(fobj, rec_header_fmt)
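+    # The record length includes the header itself, so the remaining bytes are the raw argument payload.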
+ args_payload = fobj.read(record_length - rec_header_fmt_len)
+ return (event_id, timestamp_ns, record_pid, args_payload)
def read_trace_header(fobj):
"""Read and verify trace file header"""
@@ -97,17 +70,28 @@ def read_trace_header(fobj):
if log_version != 4:
raise ValueError(f'Log format {log_version} not supported with this QEMU release!')
-def read_trace_records(event_mapping, event_id_to_name, fobj):
- """Deserialize trace records from a file, yielding record tuples (event_num, timestamp, pid, arg1, ..., arg6).
-
- Note that `event_id_to_name` is modified if the file contains mapping records.
+def read_trace_records(events, fobj, read_header):
+    """Deserialize trace records from a file, yielding record tuples (event, event_name, timestamp, pid, arg1, ..., arg6).
+
Args:
-        event_mapping (str -> Event): events dict, indexed by name
+        events (list of Event): known events, used to decode record arguments
- event_id_to_name (int -> str): event names dict, indexed by event ID
fobj (file): input file
+        read_header (bool): whether a trace file header was read from fobj
"""
+ frameinfo = inspect.getframeinfo(inspect.currentframe())
+ dropped_event = Event.build("Dropped_Event(uint64_t num_events_dropped)",
+ frameinfo.lineno + 1, frameinfo.filename)
+
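+    # Index the declared events by name and pre-seed the ID mapping with the dropped pseudo-event.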
+ event_mapping = {e.name: e for e in events}
+ event_mapping["dropped"] = dropped_event
+ event_id_to_name = {dropped_event_id: "dropped"}
+
+ # If there is no header assume event ID mapping matches events list
+ if not read_header:
+ for event_id, event in enumerate(events):
+ event_id_to_name[event_id] = event.name
+
while True:
t = fobj.read(8)
if len(t) == 0:
@@ -115,12 +99,35 @@ def read_trace_records(event_mapping, event_id_to_name, fobj):
(rectype, ) = struct.unpack('=Q', t)
if rectype == record_type_mapping:
- event_id, name = get_mapping(fobj)
- event_id_to_name[event_id] = name
+ event_id, event_name = get_mapping(fobj)
+ event_id_to_name[event_id] = event_name
else:
- rec = read_record(event_mapping, event_id_to_name, fobj)
+ event_id, timestamp_ns, pid, args_payload = read_record(fobj)
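+            # Dropped records resolve through the same path: dropped_event_id is pre-seeded in event_id_to_name.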
+ event_name = event_id_to_name[event_id]
- yield rec
+ try:
+ event = event_mapping[event_name]
+ except KeyError as e:
+ raise SimpleException(
+                    f'{e} event is logged but is not declared in the trace events '
+                    'file; try using trace-events-all instead.'
+ )
+
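+            # Decode the payload: string arguments are a 32-bit length followed by the bytes, all other arguments are 64-bit values.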
+ offset = 0
+ args = []
+            for arg_type, _ in event.args:
+                if is_string(arg_type):
+ (length,) = struct.unpack_from('=L', args_payload, offset=offset)
+ offset += 4
+ s = args_payload[offset:offset+length]
+ offset += length
+ args.append(s)
+ else:
+ (value,) = struct.unpack_from('=Q', args_payload, offset=offset)
+ offset += 8
+ args.append(value)
+
+            yield (event, event_name, timestamp_ns, pid, *args)
class Analyzer:
"""A trace file analyzer which processes trace records.
@@ -202,20 +209,6 @@ def process(events, log, analyzer, read_header=True):
if read_header:
read_trace_header(log)
- frameinfo = inspect.getframeinfo(inspect.currentframe())
- dropped_event = Event.build("Dropped_Event(uint64_t num_events_dropped)",
- frameinfo.lineno + 1, frameinfo.filename)
- event_mapping = {"dropped": dropped_event}
- event_id_to_name = {dropped_event_id: "dropped"}
-
- for event in events_list:
- event_mapping[event.name] = event
-
- # If there is no header assume event ID mapping matches events list
- if not read_header:
- for event_id, event in enumerate(events_list):
- event_id_to_name[event_id] = event.name
-
def build_fn(analyzer, event):
if isinstance(event, str):
return analyzer.catchall
@@ -238,12 +231,10 @@ def build_fn(analyzer, event):
with analyzer:
fn_cache = {}
- for rec in read_trace_records(event_mapping, event_id_to_name, log):
- event_num = rec[0]
- event = event_mapping[event_num]
- if event_num not in fn_cache:
- fn_cache[event_num] = build_fn(analyzer, event)
- fn_cache[event_num](event, rec)
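+        # Look up (and cache) the analyzer callback for each event as records are processed.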
+        for event, event_name, timestamp_ns, record_pid, *rec_args in read_trace_records(events, log, read_header):
+            if event_name not in fn_cache:
+                fn_cache[event_name] = build_fn(analyzer, event)
+            fn_cache[event_name](event, (event_name, timestamp_ns, record_pid, *rec_args))
if close_log:
log.close()