fxprof-processed-profile-0.7.0/.cargo_vcs_info.json0000644000000001660000000000100157720ustar { "git": { "sha1": "6cf3acd9109d42496569327497b30a3f670601e6" }, "path_in_vcs": "fxprof-processed-profile" }fxprof-processed-profile-0.7.0/.gitignore000064400000000000000000000000231046102023000165420ustar 00000000000000/target Cargo.lock fxprof-processed-profile-0.7.0/Cargo.toml0000644000000021430000000000100137650ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2021" rust-version = "1.60" name = "fxprof-processed-profile" version = "0.7.0" authors = ["Markus Stange "] description = "Create profiles in the Firefox Profiler's processed profile JSON format." readme = "README.md" license = "MIT OR Apache-2.0" repository = "https://github.com/mstange/samply/" [dependencies.bitflags] version = "2.5" [dependencies.debugid] version = "0.8.0" [dependencies.fxhash] version = "0.2.1" [dependencies.serde] version = "1.0.197" [dependencies.serde_derive] version = "1.0.188" [dependencies.serde_json] version = "1.0" [dev-dependencies.assert-json-diff] version = "2.0.1" fxprof-processed-profile-0.7.0/Cargo.toml.orig000064400000000000000000000010351046102023000174450ustar 00000000000000[package] name = "fxprof-processed-profile" version = "0.7.0" edition = "2021" rust-version = "1.60" # needed by bytesize authors = ["Markus Stange "] license = "MIT OR Apache-2.0" description = "Create profiles in the Firefox Profiler's processed profile JSON format." 
repository = "https://github.com/mstange/samply/" readme = "README.md" [dependencies] bitflags = "2.5" serde_json = "1.0" serde = "1.0.197" serde_derive = "1.0.188" debugid = "0.8.0" fxhash = "0.2.1" [dev-dependencies] assert-json-diff = "2.0.1" fxprof-processed-profile-0.7.0/README.md000064400000000000000000000066601046102023000160460ustar 00000000000000# fxprof-processed-profile A crate that allows creating profiles in the [Firefox Profiler](https://github.com/firefox-devtools/profiler)'s ["Processed profile" format](https://github.com/firefox-devtools/profiler/blob/main/docs-developer/processed-profile-format.md). Still work in progress, under-documented, and will have breaking changes frequently. ## Description This crate is a sibling crate to the `gecko_profile` crate. Profiles produced with this crate can be more efficient because they allow the Firefox Profiler to skip a processing step during loading, and because this format supports a "weight" column in the sample table. The sample weight can be used to collapse duplicate consecutive samples into one sample, which means that the individual sample timestamps don't have to be serialized into the JSON. This can save a ton of space. ## About the format When the Firefox Profiler is used with Firefox, the Firefox Profiler receives profile data in the "Gecko profile" format. Then it converts it into the "processed profile" format. The "processed profile" format is the format in which the files are stored when you upload the profile for sharing, or when you download it as a file. It is different from the "Gecko profile" format in the following ways: - There is one flat list of threads across all processes. Each thread comes with some information about its process, which allows the Firefox Profiler to group threads which belong to the same process. - Because of the flat list, the timestamps in all threads (from all processes) are relative to the same reference timestamp. 
This is different to the "Gecko profile" format where each process has its own time base. - The various tables in each thread are stored in a "struct of arrays" form. For example, the sample table has one flat list of timestamps, one flat list of stack indexes, and so forth. This is different to the "Gecko profile" format which contains one JS object for every individual sample - that object being an array such as `[stack_index, time, eventDelay, cpuDelta]`. The struct-of-arrays form makes the data cheaper to access and is much easier on the browser's GC. - The sample table in the "processed profile" format supports a weight column. The "Gecko profile" format currently does not have support for sample weights. - Each thread has a `funcTable`, a `resourceTable` and a `nativeSymbols` table. These tables do not exist in the "Gecko profile" format. - The structure of the `frameTable` is different. For example, each frame from the native stack has an integer code address, relative to the library that contains this address. In the "Gecko profile" format, the code address is stored in absolute form (process virtual memory address) as a hex string. - Native stacks in the "processed profile" format use "nudged" return addresses, i.e. return address minus one byte, so that they point into the calling instruction. This is different from the "Gecko profile" format, which uses raw return addresses. The "processed profile" format is almost identical to the JavaScript object structure which the Firefox Profiler keeps in memory; [the only difference](https://github.com/firefox-devtools/profiler/blob/af469ed357890f816ab71fb4ba4c9fe125336d94/src/profile-logic/process-profile.js#L1539-L1556) being the use of `stringArray` (which is a plain JSON array of strings) instead of `stringTable` (which is an object containing both the array and a map for fast string-to-index lookup). 
fxprof-processed-profile-0.7.0/src/category.rs000064400000000000000000000067531046102023000175440ustar 00000000000000use serde::ser::{Serialize, SerializeMap, SerializeSeq, Serializer}; use super::category_color::CategoryColor; /// A profiling category, can be set on stack frames and markers as part of a [`CategoryPairHandle`]. /// /// Categories can be created with [`Profile::add_category`](crate::Profile::add_category). #[derive(Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash)] pub struct CategoryHandle(pub(crate) u16); impl CategoryHandle { /// The "Other" category. All profiles have this category. pub const OTHER: Self = CategoryHandle(0); } impl Serialize for CategoryHandle { fn serialize(&self, serializer: S) -> Result { self.0.serialize(serializer) } } /// A profiling subcategory, can be set on stack frames and markers as part of a [`CategoryPairHandle`]. /// /// Subategories can be created with [`Profile::add_subcategory`](crate::Profile::add_subcategory). #[derive(Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash)] pub struct SubcategoryIndex(pub u8); /// A profiling category pair, consisting of a category and an optional subcategory. Can be set on stack frames and markers. /// /// Category pairs can be created with [`Profile::add_subcategory`](crate::Profile::add_subcategory) /// and from a [`CategoryHandle`]. #[derive(Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash)] pub struct CategoryPairHandle( pub(crate) CategoryHandle, pub(crate) Option, ); impl From for CategoryPairHandle { fn from(category: CategoryHandle) -> Self { CategoryPairHandle(category, None) } } /// The information about a category. #[derive(Debug)] pub struct Category { pub name: String, pub color: CategoryColor, pub subcategories: Vec, } impl Category { /// Add a subcategory to this category. 
pub fn add_subcategory(&mut self, subcategory_name: String) -> SubcategoryIndex { let subcategory_index = SubcategoryIndex(u8::try_from(self.subcategories.len()).unwrap()); self.subcategories.push(subcategory_name); subcategory_index } } impl Serialize for Category { fn serialize(&self, serializer: S) -> Result { let mut subcategories = self.subcategories.clone(); subcategories.push("Other".to_string()); let mut map = serializer.serialize_map(None)?; map.serialize_entry("name", &self.name)?; map.serialize_entry("color", &self.color)?; map.serialize_entry("subcategories", &subcategories)?; map.end() } } #[derive(Debug, Clone)] pub enum Subcategory { Normal(SubcategoryIndex), Other(CategoryHandle), } pub struct SerializableSubcategoryColumn<'a>(pub &'a [Subcategory], pub &'a [Category]); impl<'a> Serialize for SerializableSubcategoryColumn<'a> { fn serialize(&self, serializer: S) -> Result { let mut seq = serializer.serialize_seq(Some(self.0.len()))?; for subcategory in self.0 { match subcategory { Subcategory::Normal(index) => seq.serialize_element(&index.0)?, Subcategory::Other(category) => { // There is an implicit "Other" subcategory at the end of each category's // subcategory list. let subcategory_count = self.1[category.0 as usize].subcategories.len(); seq.serialize_element(&subcategory_count)? } } } seq.end() } } fxprof-processed-profile-0.7.0/src/category_color.rs000064400000000000000000000027271046102023000207370ustar 00000000000000use serde::ser::{Serialize, Serializer}; /// One of the available colors for a category. 
#[derive(Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq)] pub enum CategoryColor { Transparent, LightBlue, Red, LightRed, Orange, Blue, Green, Purple, Yellow, Brown, Magenta, LightGreen, Gray, DarkGray, } impl Serialize for CategoryColor { fn serialize(&self, serializer: S) -> Result { match self { CategoryColor::Transparent => "transparent".serialize(serializer), CategoryColor::LightBlue => "lightblue".serialize(serializer), CategoryColor::Red => "red".serialize(serializer), CategoryColor::LightRed => "lightred".serialize(serializer), CategoryColor::Orange => "orange".serialize(serializer), CategoryColor::Blue => "blue".serialize(serializer), CategoryColor::Green => "green".serialize(serializer), CategoryColor::Purple => "purple".serialize(serializer), CategoryColor::Yellow => "yellow".serialize(serializer), CategoryColor::Brown => "brown".serialize(serializer), CategoryColor::Magenta => "magenta".serialize(serializer), CategoryColor::LightGreen => "lightgreen".serialize(serializer), CategoryColor::Gray => "grey".serialize(serializer), CategoryColor::DarkGray => "darkgray".serialize(serializer), } } } fxprof-processed-profile-0.7.0/src/counters.rs000064400000000000000000000072031046102023000175600ustar 00000000000000use serde::ser::{Serialize, SerializeMap, Serializer}; use crate::{ProcessHandle, Timestamp}; /// A counter. Can be created with [`Profile::add_counter`](crate::Profile::add_counter). 
#[derive(Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash)] pub struct CounterHandle(pub(crate) usize); #[derive(Debug)] pub struct Counter { name: String, category: String, description: String, process: ProcessHandle, pid: String, samples: CounterSamples, } impl Counter { pub fn new( name: &str, category: &str, description: &str, process: ProcessHandle, pid: &str, ) -> Self { Counter { name: name.to_owned(), category: category.to_owned(), description: description.to_owned(), process, pid: pid.to_owned(), samples: CounterSamples::new(), } } pub fn process(&self) -> ProcessHandle { self.process } pub fn add_sample( &mut self, timestamp: Timestamp, value_delta: f64, number_of_operations_delta: u32, ) { self.samples .add_sample(timestamp, value_delta, number_of_operations_delta) } pub fn as_serializable(&self, main_thread_index: usize) -> impl Serialize + '_ { SerializableCounter { counter: self, main_thread_index, } } } struct SerializableCounter<'a> { counter: &'a Counter, /// The index of the main thread for the counter's process in the profile threads list. main_thread_index: usize, } impl<'a> Serialize for SerializableCounter<'a> { fn serialize(&self, serializer: S) -> Result { let mut map = serializer.serialize_map(None)?; map.serialize_entry("category", &self.counter.category)?; map.serialize_entry("name", &self.counter.name)?; map.serialize_entry("description", &self.counter.description)?; map.serialize_entry("mainThreadIndex", &self.main_thread_index)?; map.serialize_entry("pid", &self.counter.pid)?; map.serialize_entry( "sampleGroups", &[SerializableCounterSampleGroup(self.counter)], )?; map.end() } } struct SerializableCounterSampleGroup<'a>(&'a Counter); impl<'a> Serialize for SerializableCounterSampleGroup<'a> { fn serialize(&self, serializer: S) -> Result { let mut map = serializer.serialize_map(None)?; map.serialize_entry("id", &0)?; // It's not clear what the meaning of this ID is. 
map.serialize_entry("samples", &self.0.samples)?; map.end() } } #[derive(Debug)] struct CounterSamples { time: Vec, number: Vec, count: Vec, } impl CounterSamples { pub fn new() -> Self { Self { time: Vec::new(), number: Vec::new(), count: Vec::new(), } } pub fn add_sample( &mut self, timestamp: Timestamp, value_delta: f64, number_of_operations_delta: u32, ) { self.time.push(timestamp); self.count.push(value_delta); self.number.push(number_of_operations_delta); } } impl Serialize for CounterSamples { fn serialize(&self, serializer: S) -> Result { let len = self.time.len(); let mut map = serializer.serialize_map(None)?; map.serialize_entry("length", &len)?; map.serialize_entry("count", &self.count)?; map.serialize_entry("number", &self.number)?; map.serialize_entry("time", &self.time)?; map.end() } } fxprof-processed-profile-0.7.0/src/cpu_delta.rs000064400000000000000000000031441046102023000176560ustar 00000000000000use serde::ser::{Serialize, Serializer}; use std::time::Duration; /// The amount of CPU time between thread samples. /// /// This is used in the Firefox Profiler UI to draw an activity graph per thread. /// /// A thread only runs on one CPU at any time, and can get scheduled off and on /// the CPU between two samples. The CPU delta is the accumulation of time it /// was running on the CPU. #[derive(Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash)] pub struct CpuDelta { micros: u64, } impl From for CpuDelta { fn from(duration: Duration) -> Self { Self { micros: duration.as_micros() as u64, } } } impl CpuDelta { /// A CPU delta of zero. pub const ZERO: Self = Self { micros: 0 }; /// Create a CPU delta from integer nanoseconds. pub fn from_nanos(nanos: u64) -> Self { Self { micros: nanos / 1000, } } /// Create a CPU delta from integer microseconds. pub fn from_micros(micros: u64) -> Self { Self { micros } } /// Create a CPU delta from float milliseconds. 
pub fn from_millis(millis: f64) -> Self { Self { micros: (millis * 1_000.0) as u64, } } /// Whether the CPU delta is zero. pub fn is_zero(&self) -> bool { self.micros == 0 } } impl Serialize for CpuDelta { fn serialize(&self, serializer: S) -> Result { // CPU deltas are serialized as float microseconds, because // we set profile.meta.sampleUnits.threadCPUDelta to "µs". self.micros.serialize(serializer) } } fxprof-processed-profile-0.7.0/src/fast_hash_map.rs000064400000000000000000000002431046102023000205100ustar 00000000000000use fxhash::FxHasher; use std::collections::HashMap; use std::hash::BuildHasherDefault; pub type FastHashMap = HashMap>; fxprof-processed-profile-0.7.0/src/frame.rs000064400000000000000000000043141046102023000170100ustar 00000000000000use bitflags::bitflags; use crate::category::CategoryPairHandle; use crate::global_lib_table::LibraryHandle; use crate::profile::StringHandle; /// A part of the information about a single stack frame. #[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)] pub enum Frame { /// A code address taken from the instruction pointer. /// /// This code address will be resolved to a library-relative address using /// the library mappings on the process which were specified using /// [`Profile::add_lib_mapping`](crate::Profile::add_lib_mapping). InstructionPointer(u64), /// A code address taken from a return address /// /// This code address will be resolved to a library-relative address using /// the library mappings on the process which were specified using /// [`Profile::add_lib_mapping`](crate::Profile::add_lib_mapping). ReturnAddress(u64), /// A relative address taken from the instruction pointer which /// has already been resolved to a `LibraryHandle`. RelativeAddressFromInstructionPointer(LibraryHandle, u32), /// A relative address taken from a return address which /// has already been resolved to a `LibraryHandle`. 
RelativeAddressFromReturnAddress(LibraryHandle, u32), /// A string, containing an index returned by /// [`Profile::intern_string`](crate::Profile::intern_string). Label(StringHandle), } /// All the information about a single stack frame. #[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)] pub struct FrameInfo { /// The absolute address or label of this frame. pub frame: Frame, /// The category pair of this frame. pub category_pair: CategoryPairHandle, /// The flags of this frame. Use `FrameFlags::empty()` if unsure. pub flags: FrameFlags, } bitflags! { /// Flags for a stack frame. #[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)] pub struct FrameFlags: u32 { /// Set on frames which are JavaScript functions. const IS_JS = 0b00000001; /// Set on frames which are not strictly JavaScript functions but which /// should be included in the JS-only call tree, such as DOM API calls. const IS_RELEVANT_FOR_JS = 0b00000010; } } fxprof-processed-profile-0.7.0/src/frame_table.rs000064400000000000000000000155111046102023000201600ustar 00000000000000use serde::ser::{Serialize, SerializeMap, SerializeSeq, Serializer}; use crate::category::{ Category, CategoryHandle, CategoryPairHandle, SerializableSubcategoryColumn, Subcategory, }; use crate::fast_hash_map::FastHashMap; use crate::frame::FrameFlags; use crate::func_table::{FuncIndex, FuncTable}; use crate::global_lib_table::{GlobalLibIndex, GlobalLibTable}; use crate::native_symbols::{NativeSymbolIndex, NativeSymbols}; use crate::resource_table::ResourceTable; use crate::serialization_helpers::SerializableSingleValueColumn; use crate::thread_string_table::{ThreadInternalStringIndex, ThreadStringTable}; #[derive(Debug, Clone, Default)] pub struct FrameTable { addresses: Vec>, categories: Vec, subcategories: Vec, funcs: Vec, native_symbols: Vec>, internal_frame_to_frame_index: FastHashMap, } impl FrameTable { pub fn new() -> Self { Default::default() } pub fn index_for_frame( &mut self, string_table: &mut 
ThreadStringTable, resource_table: &mut ResourceTable, func_table: &mut FuncTable, native_symbol_table: &mut NativeSymbols, global_libs: &GlobalLibTable, frame: InternalFrame, ) -> usize { let addresses = &mut self.addresses; let funcs = &mut self.funcs; let native_symbols = &mut self.native_symbols; let categories = &mut self.categories; let subcategories = &mut self.subcategories; *self .internal_frame_to_frame_index .entry(frame.clone()) .or_insert_with(|| { let frame_index = addresses.len(); let (address, location_string_index, native_symbol, resource) = match frame.location { InternalFrameLocation::UnknownAddress(address) => { let location_string = format!("0x{address:x}"); let s = string_table.index_for_string(&location_string); (None, s, None, None) } InternalFrameLocation::AddressInLib(address, lib_index) => { let res = resource_table.resource_for_lib(lib_index, global_libs, string_table); let lib = global_libs.get_lib(lib_index).unwrap(); let native_symbol_and_name = lib.symbol_table.as_deref().and_then(|symbol_table| { let symbol = symbol_table.lookup(address)?; Some( native_symbol_table.symbol_index_and_string_index_for_symbol( lib_index, symbol, string_table, ), ) }); let (native_symbol, s) = match native_symbol_and_name { Some((native_symbol, name_string_index)) => { (Some(native_symbol), name_string_index) } None => { let location_string = format!("0x{address:x}"); (None, string_table.index_for_string(&location_string)) } }; (Some(address), s, native_symbol, Some(res)) } InternalFrameLocation::Label(string_index) => (None, string_index, None, None), }; let func_index = func_table.index_for_func(location_string_index, resource, frame.flags); let CategoryPairHandle(category, subcategory_index) = frame.category_pair; let subcategory = match subcategory_index { Some(index) => Subcategory::Normal(index), None => Subcategory::Other(category), }; addresses.push(address); categories.push(category); subcategories.push(subcategory); funcs.push(func_index); 
native_symbols.push(native_symbol); frame_index }) } pub fn as_serializable<'a>(&'a self, categories: &'a [Category]) -> impl Serialize + 'a { SerializableFrameTable { table: self, categories, } } } struct SerializableFrameTable<'a> { table: &'a FrameTable, categories: &'a [Category], } impl<'a> Serialize for SerializableFrameTable<'a> { fn serialize(&self, serializer: S) -> Result { let len = self.table.addresses.len(); let mut map = serializer.serialize_map(None)?; map.serialize_entry("length", &len)?; map.serialize_entry( "address", &SerializableFrameTableAddressColumn(&self.table.addresses), )?; map.serialize_entry("inlineDepth", &SerializableSingleValueColumn(0u32, len))?; map.serialize_entry("category", &self.table.categories)?; map.serialize_entry( "subcategory", &SerializableSubcategoryColumn(&self.table.subcategories, self.categories), )?; map.serialize_entry("func", &self.table.funcs)?; map.serialize_entry("nativeSymbol", &self.table.native_symbols)?; map.serialize_entry("innerWindowID", &SerializableSingleValueColumn((), len))?; map.serialize_entry("implementation", &SerializableSingleValueColumn((), len))?; map.serialize_entry("line", &SerializableSingleValueColumn((), len))?; map.serialize_entry("column", &SerializableSingleValueColumn((), len))?; map.serialize_entry("optimizations", &SerializableSingleValueColumn((), len))?; map.end() } } struct SerializableFrameTableAddressColumn<'a>(&'a [Option]); impl<'a> Serialize for SerializableFrameTableAddressColumn<'a> { fn serialize(&self, serializer: S) -> Result { let mut seq = serializer.serialize_seq(Some(self.0.len()))?; for address in self.0 { match address { Some(address) => seq.serialize_element(&address)?, None => seq.serialize_element(&-1)?, } } seq.end() } } #[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq, Hash)] pub struct InternalFrame { pub location: InternalFrameLocation, pub category_pair: CategoryPairHandle, pub flags: FrameFlags, } #[derive(Debug, Clone, PartialOrd, Ord, PartialEq, 
Eq, Hash)] pub enum InternalFrameLocation { UnknownAddress(u64), AddressInLib(u32, GlobalLibIndex), Label(ThreadInternalStringIndex), } fxprof-processed-profile-0.7.0/src/func_table.rs000064400000000000000000000072721046102023000200260ustar 00000000000000use serde::ser::{Serialize, SerializeMap, SerializeSeq, Serializer}; use crate::fast_hash_map::FastHashMap; use crate::frame::FrameFlags; use crate::resource_table::ResourceIndex; use crate::serialization_helpers::SerializableSingleValueColumn; use crate::thread_string_table::ThreadInternalStringIndex; #[derive(Debug, Clone, Default)] pub struct FuncTable { names: Vec, resources: Vec>, flags: Vec, func_name_and_resource_and_flags_to_func_index: FastHashMap<(ThreadInternalStringIndex, Option, FrameFlags), usize>, contains_js_function: bool, } impl FuncTable { pub fn new() -> Self { Default::default() } pub fn index_for_func( &mut self, name: ThreadInternalStringIndex, resource: Option, flags: FrameFlags, ) -> FuncIndex { let func_index = *self .func_name_and_resource_and_flags_to_func_index .entry((name, resource, flags)) .or_insert_with(|| { let func_index = self.names.len(); self.names.push(name); self.resources.push(resource); self.flags.push(flags); func_index }); if flags.intersects(FrameFlags::IS_JS | FrameFlags::IS_RELEVANT_FOR_JS) { self.contains_js_function = true; } FuncIndex(func_index as u32) } pub fn contains_js_function(&self) -> bool { self.contains_js_function } } #[derive(Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash)] pub struct FuncIndex(u32); impl Serialize for FuncIndex { fn serialize(&self, serializer: S) -> Result { serializer.serialize_u32(self.0) } } impl Serialize for FuncTable { fn serialize(&self, serializer: S) -> Result { let len = self.names.len(); let mut map = serializer.serialize_map(None)?; map.serialize_entry("length", &len)?; map.serialize_entry("name", &self.names)?; map.serialize_entry( "isJS", &SerializableFlagColumn(&self.flags, FrameFlags::IS_JS), )?; 
map.serialize_entry( "relevantForJS", &SerializableFlagColumn(&self.flags, FrameFlags::IS_RELEVANT_FOR_JS), )?; map.serialize_entry( "resource", &SerializableFuncTableResourceColumn(&self.resources), )?; map.serialize_entry("fileName", &SerializableSingleValueColumn((), len))?; map.serialize_entry("lineNumber", &SerializableSingleValueColumn((), len))?; map.serialize_entry("columnNumber", &SerializableSingleValueColumn((), len))?; map.end() } } struct SerializableFuncTableResourceColumn<'a>(&'a [Option]); impl<'a> Serialize for SerializableFuncTableResourceColumn<'a> { fn serialize(&self, serializer: S) -> Result { let mut seq = serializer.serialize_seq(Some(self.0.len()))?; for resource in self.0 { match resource { Some(resource) => seq.serialize_element(&resource)?, None => seq.serialize_element(&-1)?, } } seq.end() } } pub struct SerializableFlagColumn<'a>(&'a [FrameFlags], FrameFlags); impl<'a> Serialize for SerializableFlagColumn<'a> { fn serialize(&self, serializer: S) -> Result { let mut seq = serializer.serialize_seq(Some(self.0.len()))?; for item_flags in self.0 { seq.serialize_element(&item_flags.contains(self.1))?; } seq.end() } } fxprof-processed-profile-0.7.0/src/global_lib_table.rs000064400000000000000000000051671046102023000211620ustar 00000000000000use std::sync::Arc; use serde::ser::{Serialize, Serializer}; use crate::fast_hash_map::FastHashMap; use crate::{LibraryInfo, SymbolTable}; #[derive(Debug)] pub struct GlobalLibTable { /// All libraries added via `Profile::add_lib`. May or may not be used. /// Indexed by `LibraryHandle.0`. all_libs: Vec, // append-only for stable LibraryHandles /// Indexed by `GlobalLibIndex.0`. 
used_libs: Vec, // append-only for stable GlobalLibIndexes lib_map: FastHashMap, used_lib_map: FastHashMap, } impl GlobalLibTable { pub fn new() -> Self { Self { all_libs: Vec::new(), used_libs: Vec::new(), lib_map: FastHashMap::default(), used_lib_map: FastHashMap::default(), } } pub fn handle_for_lib(&mut self, lib: LibraryInfo) -> LibraryHandle { let all_libs = &mut self.all_libs; *self.lib_map.entry(lib.clone()).or_insert_with(|| { let handle = LibraryHandle(all_libs.len()); all_libs.push(lib); handle }) } pub fn set_lib_symbol_table(&mut self, library: LibraryHandle, symbol_table: Arc) { self.all_libs[library.0].symbol_table = Some(symbol_table); } pub fn index_for_used_lib(&mut self, lib_handle: LibraryHandle) -> GlobalLibIndex { let used_libs = &mut self.used_libs; *self.used_lib_map.entry(lib_handle).or_insert_with(|| { let index = GlobalLibIndex(used_libs.len()); used_libs.push(lib_handle); index }) } pub fn get_lib(&self, index: GlobalLibIndex) -> Option<&LibraryInfo> { let handle = self.used_libs.get(index.0)?; self.all_libs.get(handle.0) } } impl Serialize for GlobalLibTable { fn serialize(&self, serializer: S) -> Result { serializer.collect_seq(self.used_libs.iter().map(|handle| &self.all_libs[handle.0])) } } /// An index for a *used* library, i.e. a library for which there exists at /// least one frame in any process's frame table which refers to this lib. #[derive(Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash)] pub struct GlobalLibIndex(usize); impl Serialize for GlobalLibIndex { fn serialize(&self, serializer: S) -> Result { serializer.serialize_u32(self.0 as u32) } } /// The handle for a library, obtained from [`Profile::add_lib`](crate::Profile::add_lib). #[derive(Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash)] pub struct LibraryHandle(usize); fxprof-processed-profile-0.7.0/src/lib.rs000064400000000000000000000053231046102023000164650ustar 00000000000000//! This crate allows you to create a profile that can be loaded into //! 
the [Firefox Profiler](https://profiler.firefox.com/). //! //! Specifically, this uses the ["Processed profile format"](https://github.com/firefox-devtools/profiler/blob/main/docs-developer/processed-profile-format.md). //! //! Use [`Profile::new`] to create a new [`Profile`] object. Then add all the //! information into it. To convert it to JSON, use [`serde_json`], for //! example [`serde_json::to_writer`] or [`serde_json::to_string`]. //! //! ## Example //! //! ``` //! use fxprof_processed_profile::{Profile, CategoryHandle, CpuDelta, Frame, FrameInfo, FrameFlags, SamplingInterval, Timestamp}; //! use std::time::SystemTime; //! //! # fn write_profile(output_file: std::fs::File) -> Result<(), Box> { //! let mut profile = Profile::new("My app", SystemTime::now().into(), SamplingInterval::from_millis(1)); //! let process = profile.add_process("App process", 54132, Timestamp::from_millis_since_reference(0.0)); //! let thread = profile.add_thread(process, 54132000, Timestamp::from_millis_since_reference(0.0), true); //! profile.set_thread_name(thread, "Main thread"); //! let stack = vec![ //! FrameInfo { frame: Frame::Label(profile.intern_string("Root node")), category_pair: CategoryHandle::OTHER.into(), flags: FrameFlags::empty() }, //! FrameInfo { frame: Frame::Label(profile.intern_string("First callee")), category_pair: CategoryHandle::OTHER.into(), flags: FrameFlags::empty() } //! ]; //! profile.add_sample(thread, Timestamp::from_millis_since_reference(0.0), stack.into_iter(), CpuDelta::ZERO, 1); //! //! let writer = std::io::BufWriter::new(output_file); //! serde_json::to_writer(writer, &profile)?; //! # Ok(()) //! # } //! 
``` pub use debugid; mod category; mod category_color; mod counters; mod cpu_delta; mod fast_hash_map; mod frame; mod frame_table; mod func_table; mod global_lib_table; mod lib_mappings; mod library_info; mod marker_table; mod markers; mod native_symbols; mod process; mod profile; mod reference_timestamp; mod resource_table; mod sample_table; mod serialization_helpers; mod stack_table; mod string_table; mod thread; mod thread_string_table; mod timestamp; pub use category::{CategoryHandle, CategoryPairHandle}; pub use category_color::CategoryColor; pub use counters::CounterHandle; pub use cpu_delta::CpuDelta; pub use frame::{Frame, FrameFlags, FrameInfo}; pub use global_lib_table::LibraryHandle; pub use lib_mappings::LibMappings; pub use library_info::{LibraryInfo, Symbol, SymbolTable}; pub use markers::*; pub use process::ThreadHandle; pub use profile::{Profile, SamplingInterval, StringHandle}; pub use reference_timestamp::ReferenceTimestamp; pub use thread::ProcessHandle; pub use timestamp::*; fxprof-processed-profile-0.7.0/src/lib_mappings.rs000064400000000000000000000132511046102023000203620ustar 00000000000000use std::collections::BTreeMap; /// Keeps track of mapped libraries in an address space. Stores a value /// for each mapping, and allows efficient lookup of that value based on /// an address. /// /// A "library" here is a loose term; it could be a normal shared library, /// or the main binary, but it could also be a synthetic library for JIT /// code. For normal libraries, there's usually just one mapping per library. /// For JIT code, you could have many small mappings, one per JIT function, /// all pointing to the synthetic JIT "library". #[derive(Debug, Clone)] pub struct LibMappings { /// A BTreeMap of non-overlapping Mappings. The key is the start_avma of the mapping. /// /// When a new mapping is added, overlapping mappings are removed. 
map: BTreeMap>, } impl Default for LibMappings { fn default() -> Self { Self::new() } } impl LibMappings { /// Creates a new empty instance. pub fn new() -> Self { Self { map: BTreeMap::new(), } } /// Add a mapping to this address space. Any existing mappings which overlap with the /// new mapping are removed. /// /// `start_avma` and `end_avma` describe the address range that this mapping /// occupies. /// /// AVMA = "actual virtual memory address" /// /// `relative_address_at_start` is the "relative address" which corresponds /// to `start_avma`, in the library that is mapped in this mapping. This is zero if /// `start_avm` is the base address of the library. /// /// A relative address is a `u32` value which is relative to the library base address. /// So you will usually set `relative_address_at_start` to `start_avma - base_avma`. /// /// For ELF binaries, the base address is the AVMA of the first segment, i.e. the /// start_avma of the mapping created by the first ELF `LOAD` command. /// /// For mach-O binaries, the base address is the vmaddr of the `__TEXT` segment. /// /// For Windows binaries, the base address is the image load address. pub fn add_mapping( &mut self, start_avma: u64, end_avma: u64, relative_address_at_start: u32, value: T, ) { let removal_avma_range_start = if let Some(mapping_overlapping_with_start_avma) = self.lookup_impl(start_avma) { mapping_overlapping_with_start_avma.start_avma } else { start_avma }; // self.map.drain(removal_avma_range_start..end_avma); let overlapping_keys: Vec = self .map .range(removal_avma_range_start..end_avma) .map(|(start_avma, _)| *start_avma) .collect(); for key in overlapping_keys { self.map.remove(&key); } self.map.insert( start_avma, Mapping { start_avma, end_avma, relative_address_at_start, value, }, ); } /// Remove a mapping which starts at the given address. If found, this returns /// the `relative_address_at_start` and the associated value of the mapping. 
pub fn remove_mapping(&mut self, start_avma: u64) -> Option<(u32, T)> { self.map .remove(&start_avma) .map(|m| (m.relative_address_at_start, m.value)) } /// Clear all mappings. pub fn clear(&mut self) { self.map.clear(); } /// Look up the mapping which covers the given address and return /// the stored value. pub fn lookup(&self, avma: u64) -> Option<&T> { self.lookup_impl(avma).map(|m| &m.value) } /// Look up the mapping which covers the given address and return /// its `Mapping``. fn lookup_impl(&self, avma: u64) -> Option<&Mapping> { let (_start_avma, last_mapping_starting_at_or_before_avma) = self.map.range(..=avma).next_back()?; if avma < last_mapping_starting_at_or_before_avma.end_avma { Some(last_mapping_starting_at_or_before_avma) } else { None } } /// Converts an absolute address (AVMA, actual virtual memory address) into /// a relative address and the mapping's associated value. pub fn convert_address(&self, avma: u64) -> Option<(u32, &T)> { let mapping = match self.lookup_impl(avma) { Some(mapping) => mapping, None => return None, }; let offset_from_mapping_start = (avma - mapping.start_avma) as u32; let relative_address = mapping.relative_address_at_start + offset_from_mapping_start; Some((relative_address, &mapping.value)) } } #[derive(Debug, Clone, PartialEq, PartialOrd, Ord, Eq)] struct Mapping { start_avma: u64, end_avma: u64, relative_address_at_start: u32, value: T, } #[cfg(test)] mod test { use super::*; #[test] fn test_lib_mappings() { let mut m = LibMappings::new(); m.add_mapping(100, 200, 100, "100..200"); m.add_mapping(200, 250, 200, "200..250"); assert_eq!(m.lookup(200), Some(&"200..250")); m.add_mapping(180, 220, 180, "180..220"); assert_eq!(m.lookup(200), Some(&"180..220")); assert_eq!(m.lookup(170), None); assert_eq!(m.lookup(220), None); m.add_mapping(225, 250, 225, "225..250"); m.add_mapping(255, 270, 255, "255..270"); m.add_mapping(100, 150, 100, "100..150"); assert_eq!(m.lookup(90), None); assert_eq!(m.lookup(150), None); 
assert_eq!(m.lookup(149), Some(&"100..150")); assert_eq!(m.lookup(200), Some(&"180..220")); assert_eq!(m.lookup(260), Some(&"255..270")); } } fxprof-processed-profile-0.7.0/src/library_info.rs000064400000000000000000000127301046102023000203760ustar 00000000000000use debugid::DebugId; use serde::{ser::SerializeMap, Serialize, Serializer}; use std::sync::Arc; /// A library ("binary" / "module" / "DSO") which is loaded into a process. /// This can be the main executable file or a dynamic library, or any other /// mapping of executable memory. /// /// Library information makes after-the-fact symbolication possible: The /// profile JSON contains raw code addresses, and then the symbols for these /// addresses get resolved later. #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct LibraryInfo { /// The name of this library that should be displayed in the profiler. /// Usually this is the filename of the binary, but it could also be any other /// name, such as "\[kernel.kallsyms\]" or "\[vdso\]" or "JIT". pub name: String, /// The debug name of this library which should be used when looking up symbols. /// On Windows this is the filename of the PDB file, on other platforms it's /// usually the same as the filename of the binary. pub debug_name: String, /// The absolute path to the binary file. pub path: String, /// The absolute path to the debug file. On Linux and macOS this is the same as /// the path to the binary file. On Windows this is the path to the PDB file. pub debug_path: String, /// The debug ID of the library. This lets symbolication confirm that it's /// getting symbols for the right file, and it can sometimes allow obtaining a /// symbol file from a symbol server. pub debug_id: DebugId, /// The code ID of the library. This lets symbolication confirm that it's /// getting symbols for the right file, and it can sometimes allow obtaining a /// symbol file from a symbol server. 
pub code_id: Option, /// An optional string with the CPU arch of this library, for example "x86_64", /// "arm64", or "arm64e". This is used for macOS system libraries in the dyld /// shared cache, in order to avoid loading the wrong cache files, as a /// performance optimization. In the past, this was also used to find the /// correct sub-binary in a mach-O fat binary. But we now use the debug_id for that /// purpose. pub arch: Option, /// An optional symbol table, for "pre-symbolicating" stack frames. /// /// Usually, symbolication is something that should happen asynchronously, /// because it can be very slow, so the regular way to use the profiler is to /// store only frame addresses and no symbols in the profile JSON, and perform /// symbolication only once the profile is loaded in the Firefox Profiler UI. /// /// However, sometimes symbols are only available during recording and are not /// easily accessible afterwards. One such example the symbol table of the /// Linux kernel: Users with root privileges can access the symbol table of the /// currently-running kernel via `/proc/kallsyms`, but we don't want to have /// to run the local symbol server with root privileges. So it's easier to /// resolve kernel symbols when generating the profile JSON. /// /// This way of symbolicating does not support file names, line numbers, or /// inline frames. It is intended for relatively "small" symbol tables for which /// an address lookup is fast. 
pub symbol_table: Option>, } impl Serialize for LibraryInfo { fn serialize(&self, serializer: S) -> Result { let breakpad_id = self.debug_id.breakpad().to_string(); let code_id = self.code_id.as_ref().map(|cid| cid.to_string()); let mut map = serializer.serialize_map(None)?; map.serialize_entry("name", &self.name)?; map.serialize_entry("path", &self.path)?; map.serialize_entry("debugName", &self.debug_name)?; map.serialize_entry("debugPath", &self.debug_path)?; map.serialize_entry("breakpadId", &breakpad_id)?; map.serialize_entry("codeId", &code_id)?; map.serialize_entry("arch", &self.arch)?; map.end() } } /// A symbol table which contains a list of [`Symbol`]s, used in [`LibraryInfo`]. #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct SymbolTable { symbols: Vec, } impl SymbolTable { /// Create a [`SymbolTable`] from a list of [`Symbol`]s. pub fn new(mut symbols: Vec) -> Self { symbols.sort(); symbols.dedup_by_key(|symbol| symbol.address); Self { symbols } } /// Look up the symbol for an address. This address is relative to the library's base address. pub fn lookup(&self, address: u32) -> Option<&Symbol> { let index = match self .symbols .binary_search_by_key(&address, |symbol| symbol.address) { Ok(i) => i, Err(0) => return None, Err(next_i) => next_i - 1, }; let symbol = &self.symbols[index]; match symbol.size { Some(size) if address < symbol.address.saturating_add(size) => Some(symbol), Some(_size) => None, None => Some(symbol), } } } /// A single symbol from a [`SymbolTable`]. #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Symbol { /// The symbol's address, as a "relative address", i.e. relative to the library's base address. pub address: u32, /// The symbol's size, if known. This is often just set based on the address of the next symbol. pub size: Option, /// The symbol name. 
pub name: String, } fxprof-processed-profile-0.7.0/src/marker_table.rs000064400000000000000000000050401046102023000203430ustar 00000000000000use serde::ser::{Serialize, SerializeMap, Serializer}; use serde_json::Value; use crate::serialization_helpers::SerializableOptionalTimestampColumn; use crate::thread_string_table::ThreadInternalStringIndex; use crate::{CategoryHandle, MarkerTiming, Timestamp}; #[derive(Debug, Clone, Default)] pub struct MarkerTable { marker_categories: Vec, marker_name_string_indexes: Vec, marker_starts: Vec>, marker_ends: Vec>, marker_phases: Vec, marker_datas: Vec, } impl MarkerTable { pub fn new() -> Self { Default::default() } pub fn add_marker( &mut self, category: CategoryHandle, name: ThreadInternalStringIndex, timing: MarkerTiming, data: Value, ) { let (s, e, phase) = match timing { MarkerTiming::Instant(s) => (Some(s), None, Phase::Instant), MarkerTiming::Interval(s, e) => (Some(s), Some(e), Phase::Interval), MarkerTiming::IntervalStart(s) => (Some(s), None, Phase::IntervalStart), MarkerTiming::IntervalEnd(e) => (None, Some(e), Phase::IntervalEnd), }; self.marker_categories.push(category); self.marker_name_string_indexes.push(name); self.marker_starts.push(s); self.marker_ends.push(e); self.marker_phases.push(phase); self.marker_datas.push(data); } } impl Serialize for MarkerTable { fn serialize(&self, serializer: S) -> Result { let len = self.marker_name_string_indexes.len(); let mut map = serializer.serialize_map(None)?; map.serialize_entry("length", &len)?; map.serialize_entry("category", &self.marker_categories)?; map.serialize_entry("data", &self.marker_datas)?; map.serialize_entry( "endTime", &SerializableOptionalTimestampColumn(&self.marker_ends), )?; map.serialize_entry("name", &self.marker_name_string_indexes)?; map.serialize_entry("phase", &self.marker_phases)?; map.serialize_entry( "startTime", &SerializableOptionalTimestampColumn(&self.marker_starts), )?; map.end() } } #[derive(Debug, Clone, Copy)] #[repr(u8)] enum Phase 
{ Instant = 0, Interval = 1, IntervalStart = 2, IntervalEnd = 3, } impl Serialize for Phase { fn serialize(&self, serializer: S) -> Result { serializer.serialize_u8(*self as u8) } } fxprof-processed-profile-0.7.0/src/markers.rs000064400000000000000000000171651046102023000173720ustar 00000000000000/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this file, * You can obtain one at http://mozilla.org/MPL/2.0/. */ use serde_derive::Serialize; use serde_json::Value; use super::timestamp::Timestamp; /// Specifies timestamps for a marker. #[derive(Debug, Clone)] pub enum MarkerTiming { /// Instant markers describe a single point in time. Instant(Timestamp), /// Interval markers describe a time interval with a start and end timestamp. Interval(Timestamp, Timestamp), /// A marker for just the start of an actual marker. Can be paired with an /// `IntervalEnd` marker of the same name; if no end marker is supplied, this /// creates a marker that extends to the end of the profile. /// /// This can be used for long-running markers for pieces of activity that may /// not have completed by the time the profile is captured. IntervalStart(Timestamp), /// A marker for just the end of an actual marker. Can be paired with an /// `IntervalStart` marker of the same name; if no start marker is supplied, /// this creates a marker which started before the beginning of the profile. /// /// This can be used to mark pieces of activity which started before profiling /// began. IntervalEnd(Timestamp), } /// The trait that all markers implement. /// /// /// ``` /// use fxprof_processed_profile::{ProfilerMarker, MarkerLocation, MarkerFieldFormat, MarkerSchema, MarkerDynamicField, MarkerSchemaField}; /// use serde_json::json; /// /// /// An example marker type with some text content. 
/// #[derive(Debug, Clone)] /// pub struct TextMarker(pub String); /// /// impl ProfilerMarker for TextMarker { /// const MARKER_TYPE_NAME: &'static str = "Text"; /// /// fn json_marker_data(&self) -> serde_json::Value { /// json!({ /// "type": Self::MARKER_TYPE_NAME, /// "name": self.0 /// }) /// } /// /// fn schema() -> MarkerSchema { /// MarkerSchema { /// type_name: Self::MARKER_TYPE_NAME, /// locations: vec![MarkerLocation::MarkerChart, MarkerLocation::MarkerTable], /// chart_label: Some("{marker.data.name}"), /// tooltip_label: None, /// table_label: Some("{marker.name} - {marker.data.name}"), /// fields: vec![MarkerSchemaField::Dynamic(MarkerDynamicField { /// key: "name", /// label: "Details", /// format: MarkerFieldFormat::String, /// searchable: true, /// })], /// } /// } /// } /// ``` pub trait ProfilerMarker { /// The name of the marker type. const MARKER_TYPE_NAME: &'static str; /// A static method that returns a `MarkerSchema`, which contains all the /// information needed to stream the display schema associated with a /// marker type. fn schema() -> MarkerSchema; /// A method that streams the marker payload data as a serde_json object. fn json_marker_data(&self) -> Value; } /// Describes a marker type. #[derive(Debug, Clone, Serialize)] #[serde(rename_all = "camelCase")] pub struct MarkerSchema { /// The name of this marker type. #[serde(rename = "name")] pub type_name: &'static str, /// List of marker display locations. Empty for SpecialFrontendLocation. #[serde(rename = "display")] pub locations: Vec, #[serde(skip_serializing_if = "Option::is_none")] pub chart_label: Option<&'static str>, #[serde(skip_serializing_if = "Option::is_none")] pub tooltip_label: Option<&'static str>, #[serde(skip_serializing_if = "Option::is_none")] pub table_label: Option<&'static str>, /// The marker fields. These can be specified on each marker. #[serde(rename = "data")] pub fields: Vec, } /// The location of markers with this type. 
/// /// Markers can be shown in different parts of the Firefox Profiler UI. /// /// Multiple [`MarkerLocation`]s can be specified for a single marker type. #[derive(Debug, Clone, Serialize)] #[serde(rename_all = "kebab-case")] pub enum MarkerLocation { MarkerChart, MarkerTable, /// This adds markers to the main marker timeline in the header. TimelineOverview, /// In the timeline, this is a section that breaks out markers that are /// related to memory. When memory counters are enabled, this is its own /// track, otherwise it is displayed with the main thread. TimelineMemory, /// This adds markers to the IPC timeline area in the header. TimelineIPC, /// This adds markers to the FileIO timeline area in the header. #[serde(rename = "timeline-fileio")] TimelineFileIO, /// TODO - This is not supported yet. StackChart, } /// The description of a marker field in the marker type's schema. #[derive(Debug, Clone, Serialize)] #[serde(untagged)] pub enum MarkerSchemaField { /// Static fields have the same value on all markers. This is used for /// a "Description" field in the tooltip, for example. Static(MarkerStaticField), /// Dynamic fields have a per-marker value. The ProfilerMarker implementation /// on the marker type needs to serialize a field on the data JSON object with /// the matching key. Dynamic(MarkerDynamicField), } /// The field description of a marker field which has the same key and value on all markers with this schema. #[derive(Debug, Clone, Serialize)] pub struct MarkerStaticField { pub label: &'static str, pub value: &'static str, } /// The field description of a marker field which can have a different value for each marker. #[derive(Debug, Clone, Serialize)] pub struct MarkerDynamicField { /// The field key. pub key: &'static str, /// The user-visible label of this field. #[serde(skip_serializing_if = "str::is_empty")] pub label: &'static str, /// The format of this field. 
pub format: MarkerFieldFormat, /// Whether this field's value should be matched against search terms. pub searchable: bool, } /// The field format of a marker field. #[derive(Debug, Clone, Serialize)] #[serde(rename_all = "kebab-case")] pub enum MarkerFieldFormat { // ---------------------------------------------------- // String types. /// A URL, supports PII sanitization Url, /// A file path, supports PII sanitization. FilePath, /// A plain String, never sanitized for PII. /// Important: Do not put URL or file path information here, as it will not /// be sanitized during profile upload. Please be careful with including /// other types of PII here as well. String, // ---------------------------------------------------- // Numeric types /// For time data that represents a duration of time. /// e.g. "Label: 5s, 5ms, 5μs" Duration, /// Data that happened at a specific time, relative to the start of the /// profile. e.g. "Label: 15.5s, 20.5ms, 30.5μs" Time, /// The following are alternatives to display a time only in a specific unit /// of time. Seconds, // "Label: 5s" Milliseconds, // "Label: 5ms" Microseconds, // "Label: 5μs" Nanoseconds, // "Label: 5ns" /// e.g. "Label: 5.55mb, 5 bytes, 312.5kb" Bytes, /// This should be a value between 0 and 1. /// "Label: 50%" Percentage, // The integer should be used for generic representations of numbers. // Do not use it for time information. // "Label: 52, 5,323, 1,234,567" Integer, // The decimal should be used for generic representations of numbers. // Do not use it for time information. 
// "Label: 52.23, 0.0054, 123,456.78" Decimal, } fxprof-processed-profile-0.7.0/src/native_symbols.rs000064400000000000000000000050601046102023000207530ustar 00000000000000use serde::ser::{Serialize, SerializeMap, Serializer}; use crate::{ fast_hash_map::FastHashMap, global_lib_table::GlobalLibIndex, library_info::Symbol, thread_string_table::{ThreadInternalStringIndex, ThreadStringTable}, }; /// The native symbols that are used by frames in a thread's `FrameTable`. /// They can be from different libraries. Only used symbols are included. #[derive(Debug, Clone, Default)] pub struct NativeSymbols { addresses: Vec, function_sizes: Vec>, lib_indexes: Vec, names: Vec, lib_and_symbol_address_to_symbol_index: FastHashMap<(GlobalLibIndex, u32), usize>, } impl NativeSymbols { pub fn new() -> Self { Default::default() } pub fn symbol_index_and_string_index_for_symbol( &mut self, lib_index: GlobalLibIndex, symbol: &Symbol, string_table: &mut ThreadStringTable, ) -> (NativeSymbolIndex, ThreadInternalStringIndex) { let addresses = &mut self.addresses; let function_sizes = &mut self.function_sizes; let lib_indexes = &mut self.lib_indexes; let names = &mut self.names; let symbol_index = *self .lib_and_symbol_address_to_symbol_index .entry((lib_index, symbol.address)) .or_insert_with(|| { let native_symbol_index = addresses.len(); addresses.push(symbol.address); function_sizes.push(symbol.size); lib_indexes.push(lib_index); names.push(string_table.index_for_string(&symbol.name)); native_symbol_index }); let name_string_index = names[symbol_index]; (NativeSymbolIndex(symbol_index as u32), name_string_index) } } #[derive(Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash)] pub struct NativeSymbolIndex(u32); impl Serialize for NativeSymbolIndex { fn serialize(&self, serializer: S) -> Result { serializer.serialize_u32(self.0) } } impl Serialize for NativeSymbols { fn serialize(&self, serializer: S) -> Result { let len = self.names.len(); let mut map = 
serializer.serialize_map(None)?; map.serialize_entry("length", &len)?; map.serialize_entry("address", &self.addresses)?; map.serialize_entry("functionSize", &self.function_sizes)?; map.serialize_entry("libIndex", &self.lib_indexes)?; map.serialize_entry("name", &self.names)?; map.end() } } fxprof-processed-profile-0.7.0/src/process.rs000064400000000000000000000063351046102023000174010ustar 00000000000000use std::cmp::Ordering; use std::hash::Hash; use crate::frame_table::InternalFrameLocation; use crate::global_lib_table::{GlobalLibTable, LibraryHandle}; use crate::lib_mappings::LibMappings; use crate::Timestamp; /// A thread. Can be created with [`Profile::add_thread`](crate::Profile::add_thread). #[derive(Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash)] pub struct ThreadHandle(pub(crate) usize); #[derive(Debug)] pub struct Process { pid: String, name: String, threads: Vec, start_time: Timestamp, end_time: Option, libs: LibMappings, } impl Process { pub fn new(name: &str, pid: String, start_time: Timestamp) -> Self { Self { pid, threads: Vec::new(), libs: LibMappings::new(), start_time, end_time: None, name: name.to_owned(), } } pub fn thread_handle_for_allocations(&self) -> Option { self.threads.first().cloned() } pub fn set_start_time(&mut self, start_time: Timestamp) { self.start_time = start_time; } pub fn start_time(&self) -> Timestamp { self.start_time } pub fn set_end_time(&mut self, end_time: Timestamp) { self.end_time = Some(end_time); } pub fn end_time(&self) -> Option { self.end_time } pub fn set_name(&mut self, name: &str) { self.name = name.to_string(); } pub fn name(&self) -> &str { &self.name } pub fn add_thread(&mut self, thread: ThreadHandle) { self.threads.push(thread); } pub fn pid(&self) -> &str { &self.pid } pub fn cmp_for_json_order(&self, other: &Process) -> Ordering { if let Some(ordering) = self.start_time.partial_cmp(&other.start_time) { if ordering != Ordering::Equal { return ordering; } } self.pid.cmp(&other.pid) } pub fn 
threads(&self) -> &[ThreadHandle] { &self.threads } pub fn convert_address( &mut self, global_libs: &mut GlobalLibTable, kernel_libs: &mut LibMappings, address: u64, ) -> InternalFrameLocation { // Try to find the address in the kernel libs first, and then in the process libs. match kernel_libs .convert_address(address) .or_else(|| self.libs.convert_address(address)) { Some((relative_address, lib_handle)) => { let global_lib_index = global_libs.index_for_used_lib(*lib_handle); InternalFrameLocation::AddressInLib(relative_address, global_lib_index) } None => InternalFrameLocation::UnknownAddress(address), } } pub fn add_lib_mapping( &mut self, lib: LibraryHandle, start_avma: u64, end_avma: u64, relative_address_at_start: u32, ) { self.libs .add_mapping(start_avma, end_avma, relative_address_at_start, lib); } pub fn remove_lib_mapping(&mut self, start_avma: u64) { self.libs.remove_mapping(start_avma); } pub fn remove_all_lib_mappings(&mut self) { self.libs.clear(); } } fxprof-processed-profile-0.7.0/src/profile.rs000064400000000000000000000772261046102023000173720ustar 00000000000000use std::sync::Arc; use std::time::Duration; use serde::ser::{Serialize, SerializeMap, SerializeSeq, Serializer}; use serde_json::json; use crate::category::{Category, CategoryHandle, CategoryPairHandle}; use crate::category_color::CategoryColor; use crate::counters::{Counter, CounterHandle}; use crate::cpu_delta::CpuDelta; use crate::fast_hash_map::FastHashMap; use crate::frame::{Frame, FrameInfo}; use crate::frame_table::{InternalFrame, InternalFrameLocation}; use crate::global_lib_table::{GlobalLibTable, LibraryHandle}; use crate::lib_mappings::LibMappings; use crate::library_info::LibraryInfo; use crate::process::{Process, ThreadHandle}; use crate::reference_timestamp::ReferenceTimestamp; use crate::string_table::{GlobalStringIndex, GlobalStringTable}; use crate::thread::{ProcessHandle, Thread}; use crate::{MarkerSchema, MarkerTiming, ProfilerMarker, SymbolTable, Timestamp}; /// The 
sampling interval used during profile recording. /// /// This doesn't have to match the actual delta between sample timestamps. /// It just describes the intended interval. /// /// For profiles without sampling data, this can be set to a meaningless /// dummy value. #[derive(Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash)] pub struct SamplingInterval { nanos: u64, } impl SamplingInterval { /// Create a sampling interval from a sampling frequency in Hz. /// /// Panics on zero or negative values. pub fn from_hz(samples_per_second: f32) -> Self { assert!(samples_per_second > 0.0); let nanos = (1_000_000_000.0 / samples_per_second) as u64; Self::from_nanos(nanos) } /// Create a sampling interval from a value in milliseconds. pub fn from_millis(millis: u64) -> Self { Self::from_nanos(millis * 1_000_000) } /// Create a sampling interval from a value in nanoseconds pub fn from_nanos(nanos: u64) -> Self { Self { nanos } } /// Convert the interval to nanoseconds. pub fn nanos(&self) -> u64 { self.nanos } /// Convert the interval to float seconds. pub fn as_secs_f64(&self) -> f64 { self.nanos as f64 / 1_000_000_000.0 } } impl From for SamplingInterval { fn from(duration: Duration) -> Self { Self::from_nanos(duration.as_nanos() as u64) } } /// A handle for an interned string, returned from [`Profile::intern_string`]. #[derive(Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash)] pub struct StringHandle(GlobalStringIndex); /// Stores the profile data and can be serialized as JSON, via [`serde::Serialize`]. /// /// The profile data is organized into a list of processes with threads. /// Each thread has its own samples and markers. 
/// /// ``` /// use fxprof_processed_profile::{Profile, CategoryHandle, CpuDelta, Frame, FrameInfo, FrameFlags, SamplingInterval, Timestamp}; /// use std::time::SystemTime; /// /// # fn write_profile(output_file: std::fs::File) -> Result<(), Box> { /// let mut profile = Profile::new("My app", SystemTime::now().into(), SamplingInterval::from_millis(1)); /// let process = profile.add_process("App process", 54132, Timestamp::from_millis_since_reference(0.0)); /// let thread = profile.add_thread(process, 54132000, Timestamp::from_millis_since_reference(0.0), true); /// profile.set_thread_name(thread, "Main thread"); /// let stack = vec![ /// FrameInfo { frame: Frame::Label(profile.intern_string("Root node")), category_pair: CategoryHandle::OTHER.into(), flags: FrameFlags::empty() }, /// FrameInfo { frame: Frame::Label(profile.intern_string("First callee")), category_pair: CategoryHandle::OTHER.into(), flags: FrameFlags::empty() } /// ]; /// profile.add_sample(thread, Timestamp::from_millis_since_reference(0.0), stack.into_iter(), CpuDelta::ZERO, 1); /// /// let writer = std::io::BufWriter::new(output_file); /// serde_json::to_writer(writer, &profile)?; /// # Ok(()) /// # } /// ``` #[derive(Debug)] pub struct Profile { pub(crate) product: String, pub(crate) interval: SamplingInterval, pub(crate) global_libs: GlobalLibTable, pub(crate) kernel_libs: LibMappings, pub(crate) categories: Vec, // append-only for stable CategoryHandles pub(crate) processes: Vec, // append-only for stable ProcessHandles pub(crate) counters: Vec, pub(crate) threads: Vec, // append-only for stable ThreadHandles pub(crate) reference_timestamp: ReferenceTimestamp, pub(crate) string_table: GlobalStringTable, pub(crate) marker_schemas: FastHashMap<&'static str, MarkerSchema>, used_pids: FastHashMap, used_tids: FastHashMap, } impl Profile { /// Create a new profile. /// /// The `product` is the name of the main application which was profiled. 
/// The `reference_timestamp` is some arbitrary absolute timestamp which all /// other timestamps in the profile data are relative to. The `interval` is the intended /// time delta between samples. pub fn new( product: &str, reference_timestamp: ReferenceTimestamp, interval: SamplingInterval, ) -> Self { Profile { interval, product: product.to_string(), threads: Vec::new(), global_libs: GlobalLibTable::new(), kernel_libs: LibMappings::new(), reference_timestamp, processes: Vec::new(), string_table: GlobalStringTable::new(), marker_schemas: FastHashMap::default(), categories: vec![Category { name: "Other".to_string(), color: CategoryColor::Gray, subcategories: Vec::new(), }], used_pids: FastHashMap::default(), used_tids: FastHashMap::default(), counters: Vec::new(), } } /// Change the declared sampling interval. pub fn set_interval(&mut self, interval: SamplingInterval) { self.interval = interval; } /// Change the reference timestamp. pub fn set_reference_timestamp(&mut self, reference_timestamp: ReferenceTimestamp) { self.reference_timestamp = reference_timestamp; } /// Change the product name. pub fn set_product(&mut self, product: &str) { self.product = product.to_string(); } /// Add a category and return its handle. /// /// Categories are used for stack frames and markers, as part of a "category pair". pub fn add_category(&mut self, name: &str, color: CategoryColor) -> CategoryHandle { let handle = CategoryHandle(self.categories.len() as u16); self.categories.push(Category { name: name.to_string(), color, subcategories: Vec::new(), }); handle } /// Add a subcategory for a category, and return the "category pair" handle. pub fn add_subcategory(&mut self, category: CategoryHandle, name: &str) -> CategoryPairHandle { let subcategory = self.categories[category.0 as usize].add_subcategory(name.into()); CategoryPairHandle(category, Some(subcategory)) } /// Add an empty process. 
The name, pid and start time can be changed afterwards, /// but they are required here because they have to be present in the profile JSON. pub fn add_process(&mut self, name: &str, pid: u32, start_time: Timestamp) -> ProcessHandle { let pid = self.make_unique_pid(pid); let handle = ProcessHandle(self.processes.len()); self.processes.push(Process::new(name, pid, start_time)); handle } fn make_unique_pid(&mut self, pid: u32) -> String { Self::make_unique_pid_or_tid(&mut self.used_pids, pid) } fn make_unique_tid(&mut self, tid: u32) -> String { Self::make_unique_pid_or_tid(&mut self.used_tids, tid) } /// Appends ".1" / ".2" etc. to the pid or tid if needed. /// /// The map contains the next suffix for each pid/tid, or no entry if the pid/tid /// hasn't been used before and needs no suffix. fn make_unique_pid_or_tid(map: &mut FastHashMap, id: u32) -> String { match map.entry(id) { std::collections::hash_map::Entry::Occupied(mut entry) => { let suffix = *entry.get(); *entry.get_mut() += 1; format!("{id}.{suffix}") } std::collections::hash_map::Entry::Vacant(entry) => { entry.insert(1); format!("{id}") } } } /// Create a counter. Counters let you make graphs with a time axis and a Y axis. One example of a /// counter is memory usage. 
///
/// # Example
///
/// ```
/// use fxprof_processed_profile::{Profile, CategoryHandle, CpuDelta, Frame, SamplingInterval, Timestamp};
/// use std::time::SystemTime;
///
/// let mut profile = Profile::new("My app", SystemTime::now().into(), SamplingInterval::from_millis(1));
/// let process = profile.add_process("App process", 54132, Timestamp::from_millis_since_reference(0.0));
/// let memory_counter = profile.add_counter(process, "malloc", "Memory", "Amount of allocated memory");
/// profile.add_counter_sample(memory_counter, Timestamp::from_millis_since_reference(0.0), 0.0, 0);
/// profile.add_counter_sample(memory_counter, Timestamp::from_millis_since_reference(1.0), 1000.0, 2);
/// profile.add_counter_sample(memory_counter, Timestamp::from_millis_since_reference(2.0), 800.0, 1);
/// ```
pub fn add_counter(
    &mut self,
    process: ProcessHandle,
    name: &str,
    category: &str,
    description: &str,
) -> CounterHandle {
    let handle = CounterHandle(self.counters.len());
    self.counters.push(Counter::new(
        name,
        category,
        description,
        process,
        // The counter snapshots its process's (de-duplicated string) pid.
        self.processes[process.0].pid(),
    ));
    handle
}

/// Change the start time of a process.
pub fn set_process_start_time(&mut self, process: ProcessHandle, start_time: Timestamp) {
    self.processes[process.0].set_start_time(start_time);
}

/// Set the end time of a process.
pub fn set_process_end_time(&mut self, process: ProcessHandle, end_time: Timestamp) {
    self.processes[process.0].set_end_time(end_time);
}

/// Change the name of a process.
pub fn set_process_name(&mut self, process: ProcessHandle, name: &str) {
    self.processes[process.0].set_name(name);
}

/// Get the [`LibraryHandle`] for a library. This handle is used in [`Profile::add_lib_mapping`]
/// and in the pre-resolved [`Frame`] variants.
///
/// Knowing the library information allows symbolication of native stacks once the
/// profile is opened in the Firefox Profiler.
pub fn add_lib(&mut self, library: LibraryInfo) -> LibraryHandle {
    self.global_libs.handle_for_lib(library)
}

/// Set the symbol table for a library.
///
/// This symbol table can also be specified in the [`LibraryInfo`] which is given to
/// [`Profile::add_lib`]. However, sometimes you may want to have the [`LibraryHandle`]
/// for a library before you know about all its symbols. In those cases, you can call
/// [`Profile::add_lib`] with `symbol_table` set to `None`, and then supply the symbol
/// table afterwards.
///
/// Symbol tables are optional.
// NOTE(review): the generic argument of `Arc` appears to have been stripped from
// this text by extraction (likely `Arc<SymbolTable>`) — confirm against the
// original source.
pub fn set_lib_symbol_table(&mut self, library: LibraryHandle, symbol_table: Arc) {
    self.global_libs.set_lib_symbol_table(library, symbol_table);
}

/// For a given process, define where in the virtual memory of this process the given library
/// is mapped.
///
/// Existing mappings which overlap with the range `start_avma..end_avma` will be removed.
///
/// A single library can have multiple mappings in the same process.
///
/// The new mapping will be respected by future [`Profile::add_sample`] calls, when resolving
/// absolute frame addresses to library-relative addresses.
pub fn add_lib_mapping(
    &mut self,
    process: ProcessHandle,
    lib: LibraryHandle,
    start_avma: u64,
    end_avma: u64,
    relative_address_at_start: u32,
) {
    self.processes[process.0].add_lib_mapping(
        lib,
        start_avma,
        end_avma,
        relative_address_at_start,
    );
}

/// Mark the library mapping at the specified start address in the specified process as
/// unloaded, so that future calls to [`Profile::add_sample`] know about the removal.
pub fn remove_lib_mapping(&mut self, process: ProcessHandle, start_avma: u64) {
    self.processes[process.0].remove_lib_mapping(start_avma);
}

/// Clear all library mappings in the specified process.
pub fn clear_process_lib_mappings(&mut self, process: ProcessHandle) {
    self.processes[process.0].remove_all_lib_mappings();
}

/// Add a kernel library mapping. This allows symbolication of kernel stacks once the profile is
/// opened in the Firefox Profiler. Kernel libraries are global and not tied to a process.
///
/// Each kernel library covers an address range in the kernel address space, which is
/// global across all processes. Future calls to [`Profile::add_sample`] with native
/// frames resolve the frame's code address with respect to the currently loaded kernel
/// and process libraries.
pub fn add_kernel_lib_mapping(
    &mut self,
    lib: LibraryHandle,
    start_avma: u64,
    end_avma: u64,
    relative_address_at_start: u32,
) {
    self.kernel_libs
        .add_mapping(start_avma, end_avma, relative_address_at_start, lib);
}

/// Mark the kernel library at the specified start address as
/// unloaded, so that future calls to [`Profile::add_sample`] know about the unloading.
pub fn remove_kernel_lib_mapping(&mut self, start_avma: u64) {
    self.kernel_libs.remove_mapping(start_avma);
}

/// Add an empty thread to the specified process.
pub fn add_thread(
    &mut self,
    process: ProcessHandle,
    tid: u32,
    start_time: Timestamp,
    is_main: bool,
) -> ThreadHandle {
    // De-duplicate the tid, same as pids in `add_process`.
    let tid = self.make_unique_tid(tid);
    let handle = ThreadHandle(self.threads.len());
    self.threads
        .push(Thread::new(process, tid, start_time, is_main));
    self.processes[process.0].add_thread(handle);
    handle
}

/// Change the name of a thread.
pub fn set_thread_name(&mut self, thread: ThreadHandle, name: &str) {
    self.threads[thread.0].set_name(name);
}

/// Change the start time of a thread.
pub fn set_thread_start_time(&mut self, thread: ThreadHandle, start_time: Timestamp) {
    self.threads[thread.0].set_start_time(start_time);
}

/// Set the end time of a thread.
pub fn set_thread_end_time(&mut self, thread: ThreadHandle, end_time: Timestamp) {
    self.threads[thread.0].set_end_time(end_time);
}

/// Turn the string into a [`StringHandle`], for use in [`Frame::Label`].
pub fn intern_string(&mut self, s: &str) -> StringHandle {
    StringHandle(self.string_table.index_for_string(s))
}

/// Get the string for a string handle. This is sometimes useful when writing tests.
///
/// Panics if the handle wasn't found, which can happen if you pass a handle
/// from a different Profile instance.
pub fn get_string(&self, handle: StringHandle) -> &str {
    self.string_table.get_string(handle.0).unwrap()
}

/// Add a sample to the given thread.
///
/// The sample has a timestamp, a stack, a CPU delta, and a weight.
///
/// The stack frames are supplied as an iterator. Every frame has an associated
/// category pair.
///
/// The CPU delta is the amount of CPU time that the CPU was busy with work for this
/// thread since the previous sample. It should always be less than or equal the
/// time delta between the sample timestamps.
///
/// The weight affects the sample's stack's score in the call tree. You usually set
/// this to 1. You can use weights greater than one if you want to combine multiple
/// adjacent samples with the same stack into one sample, to save space. However,
/// this discards any CPU deltas between the adjacent samples, so it's only really
/// useful if no CPU time has occurred between the samples, and for that use case the
/// [`Profile::add_sample_same_stack_zero_cpu`] method should be preferred.
///
/// You can also set the weight to something negative, such as -1, to create a
/// "diff profile". For example, if you have partitioned your samples into "before"
/// and "after" groups, you can use -1 for all "before" samples and 1 for all "after"
/// samples, and the call tree will show you which stacks occur more frequently in
/// the "after" part of the profile, by sorting those stacks to the top.
// NOTE(review): the `impl Iterator` parameters in this block appear to have lost
// their `<Item = ...>` argument to extraction (likely `FrameInfo`, given the
// `frame_info.frame` / `frame_info.category_pair` accesses in
// `stack_index_for_frames`). The marker methods below are likewise missing a
// `<T: ...>` generic — `T::MARKER_TYPE_NAME` and `T::schema` imply a marker trait
// bound. Confirm against the original source.
pub fn add_sample(
    &mut self,
    thread: ThreadHandle,
    timestamp: Timestamp,
    frames: impl Iterator,
    cpu_delta: CpuDelta,
    weight: i32,
) {
    let stack_index = self.stack_index_for_frames(thread, frames);
    self.threads[thread.0].add_sample(timestamp, stack_index, cpu_delta, weight);
}

/// Add a sample with a CPU delta of zero. Internally, multiple consecutive
/// samples with a delta of zero will be combined into one sample with an accumulated
/// weight.
pub fn add_sample_same_stack_zero_cpu(
    &mut self,
    thread: ThreadHandle,
    timestamp: Timestamp,
    weight: i32,
) {
    self.threads[thread.0].add_sample_same_stack_zero_cpu(timestamp, weight);
}

/// Add an allocation or deallocation sample to the given thread. This is used
/// to collect stacks showing where allocations and deallocations happened.
///
/// When loading profiles with allocation samples in the Firefox Profiler, the
/// UI will display a dropdown above the call tree to switch between regular
/// samples and allocation samples.
///
/// An allocation sample has a timestamp, a stack, a memory address, and an allocation size.
///
/// The size should be in bytes, with positive values for allocations and negative
/// values for deallocations.
///
/// The memory address allows correlating the allocation and deallocation stacks of the
/// same object. This lets the UI display just the stacks for objects which haven't
/// been deallocated yet ("Retained memory").
///
/// To avoid having to capture stacks for every single allocation, you can sample just
/// a subset of allocations. The sampling should be done based on the allocation size
/// ("probability per byte"). The decision whether to sample should be done at
/// allocation time and remembered for the lifetime of the allocation, so that for
/// each allocated object you either sample both its allocation and deallocation, or
/// neither.
///
/// The stack frames are supplied as an iterator. Every frame has an associated
/// category pair.
pub fn add_allocation_sample(
    &mut self,
    thread: ThreadHandle,
    timestamp: Timestamp,
    frames: impl Iterator,
    allocation_address: u64,
    allocation_size: i64,
) {
    // The profile format strictly separates sample data from different threads.
    // For allocation samples, this separation is a bit unfortunate, especially
    // when it comes to the "Retained Memory" panel which shows allocation stacks
    // for just objects that haven't been deallocated yet. This panel is per-thread,
    // and it needs to know about deallocations even if they happened on a different
    // thread from the allocation.
    // To resolve this conundrum, for now, we will put all allocation and deallocation
    // samples on a single thread per process, regardless of what thread they actually
    // happened on.
    // The Gecko profiler puts all allocation samples on the main thread, for example.
    // Here in fxprof-processed-profile, we just deem the first thread of each process
    // as the processes "allocation thread".
    let process_handle = self.threads[thread.0].process();
    let process = &self.processes[process_handle.0];
    let allocation_thread_handle = process.thread_handle_for_allocations().unwrap();
    // Note: the stack is interned into the allocation thread, not the sampled thread.
    let stack_index = self.stack_index_for_frames(allocation_thread_handle, frames);
    self.threads[allocation_thread_handle.0].add_allocation_sample(
        timestamp,
        stack_index,
        allocation_address,
        allocation_size,
    );
}

/// Add a marker to the given thread.
pub fn add_marker(
    &mut self,
    thread: ThreadHandle,
    category: CategoryHandle,
    name: &str,
    marker: T,
    timing: MarkerTiming,
) {
    // Register the marker type's schema the first time this marker type is used.
    self.marker_schemas
        .entry(T::MARKER_TYPE_NAME)
        .or_insert_with(T::schema);
    self.threads[thread.0].add_marker(category, name, marker, timing, None);
}

/// Add a marker to the given thread, with a stack.
pub fn add_marker_with_stack(
    &mut self,
    thread: ThreadHandle,
    category: CategoryHandle,
    name: &str,
    marker: T,
    timing: MarkerTiming,
    stack_frames: impl Iterator,
) {
    self.marker_schemas
        .entry(T::MARKER_TYPE_NAME)
        .or_insert_with(T::schema);
    let stack_index = self.stack_index_for_frames(thread, stack_frames);
    self.threads[thread.0].add_marker(category, name, marker, timing, stack_index);
}

/// Add a data point to a counter. For a memory counter, `value_delta` is the number
/// of bytes that have been allocated / deallocated since the previous counter sample, and
/// `number_of_operations` is the number of `malloc` / `free` calls since the previous
/// counter sample. Both numbers are deltas.
///
/// The graph in the profiler UI will connect subsequent data points with diagonal lines.
/// Counters are intended for values that are measured at a time-based sample rate; for example,
/// you could add a counter sample once every millisecond with the current memory usage.
///
/// Alternatively, you can emit a new data point only whenever the value changes.
/// In that case you probably want to emit two values per change: one right before (with
/// the old value) and one right at the timestamp of change (with the new value). This way
/// you'll get more horizontal lines, and the diagonal line will be very short.
pub fn add_counter_sample(
    &mut self,
    counter: CounterHandle,
    timestamp: Timestamp,
    value_delta: f64,
    number_of_operations_delta: u32,
) {
    self.counters[counter.0].add_sample(timestamp, value_delta, number_of_operations_delta)
}

// frames is ordered from caller to callee, i.e.
// root function first, pc last
//
// Interns the given frame list into the thread's frame and stack tables, and
// returns the index of the innermost stack node (or None for an empty frame list).
// NOTE(review): extraction stripped the generic arguments here — `impl Iterator`
// is likely `impl Iterator<Item = FrameInfo>`, and the bare `Option` return type
// lost its stack-index argument; confirm against the original source.
fn stack_index_for_frames(
    &mut self,
    thread: ThreadHandle,
    frames: impl Iterator,
) -> Option {
    let thread = &mut self.threads[thread.0];
    let process = &mut self.processes[thread.process().0];
    let mut prefix = None;
    for frame_info in frames {
        // Resolve the frame to a location. Absolute addresses are converted into
        // library-relative addresses using the process / kernel lib mappings.
        let location = match frame_info.frame {
            Frame::InstructionPointer(ip) => {
                process.convert_address(&mut self.global_libs, &mut self.kernel_libs, ip)
            }
            Frame::ReturnAddress(ra) => process.convert_address(
                &mut self.global_libs,
                &mut self.kernel_libs,
                // Subtract 1 so the looked-up address falls inside the call
                // instruction rather than the instruction after it.
                ra.saturating_sub(1),
            ),
            Frame::RelativeAddressFromInstructionPointer(lib_handle, relative_address) => {
                let global_lib_index = self.global_libs.index_for_used_lib(lib_handle);
                InternalFrameLocation::AddressInLib(relative_address, global_lib_index)
            }
            Frame::RelativeAddressFromReturnAddress(lib_handle, relative_address) => {
                let global_lib_index = self.global_libs.index_for_used_lib(lib_handle);
                // Same -1 nudge as for Frame::ReturnAddress above.
                let nudged_relative_address = relative_address.saturating_sub(1);
                InternalFrameLocation::AddressInLib(nudged_relative_address, global_lib_index)
            }
            Frame::Label(string_index) => {
                // Translate from the profile-global string table into this
                // thread's string table.
                let thread_string_index =
                    thread.convert_string_index(&self.string_table, string_index.0);
                InternalFrameLocation::Label(thread_string_index)
            }
        };
        let internal_frame = InternalFrame {
            location,
            flags: frame_info.flags,
            category_pair: frame_info.category_pair,
        };
        let frame_index = thread.frame_index_for_frame(internal_frame, &self.global_libs);
        // Each node's prefix is the previously-interned (caller-side) stack node.
        prefix =
            Some(thread.stack_index_for_stack(prefix, frame_index, frame_info.category_pair));
    }
    prefix
}

/// Returns a flattened list of `ThreadHandle`s in the right order.
///
// The processed profile format has all threads from all processes in a flattened threads list.
// Each thread duplicates some information about its process, which allows the Firefox Profiler
// UI to group threads from the same process.
// NOTE(review): the tuple's Vec type arguments were stripped by extraction —
// judging by the body, this returns (threads sorted for JSON output, the index of
// each process's first thread within that sorted list); confirm against the
// original source.
fn sorted_threads(&self) -> (Vec, Vec) {
    let mut sorted_threads = Vec::with_capacity(self.threads.len());
    let mut first_thread_index_per_process = vec![0; self.processes.len()];
    // Sort the processes first, then sort each process's threads within that order.
    let mut sorted_processes: Vec<_> = (0..self.processes.len()).map(ProcessHandle).collect();
    sorted_processes.sort_by(|a_handle, b_handle| {
        let a = &self.processes[a_handle.0];
        let b = &self.processes[b_handle.0];
        a.cmp_for_json_order(b)
    });
    for process in sorted_processes {
        let prev_len = sorted_threads.len();
        first_thread_index_per_process[process.0] = prev_len;
        sorted_threads.extend_from_slice(self.processes[process.0].threads());
        let sorted_threads_for_this_process = &mut sorted_threads[prev_len..];
        sorted_threads_for_this_process.sort_by(|a_handle, b_handle| {
            let a = &self.threads[a_handle.0];
            let b = &self.threads[b_handle.0];
            a.cmp_for_json_order(b)
        });
    }
    (sorted_threads, first_thread_index_per_process)
}

fn serializable_threads<'a>(
    &'a self,
    sorted_threads: &'a [ThreadHandle],
) -> SerializableProfileThreadsProperty<'a> {
    SerializableProfileThreadsProperty {
        threads: &self.threads,
        processes: &self.processes,
        categories: &self.categories,
        sorted_threads,
    }
}

fn serializable_counters<'a>(
    &'a self,
    first_thread_index_per_process: &'a [usize],
) -> SerializableProfileCountersProperty<'a> {
    SerializableProfileCountersProperty {
        counters: &self.counters,
        first_thread_index_per_process,
    }
}

fn contains_js_function(&self) -> bool {
    self.threads.iter().any(|t| t.contains_js_function())
}
}

// NOTE(review): the serde impls below are missing their `<S: Serializer>` generic
// and `Result<S::Ok, S::Error>` return-type arguments; this looks like extraction
// stripping angle brackets rather than the original code — confirm.
impl Serialize for Profile {
    fn serialize(&self, serializer: S) -> Result {
        let (sorted_threads, first_thread_index_per_process) = self.sorted_threads();
        let mut map = serializer.serialize_map(None)?;
        map.serialize_entry("meta", &SerializableProfileMeta(self))?;
        map.serialize_entry("libs", &self.global_libs)?;
        map.serialize_entry("threads", &self.serializable_threads(&sorted_threads))?;
        // "pages" and "profilerOverhead" are always emitted as empty arrays.
        map.serialize_entry("pages", &[] as &[()])?;
        map.serialize_entry("profilerOverhead", &[] as &[()])?;
        map.serialize_entry(
            "counters",
            &self.serializable_counters(&first_thread_index_per_process),
        )?;
        map.end()
    }
}

struct SerializableProfileMeta<'a>(&'a Profile);

impl<'a> Serialize for SerializableProfileMeta<'a> {
    fn serialize(&self, serializer: S) -> Result {
        let mut map = serializer.serialize_map(None)?;
        map.serialize_entry("categories", &self.0.categories)?;
        map.serialize_entry("debug", &false)?;
        map.serialize_entry(
            "extensions",
            &json!({
                "length": 0,
                "baseURL": [],
                "id": [],
                "name": [],
            }),
        )?;
        // The JSON expects the sampling interval in milliseconds.
        map.serialize_entry("interval", &(self.0.interval.as_secs_f64() * 1000.0))?;
        map.serialize_entry("preprocessedProfileVersion", &46)?;
        map.serialize_entry("processType", &0)?;
        map.serialize_entry("product", &self.0.product)?;
        map.serialize_entry(
            "sampleUnits",
            &json!({
                "time": "ms",
                "eventDelay": "ms",
                "threadCPUDelta": "µs",
            }),
        )?;
        map.serialize_entry("startTime", &self.0.reference_timestamp)?;
        map.serialize_entry("symbolicated", &false)?;
        map.serialize_entry("pausedRanges", &[] as &[()])?;
        map.serialize_entry("version", &24)?;
        map.serialize_entry("usesOnlyOneStackType", &(!self.0.contains_js_function()))?;
        map.serialize_entry("doesNotUseFrameImplementation", &true)?;
        map.serialize_entry("sourceCodeIsNotOnSearchfox", &true)?;
        // NOTE(review): this Vec's element type was stripped by extraction
        // (likely the marker schema type stored in `marker_schemas`) — confirm.
        let mut marker_schemas: Vec = self.0.marker_schemas.values().cloned().collect();
        // Sort by type name for deterministic output.
        marker_schemas.sort_by_key(|schema| schema.type_name);
        map.serialize_entry("markerSchema", &marker_schemas)?;
        map.end()
    }
}

struct SerializableProfileThreadsProperty<'a> {
    threads: &'a [Thread],
    processes: &'a [Process],
    categories: &'a [Category],
    sorted_threads: &'a [ThreadHandle],
}

impl<'a> Serialize for SerializableProfileThreadsProperty<'a> {
    fn serialize(&self, serializer: S) -> Result {
        let mut seq = serializer.serialize_seq(Some(self.threads.len()))?;
        for thread in self.sorted_threads {
            let categories = &self.categories;
            let thread = &self.threads[thread.0];
            let process = &self.processes[thread.process().0];
            seq.serialize_element(&SerializableProfileThread(process, thread, categories))?;
        }
        seq.end()
    }
}

struct SerializableProfileCountersProperty<'a> {
    counters: &'a [Counter],
    first_thread_index_per_process: &'a [usize],
}

impl<'a> Serialize for SerializableProfileCountersProperty<'a> {
    fn serialize(&self, serializer: S) -> Result {
        let mut seq = serializer.serialize_seq(Some(self.counters.len()))?;
        for counter in self.counters {
            // Counters are attached to their process's first thread in the JSON.
            let main_thread_index = self.first_thread_index_per_process[counter.process().0];
            seq.serialize_element(&counter.as_serializable(main_thread_index))?;
        }
        seq.end()
    }
}

struct SerializableProfileThread<'a>(&'a Process, &'a Thread, &'a [Category]);

impl<'a> Serialize for SerializableProfileThread<'a> {
    fn serialize(&self, serializer: S) -> Result {
        let SerializableProfileThread(process, thread, categories) = self;
        // The thread JSON duplicates some process information; see the comment
        // above `sorted_threads`.
        let process_start_time = process.start_time();
        let process_end_time = process.end_time();
        let process_name = process.name();
        let pid = process.pid();
        thread.serialize_with(
            serializer,
            categories,
            process_start_time,
            process_end_time,
            process_name,
            pid,
        )
    }
}
fxprof-processed-profile-0.7.0/src/reference_timestamp.rs000064400000000000000000000026451046102023000217340ustar 00000000000000
use serde::ser::{Serialize, Serializer};
use std::time::{Duration, SystemTime, UNIX_EPOCH};

/// A timestamp which anchors the profile in absolute time.
///
/// In the profile JSON, this uses a UNIX timestamp.
///
/// All timestamps in the profile are relative to this reference timestamp.
#[derive(Debug, Clone, Copy, PartialOrd, PartialEq)]
pub struct ReferenceTimestamp {
    ms_since_unix_epoch: f64,
}

impl ReferenceTimestamp {
    /// Create a reference timestamp from a [`Duration`] since the UNIX epoch.
    pub fn from_duration_since_unix_epoch(duration: Duration) -> Self {
        Self::from_millis_since_unix_epoch(duration.as_secs_f64() * 1000.0)
    }

    /// Create a reference timestamp from milliseconds since the UNIX epoch.
    pub fn from_millis_since_unix_epoch(ms_since_unix_epoch: f64) -> Self {
        Self {
            ms_since_unix_epoch,
        }
    }

    /// Create a reference timestamp from a [`SystemTime`].
    ///
    /// Panics if `system_time` is before the UNIX epoch.
    pub fn from_system_time(system_time: SystemTime) -> Self {
        Self::from_duration_since_unix_epoch(system_time.duration_since(UNIX_EPOCH).unwrap())
    }
}

// NOTE(review): extraction stripped generic arguments below — `impl From` is
// likely `impl From<SystemTime>`, and the serde impls are missing their
// `<S: Serializer>` bounds; confirm against the original source.
impl From for ReferenceTimestamp {
    fn from(system_time: SystemTime) -> Self {
        Self::from_system_time(system_time)
    }
}

impl Serialize for ReferenceTimestamp {
    fn serialize(&self, serializer: S) -> Result {
        // Serialized as a plain JSON number: milliseconds since the UNIX epoch.
        self.ms_since_unix_epoch.serialize(serializer)
    }
}
fxprof-processed-profile-0.7.0/src/resource_table.rs000064400000000000000000000043041046102023000207130ustar 00000000000000
use serde::ser::{Serialize, SerializeMap, Serializer};

use crate::fast_hash_map::FastHashMap;
use crate::global_lib_table::{GlobalLibIndex, GlobalLibTable};
use crate::serialization_helpers::SerializableSingleValueColumn;
use crate::thread_string_table::ThreadInternalStringIndex;
use crate::thread_string_table::ThreadStringTable;

// De-duplicating table of per-library "resource" rows; `lib_to_resource` maps each
// lib to its row, which is created on first use in `resource_for_lib`.
// NOTE(review): the Vec / FastHashMap generic arguments were stripped by
// extraction (from the usage, likely Vec<GlobalLibIndex>,
// Vec<ThreadInternalStringIndex>, FastHashMap<GlobalLibIndex, ResourceIndex>) —
// confirm against the original source.
#[derive(Debug, Clone, Default)]
pub struct ResourceTable {
    resource_libs: Vec,
    resource_names: Vec,
    lib_to_resource: FastHashMap,
}

impl ResourceTable {
    pub fn new() -> Self {
        Default::default()
    }

    // Returns the resource row for `lib_index`, creating it (and interning the
    // lib's name into the thread's string table) on first use.
    pub fn resource_for_lib(
        &mut self,
        lib_index: GlobalLibIndex,
        global_libs: &GlobalLibTable,
        thread_string_table: &mut ThreadStringTable,
    ) -> ResourceIndex {
        // Local borrows so the closure below doesn't capture all of `self`.
        let resource_libs = &mut self.resource_libs;
        let resource_names = &mut self.resource_names;
        *self.lib_to_resource.entry(lib_index).or_insert_with(|| {
            let resource = ResourceIndex(resource_libs.len() as u32);
            let lib_name = &global_libs.get_lib(lib_index).unwrap().name;
            resource_libs.push(lib_index);
            resource_names.push(thread_string_table.index_for_string(lib_name));
            resource
        })
    }
}

impl Serialize for ResourceTable {
    fn serialize(&self, serializer: S) -> Result {
        const RESOURCE_TYPE_LIB: u32 = 1;
        let len = self.resource_libs.len();
        let mut map = serializer.serialize_map(None)?;
        map.serialize_entry("length", &len)?;
        map.serialize_entry("lib", &self.resource_libs)?;
        map.serialize_entry("name", &self.resource_names)?;
        // Every resource in this crate is a lib, so "host" is null for each row
        // and "type" is RESOURCE_TYPE_LIB for each row.
        map.serialize_entry("host", &SerializableSingleValueColumn((), len))?;
        map.serialize_entry(
            "type",
            &SerializableSingleValueColumn(RESOURCE_TYPE_LIB, len),
        )?;
        map.end()
    }
}

#[derive(Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash)]
pub struct ResourceIndex(u32);

impl Serialize for ResourceIndex {
    fn serialize(&self, serializer: S) -> Result {
        serializer.serialize_u32(self.0)
    }
}
fxprof-processed-profile-0.7.0/src/sample_table.rs000064400000000000000000000245701046102023000203520ustar 00000000000000
use crate::Timestamp;
use crate::{cpu_delta::CpuDelta, serialization_helpers::SerializableSingleValueColumn};
use serde::ser::{Serialize, SerializeMap, Serializer};
use std::fmt::{Display, Formatter};

/// The sample table contains stacks with timestamps and some extra information.
///
/// In the most common case, this is used for time-based sampling: At a fixed but
/// configurable rate, a profiler samples the current stack of each thread and records
/// it in the profile.
// NOTE(review): the Vec generic arguments in this struct were stripped by
// extraction (e.g. `Vec>` for the stack-index column was presumably
// `Vec<Option<...>>`) — confirm against the original source.
#[derive(Debug, Clone, Default)]
pub struct SampleTable {
    sample_type: WeightType,
    sample_weights: Vec,
    sample_timestamps: Vec,
    /// An index into the thread's stack table for each sample. `None` means the empty stack.
    sample_stack_indexes: Vec>,
    /// CPU usage delta since the previous sample for this thread, for each sample.
    sample_cpu_deltas: Vec,
}

/// Profile samples can come in a variety of forms and represent different information.
/// The Gecko Profiler by default uses sample counts, as it samples on a fixed interval.
/// These samples are all weighted equally by default, with a weight of one. However in
/// comparison profiles, some weights are negative, creating a "diff" profile.
///
/// In addition, tracing formats can fit into the sample-based format by reporting
/// the "self time" of the profile.
Each of these "self time" samples would then /// provide the weight, in duration. Currently, the tracing format assumes that /// the timing comes in milliseconds (see 'tracing-ms') but if needed, microseconds /// or nanoseconds support could be added. /// /// e.g. The following tracing data could be represented as samples: /// /// ```ignore /// 0 1 2 3 4 5 6 7 8 9 10 /// | | | | | | | | | | | /// - - - - - - - - - - - /// A A A A A A A A A A A /// B B D D D D /// C C E E E E /// ``` /// This chart represents the self time. /// ```ignore /// 0 1 2 3 4 5 6 7 8 9 10 /// | | | | | | | | | | | /// A A C C E E E E A A A /// ``` /// And finally this is what the samples table would look like. /// ```ignore /// SamplesTable = { /// time: [0, 2, 4, 8], /// stack: [A, ABC, ADE, A], /// weight: [2, 2, 4, 3], /// } /// ``` /// /// JS type definition: /// ```ts /// export type WeightType = 'samples' | 'tracing-ms' | 'bytes'; /// ``` /// /// Documentation and code from: /// #[derive(Debug, Clone)] pub enum WeightType { /// The weight is an integer multiplier. /// /// This affects the total + self score of each call node in the call tree, /// and the order in the tree because the tree is ordered from large "totals" /// to small "totals". /// It also affects the width of the sample's stack's box in the flame graph. Samples, /// Each sample will have a weight in terms of (fractional) milliseconds. /// Not supported by fxprof-processed-profile at the moment. #[allow(dead_code)] TracingMs, /// Each sample will have a weight in terms of bytes allocated. 
Bytes, } impl Default for WeightType { fn default() -> Self { WeightType::Samples } } impl Display for WeightType { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { match self { WeightType::Samples => write!(f, "samples"), WeightType::TracingMs => write!(f, "tracing-ms"), WeightType::Bytes => write!(f, "bytes"), } } } impl Serialize for WeightType { fn serialize(&self, serializer: S) -> Result { match self { WeightType::Samples => serializer.serialize_str("samples"), WeightType::TracingMs => serializer.serialize_str("tracing-ms"), WeightType::Bytes => serializer.serialize_str("bytes"), } } } impl SampleTable { pub fn new() -> Self { Default::default() } pub fn add_sample( &mut self, timestamp: Timestamp, stack_index: Option, cpu_delta: CpuDelta, weight: i32, ) { self.sample_weights.push(weight); self.sample_timestamps.push(timestamp); self.sample_stack_indexes.push(stack_index); self.sample_cpu_deltas.push(cpu_delta); } pub fn modify_last_sample(&mut self, timestamp: Timestamp, weight: i32) { *self.sample_weights.last_mut().unwrap() += weight; *self.sample_timestamps.last_mut().unwrap() = timestamp; } } impl Serialize for SampleTable { fn serialize(&self, serializer: S) -> Result { let len = self.sample_timestamps.len(); let mut map = serializer.serialize_map(None)?; map.serialize_entry("length", &len)?; map.serialize_entry("stack", &self.sample_stack_indexes)?; map.serialize_entry("time", &self.sample_timestamps)?; map.serialize_entry("weight", &self.sample_weights)?; map.serialize_entry("weightType", &self.sample_type.to_string())?; map.serialize_entry("threadCPUDelta", &self.sample_cpu_deltas)?; map.end() } } /// JS documentation of the native allocations table: /// /// ```ignore /// /** /// * This variant is the original version of the table, before the memory address /// * and threadId were added. 
/// */
/// export type UnbalancedNativeAllocationsTable = {|
/// time: Milliseconds[],
/// // "weight" is used here rather than "bytes", so that this type will match the
/// // SamplesLikeTableShape.
/// weight: Bytes[],
/// weightType: 'bytes',
/// stack: Array,
/// length: number,
/// |};
///
/// /**
/// * The memory address and thread ID were added later.
/// */
/// export type BalancedNativeAllocationsTable = {|
/// ...UnbalancedNativeAllocationsTable,
/// memoryAddress: number[],
/// threadId: number[],
/// |};
/// ```
///
/// In this crate we always create a `BalancedNativeAllocationsTable`. We require
/// a memory address for each allocation / deallocation sample.
// NOTE(review): the Vec generic arguments in this struct were stripped by
// extraction (time: Vec<Timestamp>, allocation_size: Vec<i64>,
// allocation_address: Vec<u64>, judging by `add_sample` below) — confirm.
#[derive(Debug, Clone, Default)]
pub struct NativeAllocationsTable {
    /// The timestamps for each sample
    time: Vec,
    /// The stack index for each sample
    stack: Vec>,
    /// The size in bytes (positive for allocations, negative for deallocations) for each sample
    allocation_size: Vec,
    /// The memory address of the allocation for each sample
    allocation_address: Vec,
}

impl NativeAllocationsTable {
    /// Add a sample to the [`NativeAllocationsTable`]. All four columns grow in lockstep.
    pub fn add_sample(
        &mut self,
        timestamp: Timestamp,
        stack_index: Option,
        allocation_address: u64,
        allocation_size: i64,
    ) {
        self.time.push(timestamp);
        self.stack.push(stack_index);
        self.allocation_address.push(allocation_address);
        self.allocation_size.push(allocation_size);
    }
}

impl Serialize for NativeAllocationsTable {
    fn serialize(&self, serializer: S) -> Result {
        let len = self.time.len();
        let mut map = serializer.serialize_map(None)?;
        map.serialize_entry("time", &self.time)?;
        // The allocation sizes are the "weight" column; see the JS docs above.
        map.serialize_entry("weight", &self.allocation_size)?;
        map.serialize_entry("weightType", &WeightType::Bytes)?;
        map.serialize_entry("stack", &self.stack)?;
        map.serialize_entry("memoryAddress", &self.allocation_address)?;
        // The threadId column is currently unused by the Firefox Profiler.
        // Fill the column with zeros because the type definitions require it to be a number.
        // A better alternative would be to use thread indexes or the threads' string TIDs.
        map.serialize_entry("threadId", &SerializableSingleValueColumn(0, len))?;
        map.serialize_entry("length", &len)?;
        map.end()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use assert_json_diff::assert_json_eq;
    use serde_json::json;

    #[test]
    fn test_serialize_native_allocations() {
        // example of `nativeAllocations`:
        //
        // "nativeAllocations": {
        //   "time": [
        //     274364.1082197344,
        //     274364.17226073437,
        //     274364.2063027344,
        //     274364.2229277344,
        //     274364.44117773435,
        //     274366.4713027344,
        //     274366.48871973436,
        //     274366.6601777344,
        //     274366.6705107344
        //   ],
        //   "weight": [
        //     4096,
        //     -4096,
        //     4096,
        //     -4096,
        //     147456,
        //     4096,
        //     -4096,
        //     96,
        //     -96
        //   ],
        //   "weightType": "bytes",
        //   "stack": [
        //     71,
        //     88,
        //     119,
        //     138,
        //     null,
        //     171,
        //     190,
        //     210,
        //     214
        //   ],
        //   "memoryAddress": [
        //     4388749312,
        //     4388749312,
        //     4388749312,
        //     4388749312,
        //     4376330240,
        //     4388749312,
        //     4388749312,
        //     4377576256,
        //     4377576256
        //   ],
        //   "threadId": [
        //     0,
        //     0,
        //     0,
        //     0,
        //     0,
        //     0,
        //     0,
        //     0,
        //     0
        //   ],
        //   "length": 9
        // },
        let mut native_allocations_table = NativeAllocationsTable::default();
        native_allocations_table.add_sample(
            Timestamp::from_millis_since_reference(274_363.248_375),
            None,
            5969772544,
            147456,
        );

        assert_json_eq!(
            native_allocations_table,
            json!({
                "time": [
                    274363.248375
                ],
                "weight": [
                    147456
                ],
                "weightType": "bytes",
                "stack": [
                    null
                ],
                "memoryAddress": [
                    5969772544u64
                ],
                "threadId": [
                    0
                ],
                "length": 1
            })
        );
    }
}
fxprof-processed-profile-0.7.0/src/serialization_helpers.rs000064400000000000000000000017711046102023000223100ustar 00000000000000
use serde::ser::{Serialize, SerializeSeq, Serializer};

use crate::Timestamp;

// Serializes as a JSON array which repeats the single value `self.0` exactly
// `self.1` times.
// NOTE(review): the tuple struct's generic parameter (`<T: Serialize>` or
// similar) was stripped by extraction — confirm against the original source.
pub struct SerializableSingleValueColumn(pub T, pub usize);

impl Serialize for SerializableSingleValueColumn {
    fn serialize(&self, serializer: S) -> Result {
        let mut seq = serializer.serialize_seq(Some(self.1))?;
        for _ in 0..self.1 {
seq.serialize_element(&self.0)?; } seq.end() } } pub struct SerializableOptionalTimestampColumn<'a>(pub &'a [Option]); impl<'a> Serialize for SerializableOptionalTimestampColumn<'a> { fn serialize(&self, serializer: S) -> Result { let mut seq = serializer.serialize_seq(Some(self.0.len()))?; for timestamp in self.0 { match timestamp { Some(timestamp) => seq.serialize_element(×tamp)?, None => seq.serialize_element(&0.0)?, } } seq.end() } } fxprof-processed-profile-0.7.0/src/stack_table.rs000064400000000000000000000121331046102023000201700ustar 00000000000000use serde::ser::{Serialize, SerializeMap, Serializer}; use crate::category::{ Category, CategoryHandle, CategoryPairHandle, SerializableSubcategoryColumn, Subcategory, }; use crate::fast_hash_map::FastHashMap; /// The stack table stores the tree of stack nodes of a thread. The shape of the tree is encoded in /// the prefix column: Root stack nodes have null as their prefix, and every non-root stack has the /// stack index of its "caller" / "parent" as its prefix. Every stack node also has a frame and a /// category. A "call stack" is a list of frames. Every stack index in the stack table represents /// such a call stack; the "list of frames" is obtained by walking the path in the tree from the /// root to the given stack node. /// /// Stacks are used in the thread's samples; each sample refers to a stack index. Stacks can be /// shared between samples. /// /// With this representation, every sample only needs to store a single integer to identify the /// sample's stack. We take advantage of the fact that many call stacks in the profile have a /// shared prefix; storing these stacks as a tree saves a lot of space compared to storing them as /// actual lists of frames. /// /// The category of a stack node is always non-null and is derived from a stack's frame and its /// prefix. Frames can have null categories, stacks cannot. 
If a stack's frame has a null category, /// the stack inherits the category of its prefix stack. Root stacks whose frame has a null stack /// have their category set to the "default category". (The default category is currently defined /// as the category in the profile's category list whose color is "grey", and such a category is /// required to be present.) /// /// You could argue that the stack table's category column is derived data and as such doesn't need /// to be stored in the profile itself. This is true, but storing this information in the stack /// table makes it a lot easier to carry it through various transforms that we apply to threads. /// For example, here's a case where a stack's category is not recoverable from any other /// information in the transformed thread: /// /// In the call path /// someJSFunction [JS] -> Node.insertBefore [DOM] -> nsAttrAndChildArray::InsertChildAt, /// /// the stack node for nsAttrAndChildArray::InsertChildAt should inherit the category DOM from its /// "Node.insertBefore" prefix stack. And it should keep the DOM category even if you apply the /// "Merge node into calling function" transform to Node.insertBefore. This transform removes the /// stack node "Node.insertBefore" from the stackTable, so the information about the DOM category /// would be lost if it wasn't inherited into the nsAttrAndChildArray::InsertChildAt stack before /// transforms are applied. #[derive(Debug, Clone, Default)] pub struct StackTable { stack_prefixes: Vec>, stack_frames: Vec, /// Imported profiles may not have categories. In this case fill the array with 0s. 
stack_categories: Vec, stack_subcategories: Vec, // (parent stack, frame_index) -> stack index index: FastHashMap<(Option, usize), usize>, } impl StackTable { pub fn new() -> Self { Default::default() } pub fn index_for_stack( &mut self, prefix: Option, frame: usize, category_pair: CategoryPairHandle, ) -> usize { match self.index.get(&(prefix, frame)) { Some(stack) => *stack, None => { let CategoryPairHandle(category, subcategory_index) = category_pair; let subcategory = match subcategory_index { Some(index) => Subcategory::Normal(index), None => Subcategory::Other(category), }; let stack = self.stack_prefixes.len(); self.stack_prefixes.push(prefix); self.stack_frames.push(frame); self.stack_categories.push(category); self.stack_subcategories.push(subcategory); self.index.insert((prefix, frame), stack); stack } } } pub fn serialize_with_categories<'a>( &'a self, categories: &'a [Category], ) -> impl Serialize + 'a { SerializableStackTable { table: self, categories, } } } struct SerializableStackTable<'a> { table: &'a StackTable, categories: &'a [Category], } impl<'a> Serialize for SerializableStackTable<'a> { fn serialize(&self, serializer: S) -> Result { let len = self.table.stack_prefixes.len(); let mut map = serializer.serialize_map(Some(3))?; map.serialize_entry("length", &len)?; map.serialize_entry("prefix", &self.table.stack_prefixes)?; map.serialize_entry("frame", &self.table.stack_frames)?; map.serialize_entry("category", &self.table.stack_categories)?; map.serialize_entry( "subcategory", &SerializableSubcategoryColumn(&self.table.stack_subcategories, self.categories), )?; map.end() } } fxprof-processed-profile-0.7.0/src/string_table.rs000064400000000000000000000033671046102023000204020ustar 00000000000000use std::ops::Deref; use serde::{Serialize, Serializer}; use crate::fast_hash_map::FastHashMap; #[derive(Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash)] pub struct StringIndex(u32); #[derive(Debug, Clone, Default)] pub struct StringTable { 
strings: Vec, index: FastHashMap, } impl StringTable { pub fn index_for_string(&mut self, s: &str) -> StringIndex { match self.index.get(s) { Some(string_index) => *string_index, None => { let string_index = StringIndex(self.strings.len() as u32); self.strings.push(s.to_string()); self.index.insert(s.to_string(), string_index); string_index } } } pub fn get_string(&self, index: StringIndex) -> Option<&str> { self.strings.get(index.0 as usize).map(Deref::deref) } } impl Serialize for StringTable { fn serialize(&self, serializer: S) -> Result { self.strings.serialize(serializer) } } #[derive(Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash)] pub struct GlobalStringIndex(StringIndex); #[derive(Debug, Clone, Default)] pub struct GlobalStringTable { table: StringTable, } impl GlobalStringTable { pub fn new() -> Self { Default::default() } pub fn index_for_string(&mut self, s: &str) -> GlobalStringIndex { GlobalStringIndex(self.table.index_for_string(s)) } pub fn get_string(&self, index: GlobalStringIndex) -> Option<&str> { self.table.get_string(index.0) } } impl Serialize for StringIndex { fn serialize(&self, serializer: S) -> Result { serializer.serialize_u32(self.0) } } fxprof-processed-profile-0.7.0/src/thread.rs000064400000000000000000000203131046102023000171620ustar 00000000000000use std::borrow::Cow; use std::cmp::Ordering; use serde::ser::{SerializeMap, Serializer}; use serde_json::json; use crate::category::{Category, CategoryPairHandle}; use crate::cpu_delta::CpuDelta; use crate::frame_table::{FrameTable, InternalFrame}; use crate::func_table::FuncTable; use crate::global_lib_table::GlobalLibTable; use crate::marker_table::MarkerTable; use crate::native_symbols::NativeSymbols; use crate::resource_table::ResourceTable; use crate::sample_table::{NativeAllocationsTable, SampleTable}; use crate::stack_table::StackTable; use crate::string_table::{GlobalStringIndex, GlobalStringTable}; use crate::thread_string_table::{ThreadInternalStringIndex, 
ThreadStringTable}; use crate::{CategoryHandle, MarkerTiming, ProfilerMarker, Timestamp}; /// A process. Can be created with [`Profile::add_process`](crate::Profile::add_process). #[derive(Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash)] pub struct ProcessHandle(pub(crate) usize); #[derive(Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash)] pub struct CounterHandle(pub(crate) usize); #[derive(Debug)] pub struct Thread { process: ProcessHandle, tid: String, name: Option, start_time: Timestamp, end_time: Option, is_main: bool, stack_table: StackTable, frame_table: FrameTable, func_table: FuncTable, samples: SampleTable, native_allocations: Option, markers: MarkerTable, resources: ResourceTable, native_symbols: NativeSymbols, string_table: ThreadStringTable, last_sample_stack: Option, last_sample_was_zero_cpu: bool, } impl Thread { pub fn new(process: ProcessHandle, tid: String, start_time: Timestamp, is_main: bool) -> Self { Self { process, tid, name: None, start_time, end_time: None, is_main, stack_table: StackTable::new(), frame_table: FrameTable::new(), func_table: FuncTable::new(), samples: SampleTable::new(), native_allocations: None, markers: MarkerTable::new(), resources: ResourceTable::new(), native_symbols: NativeSymbols::new(), string_table: ThreadStringTable::new(), last_sample_stack: None, last_sample_was_zero_cpu: false, } } pub fn set_name(&mut self, name: &str) { self.name = Some(name.to_string()); } pub fn set_start_time(&mut self, start_time: Timestamp) { self.start_time = start_time; } pub fn set_end_time(&mut self, end_time: Timestamp) { self.end_time = Some(end_time); } pub fn process(&self) -> ProcessHandle { self.process } pub fn convert_string_index( &mut self, global_table: &GlobalStringTable, index: GlobalStringIndex, ) -> ThreadInternalStringIndex { self.string_table .index_for_global_string(index, global_table) } pub fn frame_index_for_frame( &mut self, frame: InternalFrame, global_libs: &GlobalLibTable, ) -> usize { 
self.frame_table.index_for_frame( &mut self.string_table, &mut self.resources, &mut self.func_table, &mut self.native_symbols, global_libs, frame, ) } pub fn stack_index_for_stack( &mut self, prefix: Option, frame: usize, category_pair: CategoryPairHandle, ) -> usize { self.stack_table .index_for_stack(prefix, frame, category_pair) } pub fn add_sample( &mut self, timestamp: Timestamp, stack_index: Option, cpu_delta: CpuDelta, weight: i32, ) { self.samples .add_sample(timestamp, stack_index, cpu_delta, weight); self.last_sample_stack = stack_index; self.last_sample_was_zero_cpu = cpu_delta == CpuDelta::ZERO; } pub fn add_allocation_sample( &mut self, timestamp: Timestamp, stack_index: Option, allocation_address: u64, allocation_size: i64, ) { // Create allocations table, if it doesn't exist yet. let allocations = self.native_allocations.get_or_insert_with(Default::default); // Add the allocation sample. allocations.add_sample(timestamp, stack_index, allocation_address, allocation_size); } pub fn add_sample_same_stack_zero_cpu(&mut self, timestamp: Timestamp, weight: i32) { if self.last_sample_was_zero_cpu { self.samples.modify_last_sample(timestamp, weight); } else { let stack_index = self.last_sample_stack; self.samples .add_sample(timestamp, stack_index, CpuDelta::ZERO, weight); self.last_sample_was_zero_cpu = true; } } pub fn add_marker( &mut self, category: CategoryHandle, name: &str, marker: T, timing: MarkerTiming, stack_index: Option, ) { let name_string_index = self.string_table.index_for_string(name); let mut data = marker.json_marker_data(); if let Some(stack_index) = stack_index { if let Some(obj) = data.as_object_mut() { obj.insert("cause".to_string(), json!({ "stack": stack_index })); } } self.markers .add_marker(category, name_string_index, timing, data); } pub fn contains_js_function(&self) -> bool { self.func_table.contains_js_function() } pub fn cmp_for_json_order(&self, other: &Thread) -> Ordering { let ordering = 
(!self.is_main).cmp(&(!other.is_main)); if ordering != Ordering::Equal { return ordering; } if let Some(ordering) = self.start_time.partial_cmp(&other.start_time) { if ordering != Ordering::Equal { return ordering; } } let ordering = self.name.cmp(&other.name); if ordering != Ordering::Equal { return ordering; } self.tid.cmp(&other.tid) } pub fn serialize_with( &self, serializer: S, categories: &[Category], process_start_time: Timestamp, process_end_time: Option, process_name: &str, pid: &str, ) -> Result { let thread_name: Cow = match (self.is_main, &self.name) { (true, _) => process_name.into(), (false, Some(name)) => name.into(), (false, None) => format!("Thread <{}>", self.tid).into(), }; let thread_register_time = self.start_time; let thread_unregister_time = self.end_time; let mut map = serializer.serialize_map(None)?; map.serialize_entry("frameTable", &self.frame_table.as_serializable(categories))?; map.serialize_entry("funcTable", &self.func_table)?; map.serialize_entry("markers", &self.markers)?; map.serialize_entry("name", &thread_name)?; map.serialize_entry("isMainThread", &self.is_main)?; map.serialize_entry("nativeSymbols", &self.native_symbols)?; map.serialize_entry("pausedRanges", &[] as &[()])?; map.serialize_entry("pid", &pid)?; map.serialize_entry("processName", process_name)?; map.serialize_entry("processShutdownTime", &process_end_time)?; map.serialize_entry("processStartupTime", &process_start_time)?; map.serialize_entry("processType", &"default")?; map.serialize_entry("registerTime", &thread_register_time)?; map.serialize_entry("resourceTable", &self.resources)?; map.serialize_entry("samples", &self.samples)?; if let Some(allocations) = &self.native_allocations { map.serialize_entry("nativeAllocations", &allocations)?; } map.serialize_entry( "stackTable", &self.stack_table.serialize_with_categories(categories), )?; map.serialize_entry("stringArray", &self.string_table)?; map.serialize_entry("tid", &self.tid)?; 
map.serialize_entry("unregisterTime", &thread_unregister_time)?; map.end() } } fxprof-processed-profile-0.7.0/src/thread_string_table.rs000064400000000000000000000030411046102023000217160ustar 00000000000000use serde::ser::{Serialize, Serializer}; use crate::string_table::{GlobalStringIndex, GlobalStringTable, StringIndex}; use crate::{fast_hash_map::FastHashMap, string_table::StringTable}; #[derive(Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash)] pub struct ThreadInternalStringIndex(pub StringIndex); impl Serialize for ThreadInternalStringIndex { fn serialize(&self, serializer: S) -> Result { self.0.serialize(serializer) } } #[derive(Debug, Clone, Default)] pub struct ThreadStringTable { table: StringTable, global_to_local_string: FastHashMap, } impl ThreadStringTable { pub fn new() -> Self { Default::default() } pub fn index_for_string(&mut self, s: &str) -> ThreadInternalStringIndex { ThreadInternalStringIndex(self.table.index_for_string(s)) } pub fn index_for_global_string( &mut self, global_index: GlobalStringIndex, global_table: &GlobalStringTable, ) -> ThreadInternalStringIndex { let table = &mut self.table; *self .global_to_local_string .entry(global_index) .or_insert_with(|| { let s = global_table.get_string(global_index).unwrap(); ThreadInternalStringIndex(table.index_for_string(s)) }) } } impl Serialize for ThreadStringTable { fn serialize(&self, serializer: S) -> Result { self.table.serialize(serializer) } } fxprof-processed-profile-0.7.0/src/timestamp.rs000064400000000000000000000016071046102023000177230ustar 00000000000000use serde::ser::{Serialize, Serializer}; /// The type used for sample and marker timestamps. /// /// Timestamps in the profile are stored in reference to the profile's [`ReferenceTimestamp`](crate::ReferenceTimestamp). 
#[derive(Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash)] pub struct Timestamp { nanos: u64, } impl Timestamp { pub fn from_nanos_since_reference(nanos: u64) -> Self { Self { nanos } } pub fn from_millis_since_reference(millis: f64) -> Self { Self { nanos: (millis * 1_000_000.0) as u64, } } } impl Serialize for Timestamp { fn serialize(&self, serializer: S) -> Result { // In the profile JSON, timestamps are currently expressed as float milliseconds // since profile.meta.startTime. serializer.serialize_f64((self.nanos as f64) / 1_000_000.0) } } fxprof-processed-profile-0.7.0/tests/integration_tests/main.rs000064400000000000000000001316361046102023000227720ustar 00000000000000use assert_json_diff::assert_json_eq; use debugid::DebugId; use serde_json::json; use fxprof_processed_profile::{ CategoryColor, CategoryHandle, CpuDelta, Frame, FrameFlags, FrameInfo, LibraryInfo, MarkerDynamicField, MarkerFieldFormat, MarkerLocation, MarkerSchema, MarkerSchemaField, MarkerStaticField, MarkerTiming, Profile, ProfilerMarker, ReferenceTimestamp, SamplingInterval, Symbol, SymbolTable, Timestamp, }; use std::sync::Arc; use std::time::Duration; // TODO: Add tests for CategoryPairHandle, ProcessHandle, ThreadHandle /// An example marker type with some text content. 
#[derive(Debug, Clone)] pub struct TextMarker(pub String); impl ProfilerMarker for TextMarker { const MARKER_TYPE_NAME: &'static str = "Text"; fn json_marker_data(&self) -> serde_json::Value { json!({ "type": Self::MARKER_TYPE_NAME, "name": self.0 }) } fn schema() -> MarkerSchema { MarkerSchema { type_name: Self::MARKER_TYPE_NAME, locations: vec![MarkerLocation::MarkerChart, MarkerLocation::MarkerTable], chart_label: Some("{marker.data.name}"), tooltip_label: None, table_label: Some("{marker.name} - {marker.data.name}"), fields: vec![MarkerSchemaField::Dynamic(MarkerDynamicField { key: "name", label: "Details", format: MarkerFieldFormat::String, searchable: true, })], } } } #[test] fn profile_without_js() { struct CustomMarker { event_name: String, allocation_size: u32, url: String, latency: Duration, } impl ProfilerMarker for CustomMarker { const MARKER_TYPE_NAME: &'static str = "custom"; fn schema() -> MarkerSchema { MarkerSchema { type_name: Self::MARKER_TYPE_NAME, locations: vec![MarkerLocation::MarkerChart, MarkerLocation::MarkerTable], chart_label: None, tooltip_label: Some("Custom tooltip label"), table_label: None, fields: vec![ MarkerSchemaField::Dynamic(MarkerDynamicField { key: "eventName", label: "Event name", format: MarkerFieldFormat::String, searchable: true, }), MarkerSchemaField::Dynamic(MarkerDynamicField { key: "allocationSize", label: "Allocation size", format: MarkerFieldFormat::Bytes, searchable: true, }), MarkerSchemaField::Dynamic(MarkerDynamicField { key: "url", label: "URL", format: MarkerFieldFormat::Url, searchable: true, }), MarkerSchemaField::Dynamic(MarkerDynamicField { key: "latency", label: "Latency", format: MarkerFieldFormat::Duration, searchable: true, }), MarkerSchemaField::Static(MarkerStaticField { label: "Description", value: "This is a test marker with a custom schema.", }), ], } } fn json_marker_data(&self) -> serde_json::Value { json!({ "type": Self::MARKER_TYPE_NAME, "eventName": self.event_name, "allocationSize": 
self.allocation_size, "url": self.url, "latency": self.latency.as_secs_f64() * 1000.0, }) } } let mut profile = Profile::new( "test", ReferenceTimestamp::from_millis_since_unix_epoch(1636162232627.0), SamplingInterval::from_millis(1), ); let process = profile.add_process("test", 123, Timestamp::from_millis_since_reference(0.0)); let thread = profile.add_thread( process, 12345, Timestamp::from_millis_since_reference(0.0), true, ); profile.add_sample( thread, Timestamp::from_millis_since_reference(0.0), vec![].into_iter(), CpuDelta::ZERO, 1, ); let libc_handle = profile.add_lib(LibraryInfo { name: "libc.so.6".to_string(), debug_name: "libc.so.6".to_string(), path: "/usr/lib/x86_64-linux-gnu/libc.so.6".to_string(), code_id: Some("f0fc29165cbe6088c0e1adf03b0048fbecbc003a".to_string()), debug_path: "/usr/lib/x86_64-linux-gnu/libc.so.6".to_string(), debug_id: DebugId::from_breakpad("1629FCF0BE5C8860C0E1ADF03B0048FB0").unwrap(), arch: None, symbol_table: Some(Arc::new(SymbolTable::new(vec![ Symbol { address: 1700001, size: Some(180), name: "libc_symbol_1".to_string(), }, Symbol { address: 674226, size: Some(44), name: "libc_symbol_3".to_string(), }, Symbol { address: 172156, size: Some(20), name: "libc_symbol_2".to_string(), }, ]))), }); profile.add_lib_mapping( process, libc_handle, 0x00007f76b7e85000, 0x00007f76b8019000, (0x00007f76b7e85000u64 - 0x00007f76b7e5d000u64) as u32, ); let dump_syms_lib_handle = profile.add_lib(LibraryInfo { name: "dump_syms".to_string(), debug_name: "dump_syms".to_string(), path: "/home/mstange/code/dump_syms/target/release/dump_syms".to_string(), code_id: Some("510d0a5c19eadf8043f203b4525be9be3dcb9554".to_string()), debug_path: "/home/mstange/code/dump_syms/target/release/dump_syms".to_string(), debug_id: DebugId::from_breakpad("5C0A0D51EA1980DF43F203B4525BE9BE0").unwrap(), arch: None, symbol_table: None, }); profile.add_lib_mapping( process, dump_syms_lib_handle, 0x000055ba9ebf6000, 0x000055ba9f07e000, (0x000055ba9ebf6000u64 - 
0x000055ba9eb4d000u64) as u32, ); let category = profile.add_category("Regular", CategoryColor::Blue); profile.add_sample( thread, Timestamp::from_millis_since_reference(1.0), vec![ 0x7f76b7ffc0e7, 0x55ba9eda3d7f, 0x55ba9ed8bb62, 0x55ba9ec92419, 0x55ba9ec2b778, 0x55ba9ec0f705, 0x7ffdb4824838, ] .into_iter() .enumerate() .rev() .map(|(i, addr)| { if i == 0 { Frame::InstructionPointer(addr) } else { Frame::ReturnAddress(addr) } }) .map(|frame| FrameInfo { frame, category_pair: category.into(), flags: FrameFlags::empty(), }), CpuDelta::ZERO, 1, ); profile.add_sample( thread, Timestamp::from_millis_since_reference(2.0), vec![ 0x55ba9eda018e, 0x55ba9ec3c3cf, 0x55ba9ec2a2d7, 0x55ba9ec53993, 0x7f76b7e8707d, 0x55ba9ec0f705, 0x7ffdb4824838, ] .into_iter() .enumerate() .rev() .map(|(i, addr)| { if i == 0 { Frame::InstructionPointer(addr) } else { Frame::ReturnAddress(addr) } }) .map(|frame| FrameInfo { frame, category_pair: category.into(), flags: FrameFlags::empty(), }), CpuDelta::ZERO, 1, ); profile.add_sample( thread, Timestamp::from_millis_since_reference(3.0), vec![ 0x7f76b7f019c6, 0x55ba9edc48f5, 0x55ba9ec010e3, 0x55ba9eca41b9, 0x7f76b7e8707d, 0x55ba9ec0f705, 0x7ffdb4824838, ] .into_iter() .enumerate() .rev() .map(|(i, addr)| { if i == 0 { Frame::InstructionPointer(addr) } else { Frame::ReturnAddress(addr) } }) .map(|frame| FrameInfo { frame, category_pair: category.into(), flags: FrameFlags::empty(), }), CpuDelta::ZERO, 1, ); profile.add_marker( thread, CategoryHandle::OTHER, "Experimental", TextMarker("Hello world!".to_string()), MarkerTiming::Instant(Timestamp::from_millis_since_reference(0.0)), ); profile.add_marker( thread, CategoryHandle::OTHER, "CustomName", CustomMarker { event_name: "My event".to_string(), allocation_size: 512000, url: "https://mozilla.org/".to_string(), latency: Duration::from_millis(123), }, MarkerTiming::Interval( Timestamp::from_millis_since_reference(0.0), Timestamp::from_millis_since_reference(2.0), ), ); let memory_counter = 
profile.add_counter(process, "malloc", "Memory", "Amount of allocated memory"); profile.add_counter_sample( memory_counter, Timestamp::from_millis_since_reference(0.0), 0.0, 0, ); profile.add_counter_sample( memory_counter, Timestamp::from_millis_since_reference(1.0), 1000.0, 2, ); profile.add_counter_sample( memory_counter, Timestamp::from_millis_since_reference(2.0), 800.0, 1, ); // eprintln!("{}", serde_json::to_string_pretty(&profile).unwrap()); assert_json_eq!( profile, json!( { "meta": { "categories": [ { "name": "Other", "color": "grey", "subcategories": [ "Other" ] }, { "name": "Regular", "color": "blue", "subcategories": [ "Other" ] } ], "debug": false, "extensions": { "baseURL": [], "id": [], "length": 0, "name": [] }, "interval": 1.0, "preprocessedProfileVersion": 46, "processType": 0, "product": "test", "sampleUnits": { "eventDelay": "ms", "threadCPUDelta": "µs", "time": "ms" }, "startTime": 1636162232627.0, "symbolicated": false, "pausedRanges": [], "version": 24, "usesOnlyOneStackType": true, "doesNotUseFrameImplementation": true, "sourceCodeIsNotOnSearchfox": true, "markerSchema": [ { "name": "Text", "display": [ "marker-chart", "marker-table" ], "chartLabel": "{marker.data.name}", "tableLabel": "{marker.name} - {marker.data.name}", "data": [ { "key": "name", "label": "Details", "format": "string", "searchable": true } ] }, { "name": "custom", "display": [ "marker-chart", "marker-table" ], "tooltipLabel": "Custom tooltip label", "data": [ { "key": "eventName", "label": "Event name", "format": "string", "searchable": true }, { "key": "allocationSize", "label": "Allocation size", "format": "bytes", "searchable": true }, { "key": "url", "label": "URL", "format": "url", "searchable": true }, { "key": "latency", "label": "Latency", "format": "duration", "searchable": true }, { "label": "Description", "value": "This is a test marker with a custom schema." 
} ] } ] }, "libs": [ { "name": "dump_syms", "path": "/home/mstange/code/dump_syms/target/release/dump_syms", "debugName": "dump_syms", "debugPath": "/home/mstange/code/dump_syms/target/release/dump_syms", "breakpadId": "5C0A0D51EA1980DF43F203B4525BE9BE0", "codeId": "510d0a5c19eadf8043f203b4525be9be3dcb9554", "arch": null }, { "name": "libc.so.6", "path": "/usr/lib/x86_64-linux-gnu/libc.so.6", "debugName": "libc.so.6", "debugPath": "/usr/lib/x86_64-linux-gnu/libc.so.6", "breakpadId": "1629FCF0BE5C8860C0E1ADF03B0048FB0", "codeId": "f0fc29165cbe6088c0e1adf03b0048fbecbc003a", "arch": null } ], "threads": [ { "frameTable": { "length": 16, "address": [ -1, 796420, 911223, 1332248, 2354017, 2452862, 1700071, 172156, 1075602, 905942, 979918, 2437518, 1405368, 737506, 2586868, 674246 ], "inlineDepth": [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], "category": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ], "subcategory": [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], "func": [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 ], "nativeSymbol": [ null, null, null, null, null, null, 0, 1, null, null, null, null, null, null, null, 2 ], "innerWindowID": [ null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null ], "implementation": [ null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null ], "line": [ null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null ], "column": [ null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null ], "optimizations": [ null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null ] }, "funcTable": { "length": 16, "name": [ 0, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17 ], "isJS": [ false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false ], "relevantForJS": [ false, false, false, false, 
false, false, false, false, false, false, false, false, false, false, false, false ], "resource": [ -1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1 ], "fileName": [ null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null ], "lineNumber": [ null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null ], "columnNumber": [ null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null ] }, "markers": { "length": 2, "category": [ 0, 0 ], "data": [ { "name": "Hello world!", "type": "Text" }, { "allocationSize": 512000, "eventName": "My event", "latency": 123.0, "type": "custom", "url": "https://mozilla.org/" } ], "endTime": [ 0.0, 2.0 ], "name": [ 18, 19 ], "phase": [ 0, 1 ], "startTime": [ 0.0, 0.0 ] }, "name": "test", "isMainThread": true, "nativeSymbols": { "length": 3, "address": [ 1700001, 172156, 674226 ], "functionSize": [ 180, 20, 44 ], "libIndex": [ 1, 1, 1 ], "name": [ 8, 9, 17 ] }, "pausedRanges": [], "pid": "123", "processName": "test", "processShutdownTime": null, "processStartupTime": 0.0, "processType": "default", "registerTime": 0.0, "resourceTable": { "length": 2, "lib": [ 0, 1 ], "name": [ 1, 7 ], "host": [ null, null ], "type": [ 1, 1 ] }, "samples": { "length": 4, "stack": [ null, 6, 11, 15 ], "time": [ 0.0, 1.0, 2.0, 3.0 ], "weight": [ 1, 1, 1, 1 ], "weightType": "samples", "threadCPUDelta": [ 0, 0, 0, 0 ] }, "stackTable": { "length": 16, "prefix": [ null, 0, 1, 2, 3, 4, 5, 1, 7, 8, 9, 10, 7, 12, 13, 14 ], "frame": [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 ], "category": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ], "subcategory": [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ] }, "stringArray": [ "0x7ffdb4824837", "dump_syms", "0xc2704", "0xde777", "0x145418", "0x23eb61", "0x256d7e", "libc.so.6", "libc_symbol_1", "libc_symbol_2", "0x106992", "0xdd2d6", "0xef3ce", "0x25318e", "0x1571b8", "0xb40e2", "0x2778f4", 
// Tail end of the preceding test's expected JSON (that test's body begins
// before this excerpt): the remainder of a thread's stringArray, the closers
// for the thread and top-level arrays, and a "counters" entry whose three
// samples were added to a "malloc" counter in the (not shown) test body.
                    "libc_symbol_3",
                    "Experimental",
                    "CustomName"
                ],
                "tid": "12345",
                "unregisterTime": null
            }
        ],
        "pages": [],
        "profilerOverhead": [],
        "counters": [
            {
                "category": "Memory",
                "name": "malloc",
                "description": "Amount of allocated memory",
                "mainThreadIndex": 0,
                "pid": "123",
                "sampleGroups": [
                    {
                        "id": 0,
                        "samples": {
                            "length": 3,
                            "count": [0.0, 1000.0, 800.0],
                            "number": [0, 2, 1],
                            "time": [0.0, 1.0, 2.0]
                        }
                    }
                ]
            }
        ]
    }
        )
    )
}

/// Builds a profile containing one sample whose stack mixes a JS label frame
/// (`FrameFlags::IS_JS`) with a native return-address frame, and checks the
/// serialized JSON: the frame/func tables get one row per frame, `isJS` is
/// `[true, false]`, and `meta.usesOnlyOneStackType` comes out `false`.
#[test]
fn profile_with_js() {
    let mut profile = Profile::new(
        "test with js",
        ReferenceTimestamp::from_millis_since_unix_epoch(1636162232627.0),
        SamplingInterval::from_millis(1),
    );
    let process = profile.add_process("test2", 123, Timestamp::from_millis_since_reference(0.0));
    let thread = profile.add_thread(
        process,
        12346,
        Timestamp::from_millis_since_reference(0.0),
        true,
    );
    let some_label_string = profile.intern_string("Some label string");
    let category = profile.add_category("Regular", CategoryColor::Green);
    // One sample with a two-frame stack: JS label frame on top of the stack
    // table's root, then a native return-address frame as the leaf.
    profile.add_sample(
        thread,
        Timestamp::from_millis_since_reference(1.0),
        vec![
            FrameInfo {
                frame: Frame::Label(some_label_string),
                category_pair: category.into(),
                flags: FrameFlags::IS_JS,
            },
            FrameInfo {
                frame: Frame::ReturnAddress(0x7f76b7ffc0e7),
                category_pair: category.into(),
                flags: FrameFlags::empty(),
            },
        ]
        .into_iter(),
        CpuDelta::ZERO,
        1,
    );

    // Handy for regenerating the expected JSON below when the format changes:
    // eprintln!("{}", serde_json::to_string_pretty(&profile).unwrap());
    assert_json_eq!(
        profile,
        json!(
    {
        "meta": {
            "categories": [
                { "name": "Other", "color": "grey", "subcategories": ["Other"] },
                { "name": "Regular", "color": "green", "subcategories": ["Other"] }
            ],
            "debug": false,
            "extensions": { "baseURL": [], "id": [], "length": 0, "name": [] },
            "interval": 1.0,
            "preprocessedProfileVersion": 46,
            "processType": 0,
            "product": "test with js",
            "sampleUnits": { "eventDelay": "ms", "threadCPUDelta": "µs", "time": "ms" },
            "startTime": 1636162232627.0,
            "symbolicated": false,
            "pausedRanges": [],
            "version": 24,
            // false because this profile mixes JS and native frames.
            "usesOnlyOneStackType": false,
            "doesNotUseFrameImplementation": true,
            "sourceCodeIsNotOnSearchfox": true,
            "markerSchema": []
        },
        "libs": [],
        "threads": [
            {
                "frameTable": {
                    "length": 2,
                    "address": [-1, -1],
                    "inlineDepth": [0, 0],
                    "category": [1, 1],
                    "subcategory": [0, 0],
                    "func": [0, 1],
                    "nativeSymbol": [null, null],
                    "innerWindowID": [null, null],
                    "implementation": [null, null],
                    "line": [null, null],
                    "column": [null, null],
                    "optimizations": [null, null]
                },
                "funcTable": {
                    "length": 2,
                    "name": [0, 1],
                    "isJS": [true, false],
                    "relevantForJS": [false, false],
                    "resource": [-1, -1],
                    "fileName": [null, null],
                    "lineNumber": [null, null],
                    "columnNumber": [null, null]
                },
                "markers": {
                    "length": 0,
                    "category": [],
                    "data": [],
                    "endTime": [],
                    "name": [],
                    "phase": [],
                    "startTime": []
                },
                "name": "test2",
                "isMainThread": true,
                "nativeSymbols": {
                    "length": 0,
                    "address": [],
                    "functionSize": [],
                    "libIndex": [],
                    "name": []
                },
                "pausedRanges": [],
                "pid": "123",
                "processName": "test2",
                "processShutdownTime": null,
                "processStartupTime": 0.0,
                "processType": "default",
                "registerTime": 0.0,
                "resourceTable": { "length": 0, "lib": [], "name": [], "host": [], "type": [] },
                "samples": {
                    "length": 1,
                    "stack": [1],
                    "time": [1.0],
                    "weight": [1],
                    "weightType": "samples",
                    "threadCPUDelta": [0]
                },
                "stackTable": {
                    "length": 2,
                    "prefix": [null, 0],
                    "frame": [0, 1],
                    "category": [1, 1],
                    "subcategory": [0, 0]
                },
                // NOTE(review): the sampled return address was 0x7f76b7ffc0e7
                // but is expected to serialize as "0x7f76b7ffc0e6" — one less.
                // Presumably return addresses are nudged back into the calling
                // instruction before formatting; confirm against the library's
                // frame-address handling.
                "stringArray": ["Some label string", "0x7f76b7ffc0e6"],
                "tid": "12346",
                "unregisterTime": null
            }
        ],
        "pages": [],
        "profilerOverhead": [],
        "counters": []
    }
        )
    )
}

/// Registers two processes with identical pid 123 but out-of-order start
/// times, then checks the serialized JSON: threads appear sorted by process
/// start time ("test 2" first), the duplicate pid is disambiguated to
/// "123.1", and both counters (attached to `process0`) carry pid "123" and
/// `mainThreadIndex` 1 — the post-sort index of "test 1"'s thread.
#[test]
fn profile_counters_with_sorted_processes() {
    let mut profile = Profile::new(
        "test",
        ReferenceTimestamp::from_millis_since_unix_epoch(1636162232627.0),
        SamplingInterval::from_millis(1),
    );
    // Setting the timestamps first `1` and then `0` intentionally to make sure that the processes
    // are sorted and order has been reversed.
    let process0 = profile.add_process("test 1", 123, Timestamp::from_millis_since_reference(1.0));
    let process1 = profile.add_process("test 2", 123, Timestamp::from_millis_since_reference(0.0));
    let thread0 = profile.add_thread(
        process0,
        12345,
        Timestamp::from_millis_since_reference(0.0),
        true,
    );
    let thread1 = profile.add_thread(
        process1,
        54321,
        Timestamp::from_millis_since_reference(1.0),
        true,
    );
    profile.add_sample(
        thread0,
        Timestamp::from_millis_since_reference(1.0),
        vec![].into_iter(),
        CpuDelta::ZERO,
        1,
    );
    profile.add_sample(
        thread1,
        Timestamp::from_millis_since_reference(0.0),
        vec![].into_iter(),
        CpuDelta::ZERO,
        1,
    );

    // Both counters are attached to process0 ("test 1").
    let memory_counter0 =
        profile.add_counter(process0, "malloc", "Memory 1", "Amount of allocated memory");
    profile.add_counter_sample(
        memory_counter0,
        Timestamp::from_millis_since_reference(1.0),
        0.0,
        0,
    );
    let memory_counter1 =
        profile.add_counter(process0, "malloc", "Memory 2", "Amount of allocated memory");
    profile.add_counter_sample(
        memory_counter1,
        Timestamp::from_millis_since_reference(0.0),
        0.0,
        0,
    );

    // Handy for regenerating the expected JSON below when the format changes:
    // eprintln!("{}", serde_json::to_string_pretty(&profile).unwrap());
    assert_json_eq!(
        profile,
        json!(
    {
        "meta": {
            "categories": [
                { "name": "Other", "color": "grey", "subcategories": ["Other"] }
            ],
            "debug": false,
            "extensions": { "baseURL": [], "id": [], "length": 0, "name": [] },
            "interval": 1.0,
            "preprocessedProfileVersion": 46,
            "processType": 0,
            "product": "test",
            "sampleUnits": { "eventDelay": "ms", "threadCPUDelta": "µs", "time": "ms" },
            "startTime": 1636162232627.0,
            "symbolicated": false,
            "pausedRanges": [],
            "version": 24,
            "usesOnlyOneStackType": true,
            "doesNotUseFrameImplementation": true,
            "sourceCodeIsNotOnSearchfox": true,
            "markerSchema": []
        },
        "libs": [],
        "threads": [
            // "test 2" sorts first: its process started at t=0.0.
            {
                "frameTable": {
                    "length": 0,
                    "address": [],
                    "inlineDepth": [],
                    "category": [],
                    "subcategory": [],
                    "func": [],
                    "nativeSymbol": [],
                    "innerWindowID": [],
                    "implementation": [],
                    "line": [],
                    "column": [],
                    "optimizations": []
                },
                "funcTable": {
                    "length": 0,
                    "name": [],
                    "isJS": [],
                    "relevantForJS": [],
                    "resource": [],
                    "fileName": [],
                    "lineNumber": [],
                    "columnNumber": []
                },
                "markers": {
                    "length": 0,
                    "category": [],
                    "data": [],
                    "endTime": [],
                    "name": [],
                    "phase": [],
                    "startTime": []
                },
                "name": "test 2",
                "isMainThread": true,
                "nativeSymbols": {
                    "length": 0,
                    "address": [],
                    "functionSize": [],
                    "libIndex": [],
                    "name": []
                },
                "pausedRanges": [],
                // Second process registered with pid 123 gets ".1" appended.
                "pid": "123.1",
                "processName": "test 2",
                "processShutdownTime": null,
                "processStartupTime": 0.0,
                "processType": "default",
                "registerTime": 1.0,
                "resourceTable": { "length": 0, "lib": [], "name": [], "host": [], "type": [] },
                "samples": {
                    "length": 1,
                    "stack": [null],
                    "time": [0.0],
                    "weight": [1],
                    "weightType": "samples",
                    "threadCPUDelta": [0]
                },
                "stackTable": { "length": 0, "prefix": [], "frame": [], "category": [], "subcategory": [] },
                "stringArray": [],
                "tid": "54321",
                "unregisterTime": null
            },
            {
                "frameTable": {
                    "length": 0,
                    "address": [],
                    "inlineDepth": [],
                    "category": [],
                    "subcategory": [],
                    "func": [],
                    "nativeSymbol": [],
                    "innerWindowID": [],
                    "implementation": [],
                    "line": [],
                    "column": [],
                    "optimizations": []
                },
                "funcTable": {
                    "length": 0,
                    "name": [],
                    "isJS": [],
                    "relevantForJS": [],
                    "resource": [],
                    "fileName": [],
                    "lineNumber": [],
                    "columnNumber": []
                },
                "markers": {
                    "length": 0,
                    "category": [],
                    "data": [],
                    "endTime": [],
                    "name": [],
                    "phase": [],
                    "startTime": []
                },
                "name": "test 1",
                "isMainThread": true,
                "nativeSymbols": {
                    "length": 0,
                    "address": [],
                    "functionSize": [],
                    "libIndex": [],
                    "name": []
                },
                "pausedRanges": [],
                "pid": "123",
                "processName": "test 1",
                "processShutdownTime": null,
                "processStartupTime": 1.0,
                "processType": "default",
                "registerTime": 0.0,
                "resourceTable": { "length": 0, "lib": [], "name": [], "host": [], "type": [] },
                "samples": {
                    "length": 1,
                    "stack": [null],
                    "time": [1.0],
                    "weight": [1],
                    "weightType": "samples",
                    "threadCPUDelta": [0]
                },
                "stackTable": { "length": 0, "prefix": [], "frame": [], "category": [], "subcategory": [] },
                "stringArray": [],
                "tid": "12345",
                "unregisterTime": null
            }
        ],
        "pages": [],
        "profilerOverhead": [],
        "counters": [
            {
                "category": "Memory 1",
                "name": "malloc",
                "description": "Amount of allocated memory",
                // Index of "test 1"'s thread in the sorted threads array.
                "mainThreadIndex": 1,
                "pid": "123",
                "sampleGroups": [
                    {
                        "id": 0,
                        "samples": {
                            "length": 1,
                            "count": [0.0],
                            "number": [0],
                            "time": [1.0]
                        }
                    }
                ]
            },
            {
                "category": "Memory 2",
                "name": "malloc",
                "description": "Amount of allocated memory",
                "mainThreadIndex": 1,
                "pid": "123",
                "sampleGroups": [
                    {
                        "id": 0,
                        "samples": {
                            "length": 1,
                            "count": [0.0],
                            "number": [0],
                            "time": [0.0]
                        }
                    }
                ]
            }
        ]
    }
        )
    )
}