This is an automated email from the ASF dual-hosted git repository.

piotr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/iggy.git


The following commit(s) were added to refs/heads/master by this push:
     new 71eb4d5f0 feat(bench): add latency distribution chart with log-normal 
fit (#2832)
71eb4d5f0 is described below

commit 71eb4d5f088c92f67af15e3faef5997851677307
Author: Hubert Gruszecki <[email protected]>
AuthorDate: Fri Feb 27 20:25:25 2026 +0100

    feat(bench): add latency distribution chart with log-normal fit (#2832)
    
    Benchmark reports lacked visibility into latency shape
    beyond summary percentiles. Operators couldn't distinguish
    bimodal distributions from skewed unimodal ones.
    
    Compute per-group latency histograms (50 bins) with
    log-normal PDF fit and P5/P50/P95/P99 scatter markers.
    Wire them through report types, chart generation, and
    the dashboard frontend as a new "Distribution" tab.
---
 .../frontend/src/components/chart/single_chart.rs  |   7 +
 .../frontend/src/components/chart/trend_chart.rs   |   4 +-
 .../frontend/src/components/layout/topbar.rs       |  35 ++++
 .../selectors/measurement_type_selector.rs         |  15 +-
 core/bench/dashboard/shared/src/lib.rs             |  13 +-
 core/bench/report/src/lib.rs                       | 169 +++++++++++++++++
 core/bench/report/src/plotting/chart.rs            |  66 ++++++-
 core/bench/report/src/plotting/chart_kind.rs       |   2 +
 core/bench/report/src/types/group_metrics.rs       |  12 +-
 core/bench/report/src/types/individual_metrics.rs  |   5 +
 .../bench/report/src/types/latency_distribution.rs |  55 ++++++
 core/bench/report/src/types/mod.rs                 |   1 +
 core/bench/src/analytics/metrics/group.rs          |  14 ++
 core/bench/src/analytics/metrics/individual.rs     |  42 +++--
 .../src/analytics/metrics/latency_distribution.rs  | 202 +++++++++++++++++++++
 core/bench/src/analytics/metrics/mod.rs            |   1 +
 core/bench/src/plot.rs                             |   9 +
 core/bench/src/runner.rs                           |  10 +
 18 files changed, 633 insertions(+), 29 deletions(-)

diff --git a/core/bench/dashboard/frontend/src/components/chart/single_chart.rs 
b/core/bench/dashboard/frontend/src/components/chart/single_chart.rs
index 54ceb1767..974e19f7e 100644
--- a/core/bench/dashboard/frontend/src/components/chart/single_chart.rs
+++ b/core/bench/dashboard/frontend/src/components/chart/single_chart.rs
@@ -111,6 +111,13 @@ pub fn single_chart(props: &SingleChartProps) -> Html {
                         MeasurementType::Throughput => {
                             bench_report::create_throughput_chart(data, 
config.is_dark, true)
                         }
+                        MeasurementType::Distribution => {
+                            bench_report::create_latency_distribution_chart(
+                                data,
+                                config.is_dark,
+                                true,
+                            )
+                        }
                     };
 
                     let renderer = if config.is_dark {
diff --git a/core/bench/dashboard/frontend/src/components/chart/trend_chart.rs 
b/core/bench/dashboard/frontend/src/components/chart/trend_chart.rs
index 2d12da1e6..26b877c1d 100644
--- a/core/bench/dashboard/frontend/src/components/chart/trend_chart.rs
+++ b/core/bench/dashboard/frontend/src/components/chart/trend_chart.rs
@@ -80,7 +80,9 @@ pub fn trend_chart(props: &TrendChartProps) -> Html {
             move |(data, measurement_type, is_dark, size)| {
                 if !data.is_empty() {
                     let plot_type = match measurement_type {
-                        MeasurementType::Latency => PlotType::Latency,
+                        MeasurementType::Latency | 
MeasurementType::Distribution => {
+                            PlotType::Latency
+                        }
                         MeasurementType::Throughput => PlotType::Throughput,
                     };
 
diff --git a/core/bench/dashboard/frontend/src/components/layout/topbar.rs 
b/core/bench/dashboard/frontend/src/components/layout/topbar.rs
index 2a4ff5f01..c6ec04efa 100644
--- a/core/bench/dashboard/frontend/src/components/layout/topbar.rs
+++ b/core/bench/dashboard/frontend/src/components/layout/topbar.rs
@@ -40,6 +40,24 @@ pub fn topbar(props: &TopBarProps) -> Html {
     let selected_measurement = ui_state.selected_measurement.clone();
     let is_benchmark_tooltip_visible = ui_state.is_benchmark_tooltip_visible;
     let is_server_stats_tooltip_visible = 
ui_state.is_server_stats_tooltip_visible;
+    let has_distribution = benchmark_ctx
+        .state
+        .selected_benchmark
+        .as_ref()
+        .is_some_and(|b| b.has_latency_distribution());
+
+    {
+        let ui_state = ui_state.clone();
+        let selected_measurement = selected_measurement.clone();
+        use_effect_with(
+            (has_distribution, selected_measurement),
+            move |(has_dist, measurement)| {
+                if !has_dist && *measurement == MeasurementType::Distribution {
+                    
ui_state.dispatch(UiAction::SetMeasurementType(MeasurementType::Latency));
+                }
+            },
+        );
+    }
 
     let on_download_artifacts = {
         let benchmark_ctx = benchmark_ctx.clone();
@@ -142,6 +160,23 @@ pub fn topbar(props: &TopBarProps) -> Html {
                                     >
                                         { "Latency" }
                                     </button>
+                                    {
+                                        if has_distribution {
+                                            html! {
+                                                <button
+                                                    class={classes!(
+                                                        "measurement-button",
+                                                        (selected_measurement 
== MeasurementType::Distribution).then_some("active")
+                                                    )}
+                                                    
onclick={on_measurement_select.reform(|_| MeasurementType::Distribution)}
+                                                >
+                                                    { "Distribution" }
+                                                </button>
+                                            }
+                                        } else {
+                                            html! {}
+                                        }
+                                    }
                                     <button
                                         class={classes!(
                                             "measurement-button",
diff --git 
a/core/bench/dashboard/frontend/src/components/selectors/measurement_type_selector.rs
 
b/core/bench/dashboard/frontend/src/components/selectors/measurement_type_selector.rs
index c279647ef..d8023ed93 100644
--- 
a/core/bench/dashboard/frontend/src/components/selectors/measurement_type_selector.rs
+++ 
b/core/bench/dashboard/frontend/src/components/selectors/measurement_type_selector.rs
@@ -23,6 +23,7 @@ use yew::prelude::*;
 pub enum MeasurementType {
     Latency,
     Throughput,
+    Distribution,
 }
 
 impl Display for MeasurementType {
@@ -30,6 +31,7 @@ impl Display for MeasurementType {
         match self {
             MeasurementType::Latency => write!(f, "Latency"),
             MeasurementType::Throughput => write!(f, "Throughput"),
+            MeasurementType::Distribution => write!(f, "Distribution"),
         }
     }
 }
@@ -41,6 +43,7 @@ impl FromStr for MeasurementType {
         match s {
             "Latency" => Ok(MeasurementType::Latency),
             "Throughput" => Ok(MeasurementType::Throughput),
+            "Distribution" => Ok(MeasurementType::Distribution),
             _ => Err(()),
         }
     }
@@ -55,24 +58,30 @@ pub struct MeasurementTypeSelectorProps {
 
 #[function_component(MeasurementTypeSelector)]
 pub fn measurement_type_selector(props: &MeasurementTypeSelectorProps) -> Html 
{
-    let is_latency = matches!(props.selected_measurement, 
MeasurementType::Latency);
+    let selected = &props.selected_measurement;
 
     html! {
         <div class="view-mode-container">
             <h3>{"Measurements"}</h3>
             <div class="segmented-control">
                 <button
-                    class={if is_latency { "segment active" } else { "segment" 
}}
+                    class={if *selected == MeasurementType::Latency { "segment 
active" } else { "segment" }}
                     onclick={props.on_measurement_select.reform(|_| 
MeasurementType::Latency)}
                 >
                     {"Latency"}
                 </button>
                 <button
-                    class={if !is_latency { "segment active" } else { 
"segment" }}
+                    class={if *selected == MeasurementType::Throughput { 
"segment active" } else { "segment" }}
                     onclick={props.on_measurement_select.reform(|_| 
MeasurementType::Throughput)}
                 >
                     {"Throughput"}
                 </button>
+                <button
+                    class={if *selected == MeasurementType::Distribution { 
"segment active" } else { "segment" }}
+                    onclick={props.on_measurement_select.reform(|_| 
MeasurementType::Distribution)}
+                >
+                    {"Distribution"}
+                </button>
             </div>
         </div>
     }
diff --git a/core/bench/dashboard/shared/src/lib.rs 
b/core/bench/dashboard/shared/src/lib.rs
index 40b8afc42..7dd11cf4e 100644
--- a/core/bench/dashboard/shared/src/lib.rs
+++ b/core/bench/dashboard/shared/src/lib.rs
@@ -20,7 +20,8 @@ pub mod title;
 
 use bench_report::{
     group_metrics_summary::BenchmarkGroupMetricsSummary, 
hardware::BenchmarkHardware,
-    individual_metrics_summary::BenchmarkIndividualMetricsSummary, 
params::BenchmarkParams,
+    individual_metrics_summary::BenchmarkIndividualMetricsSummary,
+    latency_distribution::LatencyDistribution, params::BenchmarkParams,
     server_stats::BenchmarkServerStats,
 };
 use serde::{Deserialize, Serialize};
@@ -42,6 +43,16 @@ pub struct BenchmarkReportLight {
 #[derive(Debug, Serialize, Clone, PartialEq, Deserialize)]
 pub struct BenchmarkGroupMetricsLight {
     pub summary: BenchmarkGroupMetricsSummary,
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub latency_distribution: Option<LatencyDistribution>,
+}
+
+impl BenchmarkReportLight {
+    pub fn has_latency_distribution(&self) -> bool {
+        self.group_metrics
+            .iter()
+            .any(|gm| gm.latency_distribution.is_some())
+    }
 }
 
 /// Same as BenchmarkIndividualMetrics, but without the time series
diff --git a/core/bench/report/src/lib.rs b/core/bench/report/src/lib.rs
index ae979607e..5c201dfb6 100644
--- a/core/bench/report/src/lib.rs
+++ b/core/bench/report/src/lib.rs
@@ -25,7 +25,9 @@ mod prints;
 use crate::report::BenchmarkReport;
 use actor_kind::ActorKind;
 use charming::Chart;
+use charming::datatype::DataPoint;
 use group_metrics_kind::GroupMetricsKind;
+use latency_distribution::LatencyDistribution;
 use plotting::chart::IggyChart;
 use plotting::chart_kind::ChartKind;
 
@@ -145,3 +147,170 @@ pub fn create_latency_chart(
 
     chart.inner
 }
+
+pub fn create_latency_distribution_chart(
+    report: &BenchmarkReport,
+    dark: bool,
+    strip_title_and_subtext: bool,
+) -> Chart {
+    let title = report.title(ChartKind::LatencyDistribution);
+
+    let mut chart = IggyChart::new(&title, &report.subtext(), dark, 
strip_title_and_subtext)
+        .with_y_axis("Density");
+
+    let group_colors = ["#5470C6", "#91CC75", "#EE6666", "#FAC858"];
+    let pdf_colors = ["#FF4500", "#228B22", "#8B008B", "#FF8C00"];
+    let percentile_colors = ["#E63946", "#457B9D", "#2A9D8F", "#F4A261"];
+
+    let mut has_data = false;
+
+    for (idx, metrics) in report
+        .group_metrics
+        .iter()
+        .filter(|m| m.summary.kind != GroupMetricsKind::ProducersAndConsumers)
+        .enumerate()
+    {
+        let dist = match &metrics.latency_distribution {
+            Some(d) => d,
+            None => continue,
+        };
+
+        has_data = true;
+        let group_label = format!("{}s", metrics.summary.kind.actor());
+        let color_idx = idx % group_colors.len();
+
+        chart = add_distribution_series(
+            chart,
+            dist,
+            &group_label,
+            group_colors[color_idx],
+            pdf_colors[color_idx],
+            percentile_colors[color_idx],
+        );
+    }
+
+    if !has_data {
+        chart = chart.with_category_x_axis("Latency [ms]", vec!["No 
data".to_owned()]);
+    }
+
+    chart.inner
+}
+
+fn add_distribution_series(
+    mut chart: IggyChart,
+    dist: &LatencyDistribution,
+    group_label: &str,
+    hist_color: &str,
+    pdf_color: &str,
+    pct_color: &str,
+) -> IggyChart {
+    if dist.bins.is_empty() {
+        return chart;
+    }
+
+    let bin_width = if dist.bins.len() > 1 {
+        dist.bins[1].edge_ms - dist.bins[0].edge_ms
+    } else {
+        1.0
+    };
+
+    // Category labels at bin centers
+    let categories: Vec<String> = dist
+        .bins
+        .iter()
+        .map(|b| format_latency(b.edge_ms + bin_width / 2.0))
+        .collect();
+
+    chart = chart.with_category_x_axis("Latency [ms]", categories);
+
+    // Histogram bars
+    let bar_data: Vec<f64> = dist.bins.iter().map(|b| b.density).collect();
+    chart = chart.add_bar_series(
+        &format!("{group_label} Histogram"),
+        bar_data,
+        hist_color,
+        0.6,
+    );
+
+    // Log-normal PDF curve at bin centers
+    let mu = dist.log_normal_params.mu;
+    let sigma = dist.log_normal_params.sigma;
+    let pdf_data: Vec<Vec<f64>> = dist
+        .bins
+        .iter()
+        .enumerate()
+        .map(|(i, b)| {
+            let x = b.edge_ms + bin_width / 2.0;
+            vec![i as f64, log_normal_pdf(x, mu, sigma)]
+        })
+        .collect();
+
+    chart = chart.add_smooth_line_series(
+        &format!("{group_label} Log-Normal PDF"),
+        pdf_data,
+        pdf_color,
+        2.5,
+    );
+
+    // Percentile scatter markers placed at closest bin
+    let pcts = [
+        ("P5", dist.percentiles.p05_ms),
+        ("P50", dist.percentiles.p50_ms),
+        ("P95", dist.percentiles.p95_ms),
+        ("P99", dist.percentiles.p99_ms),
+    ];
+
+    let scatter_data: Vec<DataPoint> = pcts
+        .iter()
+        .map(|(label, val)| {
+            let y = log_normal_pdf(*val, mu, sigma);
+            let bin_idx = find_closest_bin(&dist.bins, bin_width, *val);
+            DataPoint::from((vec![bin_idx as f64, y], 
format!("{label}={:.2}ms", val)))
+        })
+        .collect();
+
+    chart = chart.add_scatter_series(
+        &format!("{group_label} Percentiles"),
+        scatter_data,
+        12.0,
+        pct_color,
+    );
+
+    chart
+}
+
+fn find_closest_bin(
+    bins: &[crate::latency_distribution::HistogramBin],
+    bin_width: f64,
+    value: f64,
+) -> usize {
+    let half = bin_width / 2.0;
+    bins.iter()
+        .enumerate()
+        .min_by(|(_, a), (_, b)| {
+            let da = (a.edge_ms + half - value).abs();
+            let db = (b.edge_ms + half - value).abs();
+            da.partial_cmp(&db).unwrap_or(std::cmp::Ordering::Equal)
+        })
+        .map(|(i, _)| i)
+        .unwrap_or(0)
+}
+
+fn format_latency(ms: f64) -> String {
+    if ms < 1.0 {
+        format!("{ms:.2}")
+    } else if ms < 10.0 {
+        format!("{ms:.1}")
+    } else {
+        format!("{ms:.0}")
+    }
+}
+
+fn log_normal_pdf(x: f64, mu: f64, sigma: f64) -> f64 {
+    if x <= 0.0 || sigma <= 0.0 {
+        return 0.0;
+    }
+    let ln_x = x.ln();
+    let exponent = -((ln_x - mu).powi(2)) / (2.0 * sigma * sigma);
+    exponent.exp() / (x * sigma * std::f64::consts::TAU.sqrt())
+}
diff --git a/core/bench/report/src/plotting/chart.rs 
b/core/bench/report/src/plotting/chart.rs
index bb6e5ff7e..982212691 100644
--- a/core/bench/report/src/plotting/chart.rs
+++ b/core/bench/report/src/plotting/chart.rs
@@ -22,11 +22,13 @@ use charming::{
         Axis, DataView, DataZoom, DataZoomType, Feature, Grid, Legend, 
LegendSelectedMode,
         LegendType, Restore, SaveAsImage, Title, Toolbox, ToolboxDataZoom,
     },
+    datatype::DataPoint,
     element::{
-        AxisLabel, AxisPointer, AxisPointerType, AxisType, Emphasis, 
ItemStyle, LineStyle,
-        NameLocation, Orient, SplitLine, Symbol, TextAlign, TextStyle, Tooltip,
+        AxisLabel, AxisPointer, AxisPointerType, AxisType, Emphasis, 
ItemStyle, Label,
+        LabelPosition, LineStyle, NameLocation, Orient, SplitLine, Symbol, 
TextAlign, TextStyle,
+        Tooltip,
     },
-    series::Line,
+    series::{Bar, Line, Scatter},
 };
 
 pub struct IggyChart {
@@ -243,6 +245,64 @@ impl IggyChart {
         self
     }
 
+    /// Add a bar series (for histograms).
+    pub fn add_bar_series(mut self, name: &str, data: Vec<f64>, color: &str, 
opacity: f64) -> Self {
+        let bar = Bar::new()
+            .name(name)
+            .data(data)
+            .bar_width("100%")
+            .bar_gap("-100%")
+            .item_style(ItemStyle::new().color(color).opacity(opacity));
+
+        self.inner = self.inner.series(bar);
+        self
+    }
+
+    /// Add a smooth line series (for PDF curves).
+    pub fn add_smooth_line_series(
+        mut self,
+        name: &str,
+        data: Vec<Vec<f64>>,
+        color: &str,
+        width: f64,
+    ) -> Self {
+        let line = Line::new()
+            .name(name)
+            .data(data)
+            .show_symbol(false)
+            .smooth(true)
+            .line_style(LineStyle::new().width(width))
+            .item_style(ItemStyle::new().color(color));
+
+        self.inner = self.inner.series(line);
+        self
+    }
+
+    /// Add a scatter series with labels (for percentile markers).
+    pub fn add_scatter_series(
+        mut self,
+        name: &str,
+        data: Vec<DataPoint>,
+        symbol_size: f64,
+        color: &str,
+    ) -> Self {
+        let scatter = Scatter::new()
+            .name(name)
+            .data(data)
+            .symbol_size(symbol_size)
+            .item_style(ItemStyle::new().color(color))
+            .label(
+                Label::new()
+                    .show(true)
+                    .position(LabelPosition::Top)
+                    .font_size(11)
+                    .formatter("{b}"),
+            );
+
+        self.inner = self.inner.series(scatter);
+        self
+    }
+
     /// Add a new line series to the chart with specified Y axis.
     /// y_axis_index: 0 for left axis, 1 for right axis
     pub fn add_dual_time_line_series(
diff --git a/core/bench/report/src/plotting/chart_kind.rs 
b/core/bench/report/src/plotting/chart_kind.rs
index fcb8141be..342e4c8ca 100644
--- a/core/bench/report/src/plotting/chart_kind.rs
+++ b/core/bench/report/src/plotting/chart_kind.rs
@@ -24,4 +24,6 @@ pub enum ChartKind {
     Throughput,
     #[display("Latency")]
     Latency,
+    #[display("Latency Distribution")]
+    LatencyDistribution,
 }
diff --git a/core/bench/report/src/types/group_metrics.rs 
b/core/bench/report/src/types/group_metrics.rs
index e7e42119c..2e71757cc 100644
--- a/core/bench/report/src/types/group_metrics.rs
+++ b/core/bench/report/src/types/group_metrics.rs
@@ -16,7 +16,10 @@
  * under the License.
  */
 
-use super::{group_metrics_summary::BenchmarkGroupMetricsSummary, 
time_series::TimeSeries};
+use super::{
+    group_metrics_summary::BenchmarkGroupMetricsSummary, 
latency_distribution::LatencyDistribution,
+    time_series::TimeSeries,
+};
 use crate::utils::{max, min, std_dev};
 use serde::de::{self, MapAccess, Visitor};
 use serde::{Deserialize, Deserializer, Serialize};
@@ -28,6 +31,8 @@ pub struct BenchmarkGroupMetrics {
     pub avg_throughput_mb_ts: TimeSeries,
     pub avg_throughput_msg_ts: TimeSeries,
     pub avg_latency_ts: TimeSeries,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub latency_distribution: Option<LatencyDistribution>,
 }
 
 // Custom deserializer implementation
@@ -53,6 +58,7 @@ impl<'de> Deserialize<'de> for BenchmarkGroupMetrics {
                 let mut avg_throughput_mb_ts: Option<TimeSeries> = None;
                 let mut avg_throughput_msg_ts: Option<TimeSeries> = None;
                 let mut avg_latency_ts: Option<TimeSeries> = None;
+                let mut latency_distribution: Option<LatencyDistribution> = 
None;
 
                 while let Some(key) = map.next_key::<String>()? {
                     match key.as_str() {
@@ -68,6 +74,9 @@ impl<'de> Deserialize<'de> for BenchmarkGroupMetrics {
                         "avg_latency_ts" => {
                             avg_latency_ts = Some(map.next_value()?);
                         }
+                        "latency_distribution" => {
+                            latency_distribution = map.next_value()?;
+                        }
                         _ => {
                             let _ = map.next_value::<serde::de::IgnoredAny>()?;
                         }
@@ -108,6 +117,7 @@ impl<'de> Deserialize<'de> for BenchmarkGroupMetrics {
                     avg_throughput_mb_ts,
                     avg_throughput_msg_ts,
                     avg_latency_ts,
+                    latency_distribution,
                 })
             }
         }
diff --git a/core/bench/report/src/types/individual_metrics.rs 
b/core/bench/report/src/types/individual_metrics.rs
index 2ba13c3c8..20e7c6dc3 100644
--- a/core/bench/report/src/types/individual_metrics.rs
+++ b/core/bench/report/src/types/individual_metrics.rs
@@ -30,6 +30,10 @@ pub struct BenchmarkIndividualMetrics {
     pub throughput_mb_ts: TimeSeries,
     pub throughput_msg_ts: TimeSeries,
     pub latency_ts: TimeSeries,
+    /// Per-batch latencies in ms, sorted ascending. Only populated in-memory
+    /// during report building for distribution computation; never serialized.
+    #[serde(skip)]
+    pub raw_latencies_ms: Vec<f64>,
 }
 
 // Custom deserializer implementation
@@ -109,6 +113,7 @@ impl<'de> Deserialize<'de> for BenchmarkIndividualMetrics {
                     throughput_mb_ts,
                     throughput_msg_ts,
                     latency_ts,
+                    raw_latencies_ms: Vec::new(),
                 })
             }
         }
diff --git a/core/bench/report/src/types/latency_distribution.rs 
b/core/bench/report/src/types/latency_distribution.rs
new file mode 100644
index 000000000..dbcddd7bb
--- /dev/null
+++ b/core/bench/report/src/types/latency_distribution.rs
@@ -0,0 +1,55 @@
+/* Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+use crate::utils::round_float;
+use serde::{Deserialize, Serialize};
+
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
+pub struct LatencyDistribution {
+    pub bins: Vec<HistogramBin>,
+    pub log_normal_params: LogNormalParams,
+    pub percentiles: DistributionPercentiles,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
+pub struct HistogramBin {
+    #[serde(serialize_with = "round_float")]
+    pub edge_ms: f64,
+    #[serde(serialize_with = "round_float")]
+    pub density: f64,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
+pub struct LogNormalParams {
+    #[serde(serialize_with = "round_float")]
+    pub mu: f64,
+    #[serde(serialize_with = "round_float")]
+    pub sigma: f64,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
+pub struct DistributionPercentiles {
+    #[serde(serialize_with = "round_float")]
+    pub p05_ms: f64,
+    #[serde(serialize_with = "round_float")]
+    pub p50_ms: f64,
+    #[serde(serialize_with = "round_float")]
+    pub p95_ms: f64,
+    #[serde(serialize_with = "round_float")]
+    pub p99_ms: f64,
+}
diff --git a/core/bench/report/src/types/mod.rs 
b/core/bench/report/src/types/mod.rs
index 0bd38c503..4c35ebab8 100644
--- a/core/bench/report/src/types/mod.rs
+++ b/core/bench/report/src/types/mod.rs
@@ -24,6 +24,7 @@ pub mod group_metrics_summary;
 pub mod hardware;
 pub mod individual_metrics;
 pub mod individual_metrics_summary;
+pub mod latency_distribution;
 pub mod numeric_parameter;
 pub mod params;
 pub mod report;
diff --git a/core/bench/src/analytics/metrics/group.rs 
b/core/bench/src/analytics/metrics/group.rs
index 9e0bd4cf6..e9f40d932 100644
--- a/core/bench/src/analytics/metrics/group.rs
+++ b/core/bench/src/analytics/metrics/group.rs
@@ -19,6 +19,7 @@
 #![allow(clippy::cast_precision_loss)]
 #![allow(clippy::struct_field_names)]
 
+use super::latency_distribution::compute_latency_distribution;
 use crate::analytics::time_series::{
     calculator::TimeSeriesCalculator,
     processors::{TimeSeriesProcessor, moving_average::MovingAverageProcessor},
@@ -62,6 +63,18 @@ pub fn from_individual_metrics(
     let (min_latency_ms_value, max_latency_ms_value) =
         calculate_min_max_latencies(stats, &time_series.2);
 
+    let mut all_latencies: Vec<f64> = stats
+        .iter()
+        .flat_map(|s| s.raw_latencies_ms.iter().copied())
+        .collect();
+    all_latencies.sort_unstable_by(|a, b| a.partial_cmp(b).unwrap());
+
+    let latency_distribution = if all_latencies.is_empty() {
+        None
+    } else {
+        Some(compute_latency_distribution(&all_latencies))
+    };
+
     let summary = BenchmarkGroupMetricsSummary {
         kind,
         total_throughput_megabytes_per_second: 
throughput_metrics.total_megabytes_per_sec,
@@ -86,6 +99,7 @@ pub fn from_individual_metrics(
         avg_throughput_mb_ts: time_series.0,
         avg_throughput_msg_ts: time_series.1,
         avg_latency_ts: time_series.2,
+        latency_distribution,
     })
 }
 
diff --git a/core/bench/src/analytics/metrics/individual.rs 
b/core/bench/src/analytics/metrics/individual.rs
index 7ab7c61d9..e1db6bc7e 100644
--- a/core/bench/src/analytics/metrics/individual.rs
+++ b/core/bench/src/analytics/metrics/individual.rs
@@ -63,7 +63,13 @@ pub fn from_records(
         total_messages,
     );
 
-    let latency_metrics = calculate_latency_metrics(records, &latency_ts);
+    let mut raw_latencies_ms: Vec<f64> = records
+        .iter()
+        .map(|r| r.latency_us as f64 / 1_000.0)
+        .collect();
+    raw_latencies_ms.sort_unstable_by(|a, b| a.partial_cmp(b).unwrap());
+
+    let latency_metrics = calculate_latency_metrics(&raw_latencies_ms, 
&latency_ts);
 
     BenchmarkIndividualMetrics {
         summary: BenchmarkIndividualMetricsSummary {
@@ -92,6 +98,7 @@ pub fn from_records(
         throughput_mb_ts,
         throughput_msg_ts,
         latency_ts,
+        raw_latencies_ms,
     }
 }
 
@@ -127,6 +134,7 @@ fn create_empty_metrics(
         throughput_mb_ts: TimeSeries::default(),
         throughput_msg_ts: TimeSeries::default(),
         latency_ts: TimeSeries::default(),
+        raw_latencies_ms: Vec::new(),
     }
 }
 
@@ -213,28 +221,22 @@ struct LatencyMetrics {
 }
 
 fn calculate_latency_metrics(
-    records: &[BenchmarkRecord],
+    sorted_latencies_ms: &[f64],
     latency_ts: &TimeSeries,
 ) -> LatencyMetrics {
-    let mut latencies_ms: Vec<f64> = records
-        .iter()
-        .map(|r| r.latency_us as f64 / 1_000.0)
-        .collect();
-    latencies_ms.sort_by(|a, b| a.partial_cmp(b).unwrap());
-
-    let p50 = calculate_percentile(&latencies_ms, 50.0);
-    let p90 = calculate_percentile(&latencies_ms, 90.0);
-    let p95 = calculate_percentile(&latencies_ms, 95.0);
-    let p99 = calculate_percentile(&latencies_ms, 99.0);
-    let p999 = calculate_percentile(&latencies_ms, 99.9);
-    let p9999 = calculate_percentile(&latencies_ms, 99.99);
+    let p50 = calculate_percentile(sorted_latencies_ms, 50.0);
+    let p90 = calculate_percentile(sorted_latencies_ms, 90.0);
+    let p95 = calculate_percentile(sorted_latencies_ms, 95.0);
+    let p99 = calculate_percentile(sorted_latencies_ms, 99.0);
+    let p999 = calculate_percentile(sorted_latencies_ms, 99.9);
+    let p9999 = calculate_percentile(sorted_latencies_ms, 99.99);
 
-    let avg = latencies_ms.iter().sum::<f64>() / latencies_ms.len() as f64;
-    let len = latencies_ms.len() / 2;
-    let median = if latencies_ms.len().is_multiple_of(2) {
-        f64::midpoint(latencies_ms[len - 1], latencies_ms[len])
+    let avg = sorted_latencies_ms.iter().sum::<f64>() / 
sorted_latencies_ms.len() as f64;
+    let len = sorted_latencies_ms.len() / 2;
+    let median = if sorted_latencies_ms.len().is_multiple_of(2) {
+        f64::midpoint(sorted_latencies_ms[len - 1], sorted_latencies_ms[len])
     } else {
-        latencies_ms[len]
+        sorted_latencies_ms[len]
     };
 
     let min = min(latency_ts).unwrap_or(0.0);
@@ -256,7 +258,7 @@ fn calculate_latency_metrics(
     }
 }
 
-fn calculate_percentile(sorted_data: &[f64], percentile: f64) -> f64 {
+pub fn calculate_percentile(sorted_data: &[f64], percentile: f64) -> f64 {
     if sorted_data.is_empty() {
         return 0.0;
     }
diff --git a/core/bench/src/analytics/metrics/latency_distribution.rs 
b/core/bench/src/analytics/metrics/latency_distribution.rs
new file mode 100644
index 000000000..3135046c6
--- /dev/null
+++ b/core/bench/src/analytics/metrics/latency_distribution.rs
@@ -0,0 +1,202 @@
+/* Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#![allow(clippy::cast_precision_loss)]
+
+use super::individual::calculate_percentile;
+use bench_report::latency_distribution::{
+    DistributionPercentiles, HistogramBin, LatencyDistribution, 
LogNormalParams,
+};
+
/// Number of uniform-width bins in each latency histogram.
const NUM_BINS: usize = 50;

/// Upper range is clamped at P99 * this factor to focus the chart on
/// the interesting region while still showing the tail onset.
const RANGE_FACTOR: f64 = 1.5;
+
+/// Builds a `LatencyDistribution` from pre-sorted positive latency samples 
(ms).
+///
+/// The histogram range is clamped to [min, P99 * 1.5] with linear bins.
+/// This focuses on the region where the distribution shape is visible,
+/// matching how latency distributions are typically visualized.
+pub fn compute_latency_distribution(sorted_latencies_ms: &[f64]) -> 
LatencyDistribution {
+    let percentiles = DistributionPercentiles {
+        p05_ms: calculate_percentile(sorted_latencies_ms, 5.0),
+        p50_ms: calculate_percentile(sorted_latencies_ms, 50.0),
+        p95_ms: calculate_percentile(sorted_latencies_ms, 95.0),
+        p99_ms: calculate_percentile(sorted_latencies_ms, 99.0),
+    };
+    let log_normal_params = estimate_log_normal(sorted_latencies_ms);
+    let bins = build_histogram(sorted_latencies_ms, percentiles.p99_ms);
+
+    LatencyDistribution {
+        bins,
+        log_normal_params,
+        percentiles,
+    }
+}
+
+/// Builds a histogram with uniform bin widths over `[min, P99 * 
RANGE_FACTOR]`.
+///
+/// Samples beyond the upper limit are counted into the last bin.
+/// `Density = count / (total_samples * bin_width)`, so the histogram area
+/// sums to the fraction of data within the displayed range (~0.99).
+fn build_histogram(sorted: &[f64], p99: f64) -> Vec<HistogramBin> {
+    let min_val = sorted[0];
+    let max_val = (p99 * RANGE_FACTOR).min(sorted[sorted.len() - 1]);
+
+    let range = max_val - min_val;
+    if range <= 0.0 {
+        return vec![HistogramBin {
+            edge_ms: min_val,
+            density: 1.0,
+        }];
+    }
+
+    let bin_width = range / NUM_BINS as f64;
+    let n = sorted.len() as f64;
+    let mut bins = Vec::with_capacity(NUM_BINS);
+    let mut sorted_idx = 0;
+
+    for i in 0..NUM_BINS {
+        let edge = (i as f64).mul_add(bin_width, min_val);
+        let upper = if i == NUM_BINS - 1 {
+            f64::INFINITY
+        } else {
+            edge + bin_width
+        };
+
+        let mut count = 0u64;
+        while sorted_idx < sorted.len() && sorted[sorted_idx] < upper {
+            count += 1;
+            sorted_idx += 1;
+        }
+
+        let density = count as f64 / (n * bin_width);
+        bins.push(HistogramBin {
+            edge_ms: edge,
+            density,
+        });
+    }
+
+    bins
+}
+
+fn estimate_log_normal(sorted: &[f64]) -> LogNormalParams {
+    let positive: Vec<f64> = sorted.iter().copied().filter(|&x| x > 
0.0).collect();
+
+    if positive.is_empty() {
+        return LogNormalParams {
+            mu: 0.0,
+            sigma: 0.0,
+        };
+    }
+
+    let n = positive.len() as f64;
+    let ln_sum: f64 = positive.iter().map(|x| x.ln()).sum();
+    let mu = ln_sum / n;
+
+    let variance = positive
+        .iter()
+        .map(|x| {
+            let diff = x.ln() - mu;
+            diff * diff
+        })
+        .sum::<f64>()
+        / n;
+
+    LogNormalParams {
+        mu,
+        sigma: variance.sqrt(),
+    }
+}
+
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn histogram_density_reasonable() {
        // 10k evenly spaced samples in (0, 1000], already ascending.
        let data: Vec<f64> = (1..=10_000).map(|v| f64::from(v) * 0.1).collect();

        let dist = compute_latency_distribution(&data);
        assert_eq!(dist.bins.len(), NUM_BINS);

        let width = dist.bins[1].edge_ms - dist.bins[0].edge_ms;
        let area: f64 = dist.bins.iter().map(|b| b.density * width).sum();

        // Every sample is counted (the tail folds into the last bin),
        // so the area should come out very close to 1.0.
        assert!(
            area > 0.9 && area < 1.2,
            "Histogram area should be ~1.0, got {area}"
        );
    }

    #[test]
    fn log_normal_params_reasonable() {
        let data: Vec<f64> = (1..=1000).map(f64::from).collect();
        let fit = compute_latency_distribution(&data).log_normal_params;

        assert!(fit.mu > 0.0);
        assert!(fit.sigma > 0.0);
    }

    #[test]
    fn percentiles_ordered() {
        let data: Vec<f64> = (1..=10_000).map(|v| f64::from(v) * 0.01).collect();
        let p = compute_latency_distribution(&data).percentiles;

        assert!(p.p05_ms < p.p50_ms);
        assert!(p.p50_ms < p.p95_ms);
        assert!(p.p95_ms < p.p99_ms);
    }

    #[test]
    fn single_value_distribution() {
        let data = [5.0; 100];
        let dist = compute_latency_distribution(&data);

        // Zero range collapses the histogram to a single delta-like bin.
        assert_eq!(dist.bins.len(), 1);
        assert!((dist.percentiles.p05_ms - 5.0).abs() < f64::EPSILON);
        assert!((dist.percentiles.p99_ms - 5.0).abs() < f64::EPSILON);
    }

    #[test]
    fn skewed_distribution_bins_cover_range() {
        // Dense cluster in ~[0.5, 9.5] plus a sparse tail above 10.
        let mut data: Vec<f64> = (1..=9000)
            .map(|v| f64::from(v).mul_add(0.001, 0.5))
            .collect();
        data.extend((1..=1000).map(|v| f64::from(v).mul_add(0.1, 10.0)));
        data.sort_unstable_by(|a, b| a.partial_cmp(b).unwrap());

        let dist = compute_latency_distribution(&data);

        // Clipping at P99*1.5 should keep most bins populated.
        let populated = dist.bins.iter().filter(|b| b.density > 0.0).count();
        assert!(
            populated > NUM_BINS / 3,
            "Expected >33% non-zero bins, got {populated}/{NUM_BINS}"
        );
    }
}
diff --git a/core/bench/src/analytics/metrics/mod.rs 
b/core/bench/src/analytics/metrics/mod.rs
index 9136e160b..ae5d4aa05 100644
--- a/core/bench/src/analytics/metrics/mod.rs
+++ b/core/bench/src/analytics/metrics/mod.rs
@@ -18,3 +18,4 @@
 
 pub mod group;
 pub mod individual;
+pub mod latency_distribution;
diff --git a/core/bench/src/plot.rs b/core/bench/src/plot.rs
index fae733aad..19c58555a 100644
--- a/core/bench/src/plot.rs
+++ b/core/bench/src/plot.rs
@@ -28,6 +28,7 @@ use tracing::info;
 pub enum ChartType {
     Throughput,
     Latency,
+    LatencyDistribution,
 }
 
 impl ChartType {
@@ -35,6 +36,7 @@ impl ChartType {
         match self {
             Self::Throughput => "throughput",
             Self::Latency => "latency",
+            Self::LatencyDistribution => "latency_distribution",
         }
     }
 
@@ -42,6 +44,7 @@ impl ChartType {
         match self {
             Self::Throughput => bench_report::create_throughput_chart,
             Self::Latency => bench_report::create_latency_chart,
+            Self::LatencyDistribution => 
bench_report::create_latency_distribution_chart,
         }
     }
 
@@ -58,6 +61,12 @@ impl ChartType {
                 .filter(|m| !m.latency_ts.points.is_empty())
                 .map(|m| m.latency_ts.points.len())
                 .sum(),
+            Self::LatencyDistribution => report
+                .group_metrics
+                .iter()
+                .filter_map(|m| m.latency_distribution.as_ref())
+                .map(|d| d.bins.len())
+                .sum(),
         }
     }
 }
diff --git a/core/bench/src/runner.rs b/core/bench/src/runner.rs
index 40daf2bf1..6813ec040 100644
--- a/core/bench/src/runner.rs
+++ b/core/bench/src/runner.rs
@@ -129,6 +129,16 @@ impl BenchmarkRunner {
                 error!("Failed to generate plots: {e}");
                 IggyError::CannotWriteToFile
             })?;
+            plot_chart(
+                &report,
+                &full_output_path,
+                &ChartType::LatencyDistribution,
+                should_open_charts,
+            )
+            .map_err(|e| {
+                error!("Failed to generate plots: {e}");
+                IggyError::CannotWriteToFile
+            })?;
         }
 
         Ok(())


Reply via email to