This is an automated email from the ASF dual-hosted git repository.

alamb pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/arrow-rs.git


The following commit(s) were added to refs/heads/main by this push:
     new 10a976fc03 chore: increase row count and batch size for more 
deterministic tests (#9088)
10a976fc03 is described below

commit 10a976fc03af32d26f9e4bf4dcc420cb1a455ef0
Author: Alex Huang <[email protected]>
AuthorDate: Wed Jan 7 16:43:28 2026 +0200

    chore: increase row count and batch size for more deterministic tests 
(#9088)
    
    # Which issue does this PR close?
    
    <!--
    We generally require a GitHub issue to be filed for all bug fixes and
    enhancements and this helps us generate change logs for our releases.
    You can link an issue to this PR using the GitHub syntax.
    -->
    
    - Closes #NNN.
    
    # Rationale for this change
    
    The previous benchmarks ran in only 2-7 microseconds, which is too fast
    to deterministically measure the performance improvement.
    
    <!--
    Why are you proposing this change? If this is already explained clearly
    in the issue then this section is not needed.
    Explaining clearly why changes are proposed helps reviewers understand
    your changes and offer better suggestions for fixes.
    -->
    
    # What changes are included in this PR?
    
    <!--
    There is no need to duplicate the description in the issue here but it
    is sometimes worth providing a summary of the individual changes in this
    PR.
    -->
    
    # Are these changes tested?
    
    <!--
    We typically require tests for all PRs in order to:
    1. Prevent the code from being accidentally broken by subsequent changes
    2. Serve as another way to document the expected behavior of the code
    
    If tests are not included in your PR, please explain why (for example,
    are they covered by existing tests)?
    -->
    
    # Are there any user-facing changes?
    
    <!--
    If there are user-facing changes then we may require documentation to be
    updated before approving the PR.
    
    If there are any breaking changes to public APIs, please call them out.
    -->
---
 arrow-json/benches/serde.rs | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/arrow-json/benches/serde.rs b/arrow-json/benches/serde.rs
index 23f005cc90..282f2e7c76 100644
--- a/arrow-json/benches/serde.rs
+++ b/arrow-json/benches/serde.rs
@@ -22,12 +22,14 @@ use rand::{Rng, rng};
 use serde::Serialize;
 use std::sync::Arc;
 
+const ROWS: usize = 1 << 18;
+
 #[allow(deprecated)]
 fn do_bench<R: Serialize>(c: &mut Criterion, name: &str, rows: &[R], schema: 
&Schema) {
     let schema = Arc::new(schema.clone());
     c.bench_function(name, |b| {
         b.iter(|| {
-            let builder = 
ReaderBuilder::new(schema.clone()).with_batch_size(64);
+            let builder = 
ReaderBuilder::new(schema.clone()).with_batch_size(8192);
             let mut decoder = builder.build_decoder().unwrap();
             decoder.serialize(rows)
         })
@@ -37,26 +39,26 @@ fn do_bench<R: Serialize>(c: &mut Criterion, name: &str, 
rows: &[R], schema: &Sc
 fn criterion_benchmark(c: &mut Criterion) {
     let mut rng = rng();
     let schema = Schema::new(vec![Field::new("i32", DataType::Int32, false)]);
-    let v: Vec<i32> = (0..2048).map(|_| rng.random_range(0..10000)).collect();
+    let v: Vec<i32> = (0..ROWS).map(|_| rng.random_range(0..10000)).collect();
 
     do_bench(c, "small_i32", &v, &schema);
-    let v: Vec<i32> = (0..2048).map(|_| rng.random()).collect();
+    let v: Vec<i32> = (0..ROWS).map(|_| rng.random()).collect();
     do_bench(c, "large_i32", &v, &schema);
 
     let schema = Schema::new(vec![Field::new("i64", DataType::Int64, false)]);
-    let v: Vec<i64> = (0..2048).map(|_| rng.random_range(0..10000)).collect();
+    let v: Vec<i64> = (0..ROWS).map(|_| rng.random_range(0..10000)).collect();
     do_bench(c, "small_i64", &v, &schema);
-    let v: Vec<i64> = (0..2048)
+    let v: Vec<i64> = (0..ROWS)
         .map(|_| rng.random_range(0..i32::MAX as _))
         .collect();
     do_bench(c, "medium_i64", &v, &schema);
-    let v: Vec<i64> = (0..2048).map(|_| rng.random()).collect();
+    let v: Vec<i64> = (0..ROWS).map(|_| rng.random()).collect();
     do_bench(c, "large_i64", &v, &schema);
 
     let schema = Schema::new(vec![Field::new("f32", DataType::Float32, 
false)]);
-    let v: Vec<f32> = (0..2048).map(|_| 
rng.random_range(0.0..10000.)).collect();
+    let v: Vec<f32> = (0..ROWS).map(|_| 
rng.random_range(0.0..10000.)).collect();
     do_bench(c, "small_f32", &v, &schema);
-    let v: Vec<f32> = (0..2048).map(|_| 
rng.random_range(0.0..f32::MAX)).collect();
+    let v: Vec<f32> = (0..ROWS).map(|_| 
rng.random_range(0.0..f32::MAX)).collect();
     do_bench(c, "large_f32", &v, &schema);
 }
 

Reply via email to