Hi Colin, thanks so much for giving this another look. I don't see any mention here of anyone trying different zfs_arc_meta_limit* settings!
I raised zfs_arc_meta_limit_percent from the default 75 to 95, and my system has now been running this ripgrep workload for four hours without falling to pieces. I think we can declare success on this one! Thanks. (A couple of notes for posterity are interleaved below.)

# grep . /sys/module/zfs/parameters/zfs_arc_{dnode,meta}_limit*
/sys/module/zfs/parameters/zfs_arc_dnode_limit:0
/sys/module/zfs/parameters/zfs_arc_dnode_limit_percent:50
/sys/module/zfs/parameters/zfs_arc_meta_limit:0
/sys/module/zfs/parameters/zfs_arc_meta_limit_percent:95

Hottest kernel symbols (perf top):

   7.97%  [kernel]  [k] SHA256TransformBlocks
   3.33%  [kernel]  [k] _raw_spin_lock
   2.59%  [kernel]  [k] arc_evict_state
   2.39%  [kernel]  [k] mutex_lock
   2.08%  [kernel]  [k] osq_lock

Busiest tasks (top):

    PID USER      PR  NI    VIRT    RES    SHR S  %CPU %MEM     TIME+ COMMAND
  28555 sarnold   20   0 28.648g 0.023t   2160 S 262.8 18.8 546:39.58 rg
   1567 root      20   0       0      0      0 R  71.4  0.0   1200:10 arc_reclaim
      2 root      20   0       0      0      0 R  23.7  0.0 238:17.77 kthreadd
   1966 root       0 -20       0      0      0 D  11.2  0.0 163:24.49 z_rd_int_2
   1967 root       0 -20       0      0      0 S  11.2  0.0 163:25.54 z_rd_int_3
   1968 root       0 -20       0      0      0 S  11.2  0.0 163:25.52 z_rd_int_4

# zpool iostat 1
              capacity     operations     bandwidth
pool        alloc   free   read  write   read  write
[...]
----------  -----  -----  -----  -----  -----  -----
fst         2.11T  1.52T  13.8K      0   112M      0
srv         1.84T  6.32T      0      0      0      0
----------  -----  -----  -----  -----  -----  -----
fst         2.11T  1.52T  17.4K      0   146M      0
srv         1.84T  6.32T      0      0      0      0
----------  -----  -----  -----  -----  -----  -----
fst         2.11T  1.52T  17.3K      0   147M      0
srv         1.84T  6.32T      0      0      0      0
----------  -----  -----  -----  -----  -----  -----

# arc_summary
------------------------------------------------------------------------
ZFS Subsystem Report                            Tue Oct 27 19:32:40 2020
ARC Summary: (HEALTHY)
        Memory Throttle Count:                  0

ARC Misc:
        Deleted:                                688.20m
        Mutex Misses:                           115.54m
        Evict Skips:                            99.92b

ARC Size:                               89.14%  56.07   GiB
        Target Size: (Adaptive)         9.30%   5.85    GiB
        Min Size (Hard Limit):          6.25%   3.93    GiB
        Max Size (High Water):          16:1    62.90   GiB

ARC Size Breakdown:
        Recently Used Cache Size:       81.96%  24.51   GiB
        Frequently Used Cache Size:     18.04%  5.39    GiB

ARC Hash Breakdown:
        Elements Max:                           9.78m
        Elements Current:               56.79%  5.55m
        Collisions:                             201.32m
        Chain Max:                              8
        Chains:                                 736.70k

ARC Total accesses:                                     12.23b
        Cache Hit Ratio:                90.87%  11.12b
        Cache Miss Ratio:               9.13%   1.12b
        Actual Hit Ratio:               90.27%  11.04b

        Data Demand Efficiency:         82.63%  666.79m
        Data Prefetch Efficiency:       27.20%  14.18m

        CACHE HITS BY CACHE LIST:
          Most Recently Used:           30.85%  3.43b
          Most Frequently Used:         68.49%  7.61b
          Most Recently Used Ghost:     0.13%   13.99m
          Most Frequently Used Ghost:   0.61%   67.51m

        CACHE HITS BY DATA TYPE:
          Demand Data:                  4.96%   550.94m
          Prefetch Data:                0.03%   3.86m
          Demand Metadata:              94.20%  10.47b
          Prefetch Metadata:            0.80%   89.48m

        CACHE MISSES BY DATA TYPE:
          Demand Data:                  10.37%  115.85m
          Prefetch Data:                0.92%   10.32m
          Demand Metadata:              82.55%  921.91m
          Prefetch Metadata:            6.15%   68.73m

L2 ARC Summary: (HEALTHY)
        Low Memory Aborts:                      116
        Free on Write:                          21.13k
        R/W Clashes:                            3
        Bad Checksums:                          0
        IO Errors:                              0

L2 ARC Size: (Adaptive)                         418.26  GiB
        Compressed:                     90.78%  379.71  GiB
        Header Size:                    0.08%   360.27  MiB

L2 ARC Breakdown:                               1.12b
        Hit Ratio:                      4.65%   51.90m
        Miss Ratio:                     95.35%  1.06b
        Feeds:                                  3.98m

L2 ARC Writes:
        Writes Sent:                    100.00% 110.74k

DMU Prefetch Efficiency:                        8.56b
        Hit Ratio:                      0.26%   22.20m
        Miss Ratio:                     99.74%  8.54b
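First note: a quick way to keep an eye on metadata pressure while something like this rg runs is an awk over arcstats. This is only a sketch against the arc_meta_* field names in the 0.7.x arcstats on this box; other releases may spell them differently:

# awk '/^arc_meta_(used|limit|min|max)/ { printf "%-16s %8.2f GiB\n", $1, $3 / 2^30 }' /proc/spl/kstat/zfs/arcstats

If arc_meta_used sits pinned at arc_meta_limit, that would explain arc_reclaim spinning in the top output above; with the limit raised to 95% there is headroom again. The rest of the arc_summary output, the tunables in effect, follows: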
ZFS Tunable:
        dbuf_cache_hiwater_pct                             10
        dbuf_cache_lowater_pct                             10
        dbuf_cache_max_bytes                               104857600
        dbuf_cache_max_shift                               5
        dmu_object_alloc_chunk_shift                       7
        ignore_hole_birth                                  1
        l2arc_feed_again                                   1
        l2arc_feed_min_ms                                  200
        l2arc_feed_secs                                    1
        l2arc_headroom                                     2
        l2arc_headroom_boost                               200
        l2arc_noprefetch                                   1
        l2arc_norw                                         0
        l2arc_write_boost                                  1073741824
        l2arc_write_max                                    1073741824
        metaslab_aliquot                                   524288
        metaslab_bias_enabled                              1
        metaslab_debug_load                                0
        metaslab_debug_unload                              0
        metaslab_fragmentation_factor_enabled              1
        metaslab_lba_weighting_enabled                     1
        metaslab_preload_enabled                           1
        metaslabs_per_vdev                                 200
        send_holes_without_birth_time                      1
        spa_asize_inflation                                24
        spa_config_path                                    /etc/zfs/zpool.cache
        spa_load_verify_data                               1
        spa_load_verify_maxinflight                        10000
        spa_load_verify_metadata                           1
        spa_slop_shift                                     5
        zfetch_array_rd_sz                                 1048576
        zfetch_max_distance                                8388608
        zfetch_max_streams                                 8
        zfetch_min_sec_reap                                2
        zfs_abd_scatter_enabled                            1
        zfs_abd_scatter_max_order                          10
        zfs_admin_snapshot                                 1
        zfs_arc_average_blocksize                          8192
        zfs_arc_dnode_limit                                0
        zfs_arc_dnode_limit_percent                        50
        zfs_arc_dnode_reduce_percent                       10
        zfs_arc_grow_retry                                 0
        zfs_arc_lotsfree_percent                           10
        zfs_arc_max                                        0
        zfs_arc_meta_adjust_restarts                       4096
        zfs_arc_meta_limit                                 0
        zfs_arc_meta_limit_percent                         95
        zfs_arc_meta_min                                   0
        zfs_arc_meta_prune                                 10000
        zfs_arc_meta_strategy                              1
        zfs_arc_min                                        0
        zfs_arc_min_prefetch_lifespan                      0
        zfs_arc_p_aggressive_disable                       1
        zfs_arc_p_dampener_disable                         1
        zfs_arc_p_min_shift                                0
        zfs_arc_pc_percent                                 0
        zfs_arc_shrink_shift                               0
        zfs_arc_sys_free                                   0
        zfs_autoimport_disable                             1
        zfs_compressed_arc_enabled                         1
        zfs_dbgmsg_enable                                  0
        zfs_dbgmsg_maxsize                                 4194304
        zfs_dbuf_state_index                               0
        zfs_deadman_checktime_ms                           5000
        zfs_deadman_enabled                                1
        zfs_deadman_synctime_ms                            1000000
        zfs_dedup_prefetch                                 0
        zfs_delay_min_dirty_percent                        60
        zfs_delay_scale                                    500000
        zfs_delete_blocks                                  20480
        zfs_dirty_data_max                                 4294967296
        zfs_dirty_data_max_max                             4294967296
        zfs_dirty_data_max_max_percent                     25
        zfs_dirty_data_max_percent                         10
        zfs_dirty_data_sync                                67108864
        zfs_dmu_offset_next_sync                           0
        zfs_expire_snapshot                                300
        zfs_flags                                          0
        zfs_free_bpobj_enabled                             1
        zfs_free_leak_on_eio                               0
        zfs_free_max_blocks                                100000
        zfs_free_min_time_ms                               1000
        zfs_immediate_write_sz                             32768
        zfs_max_recordsize                                 1048576
        zfs_mdcomp_disable                                 0
        zfs_metaslab_fragmentation_threshold               70
        zfs_metaslab_segment_weight_enabled                1
        zfs_metaslab_switch_threshold                      2
        zfs_mg_fragmentation_threshold                     85
        zfs_mg_noalloc_threshold                           0
        zfs_multihost_fail_intervals                       5
        zfs_multihost_history                              0
        zfs_multihost_import_intervals                     10
        zfs_multihost_interval                             1000
        zfs_multilist_num_sublists                         0
        zfs_no_scrub_io                                    0
        zfs_no_scrub_prefetch                              0
        zfs_nocacheflush                                   0
        zfs_nopwrite_enabled                               1
        zfs_object_mutex_size                              64
        zfs_pd_bytes_max                                   52428800
        zfs_per_txg_dirty_frees_percent                    30
        zfs_prefetch_disable                               0
        zfs_read_chunk_size                                1048576
        zfs_read_history                                   0
        zfs_read_history_hits                              0
        zfs_recover                                        0
        zfs_resilver_delay                                 2
        zfs_resilver_min_time_ms                           3000
        zfs_scan_idle                                      50
        zfs_scan_min_time_ms                               1000
        zfs_scrub_delay                                    4
        zfs_send_corrupt_data                              0
        zfs_sync_pass_deferred_free                        2
        zfs_sync_pass_dont_compress                        5
        zfs_sync_pass_rewrite                              2
        zfs_sync_taskq_batch_pct                           75
        zfs_top_maxinflight                                32
        zfs_txg_history                                    0
        zfs_txg_timeout                                    5
        zfs_vdev_aggregation_limit                         131072
        zfs_vdev_async_read_max_active                     3
        zfs_vdev_async_read_min_active                     1
        zfs_vdev_async_write_active_max_dirty_percent      60
        zfs_vdev_async_write_active_min_dirty_percent      30
        zfs_vdev_async_write_max_active                    10
        zfs_vdev_async_write_min_active                    2
        zfs_vdev_cache_bshift                              16
        zfs_vdev_cache_max                                 16384
        zfs_vdev_cache_size                                0
        zfs_vdev_max_active                                1000
        zfs_vdev_mirror_non_rotating_inc                   0
        zfs_vdev_mirror_non_rotating_seek_inc              1
        zfs_vdev_mirror_rotating_inc                       0
        zfs_vdev_mirror_rotating_seek_inc                  5
        zfs_vdev_mirror_rotating_seek_offset               1048576
        zfs_vdev_queue_depth_pct                           1000
        zfs_vdev_raidz_impl                                [fastest] original scalar sse2 ssse3 avx2
        zfs_vdev_read_gap_limit                            32768
        zfs_vdev_scheduler                                 noop
        zfs_vdev_scrub_max_active                          2
        zfs_vdev_scrub_min_active                          1
        zfs_vdev_sync_read_max_active                      10
        zfs_vdev_sync_read_min_active                      10
        zfs_vdev_sync_write_max_active                     10
        zfs_vdev_sync_write_min_active                     10
        zfs_vdev_write_gap_limit                           4096
        zfs_zevent_cols                                    80
        zfs_zevent_console                                 0
        zfs_zevent_len_max                                 512
        zfs_zil_clean_taskq_maxalloc                       1048576
        zfs_zil_clean_taskq_minalloc                       1024
        zfs_zil_clean_taskq_nthr_pct                       100
        zil_replay_disable                                 0
        zil_slog_bulk                                      786432
        zio_delay_max                                      30000
        zio_dva_throttle_enabled                           1
        zio_requeue_io_start_cut_in_line                   1
        zio_taskq_batch_pct                                75
        zvol_inhibit_dev                                   0
        zvol_major                                         230
        zvol_max_discard_blocks                            16384
        zvol_prefetch_bytes                                131072
        zvol_request_sync                                  0
        zvol_threads                                       32
        zvol_volmode                                       1
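Second note, for anyone landing here from a search: the runtime change (echoing 95 into /sys/module/zfs/parameters/zfs_arc_meta_limit_percent) does not survive a reboot. The usual way to make it stick is a modprobe.d snippet, something like the below; the filename is just an example, any *.conf under /etc/modprobe.d is read:

# echo 'options zfs zfs_arc_meta_limit_percent=95' > /etc/modprobe.d/zfs.conf
# update-initramfs -u

The update-initramfs step matters on Ubuntu because the zfs module can be loaded from the initramfs, and the option needs to be present there too.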
** Changed in: zfs-linux (Ubuntu)
       Status: Incomplete => Invalid

-- 
You received this bug notification because you are a member of Ubuntu
Bugs, which is subscribed to Ubuntu.
https://bugs.launchpad.net/bugs/1814983

Title:
  zfs poor sustained read performance from ssd pool

To manage notifications about this bug go to:
https://bugs.launchpad.net/zfs/+bug/1814983/+subscriptions

-- 
ubuntu-bugs mailing list
ubuntu-bugs@lists.ubuntu.com
https://lists.ubuntu.com/mailman/listinfo/ubuntu-bugs