kosiew commented on code in PR #20047: URL: https://github.com/apache/datafusion/pull/20047#discussion_r3128191420
########## datafusion/execution/src/cache/file_statistics_cache.rs: ########## @@ -0,0 +1,739 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements.  See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership.  The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License.  You may obtain a copy of the License at +// +//   http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied.  See the License for the +// specific language governing permissions and limitations +// under the License. + +use crate::cache::cache_manager::{ +    CachedFileMetadata, FileStatisticsCache, FileStatisticsCacheEntry, +}; +use crate::cache::{CacheAccessor, TableScopedPath}; +use object_store::path::Path; +use std::collections::HashMap; +use std::sync::Mutex; + +pub use crate::cache::DefaultFilesMetadataCache; +use crate::cache::lru_queue::LruQueue; +use datafusion_common::TableReference; +use datafusion_common::heap_size::DFHeapSize; + +/// Default implementation of [`FileStatisticsCache`] +/// +/// Stores cached file metadata (statistics and orderings) for files. +/// +/// The typical usage pattern is: +/// 1. Call `get(path)` to check for cached value +/// 2. If `Some(cached)`, validate with `cached.is_valid_for(&current_meta)` +/// 3. If invalid or missing, compute new value and call `put(path, new_value)` +/// +/// # Internal details +/// +/// The `memory_limit` controls the maximum size of the cache, which uses a +/// Least Recently Used eviction algorithm. 
When adding a new entry, if the total +/// size of the cached entries exceeds `memory_limit`, the least recently used entries +/// are evicted until the total size is lower than `memory_limit`. +/// +/// +/// [`FileStatisticsCache`]: crate::cache::cache_manager::FileStatisticsCache +#[derive(Default)] +pub struct DefaultFileStatisticsCache { + state: Mutex<DefaultFileStatisticsCacheState>, +} + +impl DefaultFileStatisticsCache { + pub fn new(memory_limit: usize) -> Self { + Self { + state: Mutex::new(DefaultFileStatisticsCacheState::new(memory_limit)), + } + } + + /// Returns the size of the cached memory, in bytes. + pub fn memory_used(&self) -> usize { + let state = self.state.lock().unwrap(); + state.memory_used + } +} + +struct DefaultFileStatisticsCacheState { + lru_queue: LruQueue<TableScopedPath, CachedFileMetadata>, + memory_limit: usize, + memory_used: usize, +} + +pub const DEFAULT_FILE_STATISTICS_MEMORY_LIMIT: usize = 20 * 1024 * 1024; // 20MiB + +impl Default for DefaultFileStatisticsCacheState { + fn default() -> Self { + Self { + lru_queue: LruQueue::new(), + memory_limit: DEFAULT_FILE_STATISTICS_MEMORY_LIMIT, + memory_used: 0, + } + } +} + +impl DefaultFileStatisticsCacheState { + fn new(memory_limit: usize) -> Self { + Self { + lru_queue: LruQueue::new(), + memory_limit, + memory_used: 0, + } + } + fn get(&mut self, key: &TableScopedPath) -> Option<CachedFileMetadata> { + self.lru_queue.get(key).cloned() + } + + fn put( + &mut self, + key: &TableScopedPath, + value: CachedFileMetadata, + ) -> Option<CachedFileMetadata> { + let key_size = key.path.as_ref().heap_size(); Review Comment: The cache key is now `TableScopedPath { table, path }`, but the memory accounting here still only charges for the path bytes. That leaves the new `table_reference` portion effectively unaccounted for, so `memory_used` can undercount in multi-table scenarios. 
Would it make sense to include the `TableReference` heap usage as well so the cache limit is enforced more accurately? ########## datafusion/execution/src/cache/file_statistics_cache.rs: ########## @@ -0,0 +1,739 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements.  See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership.  The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License.  You may obtain a copy of the License at +// +//   http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied.  See the License for the +// specific language governing permissions and limitations +// under the License. + +use crate::cache::cache_manager::{ +    CachedFileMetadata, FileStatisticsCache, FileStatisticsCacheEntry, +}; +use crate::cache::{CacheAccessor, TableScopedPath}; +use object_store::path::Path; +use std::collections::HashMap; +use std::sync::Mutex; + +pub use crate::cache::DefaultFilesMetadataCache; +use crate::cache::lru_queue::LruQueue; +use datafusion_common::TableReference; +use datafusion_common::heap_size::DFHeapSize; + +/// Default implementation of [`FileStatisticsCache`] +/// +/// Stores cached file metadata (statistics and orderings) for files. +/// +/// The typical usage pattern is: +/// 1. Call `get(path)` to check for cached value +/// 2. If `Some(cached)`, validate with `cached.is_valid_for(&current_meta)` +/// 3. If invalid or missing, compute new value and call `put(path, new_value)` +/// +/// # Internal details +/// +/// The `memory_limit` controls the maximum size of the cache, which uses a +/// Least Recently Used eviction algorithm. 
When adding a new entry, if the total +/// size of the cached entries exceeds `memory_limit`, the least recently used entries +/// are evicted until the total size is lower than `memory_limit`. +/// +/// +/// [`FileStatisticsCache`]: crate::cache::cache_manager::FileStatisticsCache +#[derive(Default)] +pub struct DefaultFileStatisticsCache { + state: Mutex<DefaultFileStatisticsCacheState>, +} + +impl DefaultFileStatisticsCache { + pub fn new(memory_limit: usize) -> Self { + Self { + state: Mutex::new(DefaultFileStatisticsCacheState::new(memory_limit)), + } + } + + /// Returns the size of the cached memory, in bytes. + pub fn memory_used(&self) -> usize { + let state = self.state.lock().unwrap(); + state.memory_used + } +} + +struct DefaultFileStatisticsCacheState { + lru_queue: LruQueue<TableScopedPath, CachedFileMetadata>, + memory_limit: usize, + memory_used: usize, +} + +pub const DEFAULT_FILE_STATISTICS_MEMORY_LIMIT: usize = 20 * 1024 * 1024; // 20MiB + +impl Default for DefaultFileStatisticsCacheState { + fn default() -> Self { + Self { + lru_queue: LruQueue::new(), + memory_limit: DEFAULT_FILE_STATISTICS_MEMORY_LIMIT, + memory_used: 0, + } + } +} + +impl DefaultFileStatisticsCacheState { + fn new(memory_limit: usize) -> Self { + Self { + lru_queue: LruQueue::new(), + memory_limit, + memory_used: 0, + } + } + fn get(&mut self, key: &TableScopedPath) -> Option<CachedFileMetadata> { + self.lru_queue.get(key).cloned() + } + + fn put( + &mut self, + key: &TableScopedPath, + value: CachedFileMetadata, + ) -> Option<CachedFileMetadata> { + let key_size = key.path.as_ref().heap_size(); + let entry_size = value.heap_size(); + + if entry_size + key_size > self.memory_limit { + // Remove potential stale entry + self.remove(key); + return None; + } + + let old_value = self.lru_queue.put(key.clone(), value); + self.memory_used += entry_size; + self.memory_used += key.path.as_ref().heap_size(); + + if let Some(old_entry) = &old_value { + self.memory_used -= 
old_entry.heap_size(); + self.memory_used -= key.path.as_ref().heap_size(); + } + + self.evict_entries(); + + old_value + } + + fn remove(&mut self, k: &TableScopedPath) -> Option<CachedFileMetadata> { + if let Some(old_entry) = self.lru_queue.remove(k) { + self.memory_used -= k.path.as_ref().heap_size(); + self.memory_used -= old_entry.heap_size(); + Some(old_entry) + } else { + None + } + } + + fn contains_key(&self, k: &TableScopedPath) -> bool { + self.lru_queue.contains_key(k) + } + + fn len(&self) -> usize { + self.lru_queue.len() + } + + fn clear(&mut self) { + self.lru_queue.clear(); + self.memory_used = 0; + } + + fn evict_entries(&mut self) { + while self.memory_used > self.memory_limit { + if let Some(removed) = self.lru_queue.pop() { + self.memory_used -= removed.0.path.as_ref().heap_size(); + self.memory_used -= removed.1.heap_size(); + } else { + // cache is empty while memory_used > memory_limit, cannot happen + log::error!( + "File statistics cache memory accounting bug: memory_used={} but cache is empty. 
\ + Please report this to the Apache DataFusion developers.", + self.memory_used + ); + debug_assert!( + false, + "memory_used={} but cache is empty", + self.memory_used + ); + self.memory_used = 0; + return; + } + } + } +} +impl CacheAccessor<TableScopedPath, CachedFileMetadata> for DefaultFileStatisticsCache { + fn get(&self, key: &TableScopedPath) -> Option<CachedFileMetadata> { + let mut state = self.state.lock().unwrap(); + state.get(key) + } + + fn put( + &self, + key: &TableScopedPath, + value: CachedFileMetadata, + ) -> Option<CachedFileMetadata> { + let mut state = self.state.lock().unwrap(); + state.put(key, value) + } + + fn remove(&self, key: &TableScopedPath) -> Option<CachedFileMetadata> { + let mut state = self.state.lock().unwrap(); + state.remove(key) + } + + fn contains_key(&self, k: &TableScopedPath) -> bool { + let state = self.state.lock().unwrap(); + state.contains_key(k) + } + + fn len(&self) -> usize { + let state = self.state.lock().unwrap(); + state.len() + } + + fn clear(&self) { + let mut state = self.state.lock().unwrap(); + state.clear(); + } + + fn name(&self) -> String { + "DefaultFileStatisticsCache".to_string() + } +} + +impl FileStatisticsCache for DefaultFileStatisticsCache { + fn cache_limit(&self) -> usize { + let state = self.state.lock().unwrap(); + state.memory_limit + } + + fn update_cache_limit(&self, limit: usize) { + let mut state = self.state.lock().unwrap(); + state.memory_limit = limit; + state.evict_entries(); + } + + fn list_entries(&self) -> HashMap<Path, FileStatisticsCacheEntry> { Review Comment: Now that the cache is keyed by `TableScopedPath`, returning `HashMap<Path, ...>` here loses information. If two tables cache the same file path, one entry will overwrite the other in `list_entries()`, so the introspection output no longer reflects the real cache contents. Should this API return a table-scoped key as well, or otherwise preserve multiple entries for the same path? 
########## datafusion/core/src/execution/context/mod.rs: ########## Review Comment: `register_listing_table()` now participates in the table-scoped file statistics cache, but this path never calls `with_table_ref(table_ref)`. That means entries created through this API end up cached under `table=None`, so deregistration will not clear them and different registered tables at the same location can still share cache entries. Could we stamp the table reference onto the `ListingTableUrl` here? -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: [email protected] For queries about this service, please contact Infrastructure at: [email protected] --------------------------------------------------------------------- To unsubscribe, e-mail: [email protected] For additional commands, e-mail: [email protected]
