alamb commented on code in PR #21342:
URL: https://github.com/apache/datafusion/pull/21342#discussion_r3033447587


##########
datafusion/datasource/src/morsel/mocks.rs:
##########
@@ -0,0 +1,556 @@
+// Licensed to the Apache Software Foundation (ASF) under one

Review Comment:
   This is testing infrastructure used to write the snapshot tests



##########
datafusion/datasource/src/file_stream/mod.rs:
##########
@@ -89,105 +79,31 @@ impl FileStream {
     /// If `OnError::Skip` the stream will skip files which encounter an error 
and continue
     /// If `OnError:Fail` (default) the stream will fail and stop processing 
when an error occurs
     pub fn with_on_error(mut self, on_error: OnError) -> Self {
-        self.on_error = on_error;
+        if let FileStreamState::Scan { scan_state } = &mut self.state {
+            scan_state.set_on_error(on_error);
+        }
         self
     }
 
-    fn start_next_file(&mut self) -> Option<Result<FileOpenFuture>> {
-        let part_file = self.file_iter.pop_front()?;
-        Some(self.file_opener.open(part_file))
-    }
-
     fn poll_inner(&mut self, cx: &mut Context<'_>) -> 
Poll<Option<Result<RecordBatch>>> {
         loop {
             match &mut self.state {
-                FileStreamState::Idle => match 
self.start_next_file().transpose() {
-                    Ok(Some(future)) => {
-                        self.file_stream_metrics.time_opening.start();
-                        self.state = FileStreamState::Open { future };
-                    }
-                    Ok(None) => return Poll::Ready(None),
-                    Err(e) => {
-                        self.state = FileStreamState::Error;
-                        return Poll::Ready(Some(Err(e)));
-                    }
-                },
-                FileStreamState::Open { future } => match 
ready!(future.poll_unpin(cx)) {
-                    Ok(reader) => {
-                        self.file_stream_metrics.files_opened.add(1);
-                        self.file_stream_metrics.time_opening.stop();
-                        
self.file_stream_metrics.time_scanning_until_data.start();
-                        self.file_stream_metrics.time_scanning_total.start();
-                        self.state = FileStreamState::Scan { reader };
-                    }
-                    Err(e) => {
-                        self.file_stream_metrics.file_open_errors.add(1);
-                        match self.on_error {
-                            OnError::Skip => {
-                                
self.file_stream_metrics.files_processed.add(1);
-                                self.file_stream_metrics.time_opening.stop();
-                                self.state = FileStreamState::Idle
-                            }
-                            OnError::Fail => {
-                                self.state = FileStreamState::Error;
-                                return Poll::Ready(Some(Err(e)));
-                            }
-                        }
-                    }
-                },
-                FileStreamState::Scan { reader } => {
-                    match ready!(reader.poll_next_unpin(cx)) {
-                        Some(Ok(batch)) => {
-                            
self.file_stream_metrics.time_scanning_until_data.stop();
-                            
self.file_stream_metrics.time_scanning_total.stop();
-                            let batch = match &mut self.remain {
-                                Some(remain) => {
-                                    if *remain > batch.num_rows() {
-                                        *remain -= batch.num_rows();
-                                        batch
-                                    } else {
-                                        let batch = batch.slice(0, *remain);
-                                        // Count this file and all remaining 
files
-                                        // we will never open.
-                                        let done = 1 + self.file_iter.len();
-                                        self.file_stream_metrics
-                                            .files_processed
-                                            .add(done);
-                                        self.state = FileStreamState::Limit;
-                                        *remain = 0;
-                                        batch
-                                    }
-                                }
-                                None => batch,
-                            };
-                            
self.file_stream_metrics.time_scanning_total.start();
-                            return Poll::Ready(Some(Ok(batch)));
-                        }
-                        Some(Err(err)) => {
-                            self.file_stream_metrics.file_scan_errors.add(1);
-                            
self.file_stream_metrics.time_scanning_until_data.stop();
-                            
self.file_stream_metrics.time_scanning_total.stop();
-
-                            match self.on_error {
-                                OnError::Skip => {
-                                    
self.file_stream_metrics.files_processed.add(1);
-                                    self.state = FileStreamState::Idle;
-                                }
-                                OnError::Fail => {
-                                    self.state = FileStreamState::Error;
-                                    return Poll::Ready(Some(Err(err)));
-                                }
-                            }
+                FileStreamState::Scan { scan_state: queue } => {

Review Comment:
   moved the inner state machine into a separate module/struct to try to keep 
indenting under control and encapsulate the complexity somewhat



##########
datafusion/datasource/src/file_stream/mod.rs:
##########
@@ -904,4 +678,341 @@ mod tests {
         );
         assert!(err.contains("FileStreamBuilder invalid partition index: 1"));
     }
+
+    /// Verifies the simplest morsel-driven flow: one planner produces one

Review Comment:
   Here are tests showing the sequence of calls to the various morsel APIs. I 
intend to use this framework to show how work can migrate from one stream to 
the other



##########
datafusion/datasource/src/morsel/adapters.rs:
##########
@@ -0,0 +1,165 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use crate::PartitionedFile;
+use crate::file_stream::FileOpener;
+use crate::morsel::{Morsel, MorselPlan, MorselPlanner, Morselizer};
+use arrow::array::RecordBatch;
+use datafusion_common::{DataFusionError, Result, internal_err};
+use futures::FutureExt;
+use futures::stream::BoxStream;
+use std::fmt::Debug;
+use std::sync::Arc;
+use std::sync::mpsc::{self, Receiver, TryRecvError};
+
+/// Adapt a legacy [`FileOpener`] to the morsel API.

Review Comment:
   This is an adapter so that existing `FileOpeners` continue to have the same 
behavior



##########
datafusion/datasource/src/file_stream/scan_state.rs:
##########
@@ -0,0 +1,261 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use std::collections::VecDeque;
+use std::task::{Context, Poll};
+
+use crate::PartitionedFile;
+use crate::morsel::{Morsel, MorselPlanner, Morselizer};
+use arrow::record_batch::RecordBatch;
+use datafusion_common::{DataFusionError, Result};
+use datafusion_physical_plan::metrics::ScopedTimerGuard;
+use futures::future::BoxFuture;
+use futures::stream::BoxStream;
+use futures::{FutureExt as _, StreamExt as _};
+
+use super::{FileStreamMetrics, OnError};
+
+/// Planner-owned asynchronous I/O discovered while planning a file.
+///
+/// Once `io_future` completes, `planner` becomes CPU-ready again and can be
+/// pushed back onto the scan queue for further planning.
+struct PendingOpen {
+    /// The planner to resume after the I/O completes.
+    planner: Box<dyn MorselPlanner>,
+    /// The outstanding I/O future for `planner`.
+    io_future: BoxFuture<'static, Result<()>>,
+}
+
+/// All mutable state for the active `FileStreamState::Scan` lifecycle.
+///
+/// This groups together ready planners, ready morsels, the active reader,
+/// pending planner I/O, the remaining files and limit, and the metrics
+/// associated with processing that work.
+pub(super) struct ScanState {

Review Comment:
   This is the new inner state machine for FileStream



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to