Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions datafusion/core/src/physical_planner.rs
Original file line number Diff line number Diff line change
Expand Up @@ -77,6 +77,7 @@ use datafusion_common::{
};
use datafusion_datasource::file_groups::FileGroup;
use datafusion_datasource::memory::MemorySourceConfig;
use datafusion_datasource::values::ValuesSource;
use datafusion_expr::dml::{CopyTo, InsertOp};
use datafusion_expr::expr::{
AggregateFunction, AggregateFunctionParams, Alias, GroupingSet, NullTreatment,
Expand Down Expand Up @@ -486,8 +487,7 @@ impl DefaultPhysicalPlanner {
.collect::<Result<Vec<Arc<dyn PhysicalExpr>>>>()
})
.collect::<Result<Vec<_>>>()?;
MemorySourceConfig::try_new_as_values(Arc::clone(schema.inner()), exprs)?
as _
ValuesSource::try_new_exec(Arc::clone(schema.inner()), exprs)?
}
LogicalPlan::EmptyRelation(EmptyRelation {
produce_one_row: false,
Expand Down
107 changes: 12 additions & 95 deletions datafusion/datasource/src/memory.rs
Original file line number Diff line number Diff line change
Expand Up @@ -27,11 +27,9 @@ use std::sync::Arc;
use crate::sink::DataSink;
use crate::source::{DataSource, DataSourceExec};

use arrow::array::{RecordBatch, RecordBatchOptions};
use arrow::datatypes::{Schema, SchemaRef};
use datafusion_common::{
Result, ScalarValue, assert_or_internal_err, plan_err, project_schema,
};
use arrow::array::RecordBatch;
use arrow::datatypes::SchemaRef;
use datafusion_common::{Result, assert_or_internal_err, plan_err, project_schema};
use datafusion_execution::TaskContext;
use datafusion_physical_expr::equivalence::project_orderings;
use datafusion_physical_expr::projection::ProjectionExprs;
Expand All @@ -42,8 +40,8 @@ use datafusion_physical_plan::projection::{
all_alias_free_columns, new_projections_for_columns,
};
use datafusion_physical_plan::{
ColumnarValue, DisplayAs, DisplayFormatType, Partitioning, PhysicalExpr,
SendableRecordBatchStream, Statistics, common,
DisplayAs, DisplayFormatType, Partitioning, SendableRecordBatchStream, Statistics,
common,
};

use async_trait::async_trait;
Expand Down Expand Up @@ -285,61 +283,6 @@ impl MemorySourceConfig {
Ok(DataSourceExec::from_data_source(source))
}

/// Create a new execution plan from a list of constant values (`ValuesExec`).
///
/// `data` is row-major: `data[i][j]` is the expression producing the value of
/// column `j` in row `i`. Every expression is evaluated against an empty,
/// single-row placeholder batch, so only constant (scalar-producing)
/// expressions are accepted.
///
/// # Errors
/// Returns a plan error when `data` is empty, when any cell expression yields
/// a multi-row array, or when the materialized batch does not match `schema`.
#[expect(clippy::needless_pass_by_value)]
pub fn try_new_as_values(
schema: SchemaRef,
data: Vec<Vec<Arc<dyn PhysicalExpr>>>,
) -> Result<Arc<DataSourceExec>> {
if data.is_empty() {
return plan_err!("Values list cannot be empty");
}

let num_rows = data.len();
let num_columns = schema.fields().len();

// Expressions need *some* input batch to be evaluated against; a
// schema-less batch with a forced row count of 1 makes each cell
// expression produce exactly one output value.
let placeholder = RecordBatch::try_new_with_options(
Arc::new(Schema::empty()),
vec![],
&RecordBatchOptions::new().with_row_count(Some(1)),
)?;

// Materialize the table column by column (column-major): reduce each
// cell expression to a ScalarValue, then build one Array per column.
let mut columns = Vec::with_capacity(num_columns);
for col_idx in 0..num_columns {
let mut cells = Vec::with_capacity(num_rows);
for row in &data {
let scalar = match row[col_idx].evaluate(&placeholder)? {
ColumnarValue::Scalar(s) => s,
ColumnarValue::Array(arr) if arr.len() == 1 => {
ScalarValue::try_from_array(&arr, 0)?
}
ColumnarValue::Array(_) => {
return plan_err!("Cannot have array values in a values list");
}
};
cells.push(scalar);
}
columns.push(ScalarValue::iter_to_array(cells)?);
}

// The explicit row count also covers the degenerate zero-column case.
let batch = RecordBatch::try_new_with_options(
Arc::clone(&schema),
columns,
&RecordBatchOptions::new().with_row_count(Some(num_rows)),
)?;

Self::try_new_from_batches(schema, vec![batch])
}

/// Create a new plan using the provided schema and batches.
///
/// Errors if any of the batches don't match the provided schema, or if no
Expand Down Expand Up @@ -845,12 +788,13 @@ mod memory_source_tests {
mod tests {
use super::*;
use crate::test_util::col;
use crate::tests::{aggr_test_schema, make_partition};
use crate::tests::make_partition;
use crate::values::ValuesSource;

use arrow::array::{ArrayRef, Int32Array, Int64Array, StringArray};
use arrow::datatypes::{DataType, Field};
use datafusion_common::assert_batches_eq;
use arrow::datatypes::{DataType, Field, Schema};
use datafusion_common::stats::{ColumnStatistics, Precision};
use datafusion_common::{ScalarValue, assert_batches_eq};
use datafusion_physical_expr::PhysicalSortExpr;
use datafusion_physical_plan::expressions::lit;

Expand Down Expand Up @@ -883,14 +827,6 @@ mod tests {
Ok(())
}

// An empty values list is invalid and must produce a planning error.
#[tokio::test]
async fn values_empty_case() -> Result<()> {
let result = MemorySourceConfig::try_new_as_values(aggr_test_schema(), vec![]);
assert!(result.is_err());
Ok(())
}

#[test]
fn new_exec_with_batches() {
let batch = make_partition(7);
Expand Down Expand Up @@ -919,27 +855,6 @@ mod tests {
.unwrap_err();
}

// Regression test for https://github.com/apache/datafusion/issues/8763:
// values must honor the schema's nullability constraints.
#[test]
fn new_exec_with_non_nullable_schema() {
let non_nullable =
Arc::new(Schema::new(vec![Field::new("col0", DataType::UInt32, false)]));

// A concrete (non-null) literal satisfies the non-nullable column.
MemorySourceConfig::try_new_as_values(
Arc::clone(&non_nullable),
vec![vec![lit(1u32)]],
)
.unwrap();

// A NULL literal against the same schema must be rejected.
MemorySourceConfig::try_new_as_values(
non_nullable,
vec![vec![lit(ScalarValue::UInt32(None))]],
)
.unwrap_err();
}

#[test]
fn values_stats_with_nulls_only() -> Result<()> {
let data = vec![
Expand All @@ -950,7 +865,9 @@ mod tests {
let rows = data.len();
let schema =
Arc::new(Schema::new(vec![Field::new("col0", DataType::Null, true)]));
let values = MemorySourceConfig::try_new_as_values(schema, data)?;

let values = ValuesSource::try_new_exec(schema, data)?;
assert!(values.data_source().as_any().is::<MemorySourceConfig>());

assert_eq!(
values.partition_statistics(None)?,
Expand Down
1 change: 1 addition & 0 deletions datafusion/datasource/src/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,7 @@ pub mod sink;
pub mod source;
mod statistics;
pub mod table_schema;
pub mod values;

#[cfg(test)]
pub mod test_util;
Expand Down
Loading