Skip to content

Commit

Permalink
sql-query-connector: take trace ids by reference
Browse files Browse the repository at this point in the history
This commit is limited to sql-query-connector, it should not interfere
with #3505.

In general, we do not need ownership of the trace id in
sql-query-connector, since we only re-render it in comments. Working
with a reference is easier (it is `Copy`, etc.), and it already saves us
string copies and allocations with this commit.

The other purpose of this PR is that yesterday we discussed
introducing a `QueryContext<'_>` struct to hold things like the
trace id and connection info. Experience from designing similar context
structs in the schema team showed it is much easier to do if everything
in the context can be a reference.

On the side, I could not resist making a few public items crate-public,
to make the public surface of the crate smaller and clearer.
  • Loading branch information
tomhoule committed Jan 6, 2023
1 parent 2a694b5 commit ad36b5f
Show file tree
Hide file tree
Showing 12 changed files with 118 additions and 128 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -95,7 +95,7 @@ where
filter,
&selected_fields.into(),
aggr_selections,
trace_id,
trace_id.as_deref(),
)
.await
})
Expand All @@ -118,7 +118,7 @@ where
&selected_fields.into(),
aggr_selections,
SqlInfo::from(&self.connection_info),
trace_id,
trace_id.as_deref(),
)
.await
})
Expand All @@ -132,7 +132,7 @@ where
trace_id: Option<String>,
) -> connector::Result<Vec<(SelectionResult, SelectionResult)>> {
catch(self.connection_info.clone(), async move {
read::get_related_m2m_record_ids(&self.inner, from_field, from_record_ids, trace_id).await
read::get_related_m2m_record_ids(&self.inner, from_field, from_record_ids, trace_id.as_deref()).await
})
.await
}
Expand All @@ -154,7 +154,7 @@ where
selections,
group_by,
having,
trace_id,
trace_id.as_deref(),
)
.await
})
Expand All @@ -174,7 +174,14 @@ where
trace_id: Option<String>,
) -> connector::Result<SelectionResult> {
catch(self.connection_info.clone(), async move {
write::create_record(&self.inner, &self.connection_info.sql_family(), model, args, trace_id).await
write::create_record(
&self.inner,
&self.connection_info.sql_family(),
model,
args,
trace_id.as_deref(),
)
.await
})
.await
}
Expand All @@ -193,7 +200,7 @@ where
model,
args,
skip_duplicates,
trace_id,
trace_id.as_deref(),
)
.await
})
Expand All @@ -208,7 +215,7 @@ where
trace_id: Option<String>,
) -> connector::Result<usize> {
catch(self.connection_info.clone(), async move {
write::update_records(&self.inner, model, record_filter, args, trace_id).await
write::update_records(&self.inner, model, record_filter, args, trace_id.as_deref()).await
})
.await
}
Expand All @@ -221,7 +228,7 @@ where
trace_id: Option<String>,
) -> connector::Result<Option<SelectionResult>> {
catch(self.connection_info.clone(), async move {
let mut res = write::update_record(&self.inner, model, record_filter, args, trace_id).await?;
let mut res = write::update_record(&self.inner, model, record_filter, args, trace_id.as_deref()).await?;
Ok(res.pop())
})
.await
Expand All @@ -234,7 +241,7 @@ where
trace_id: Option<String>,
) -> connector::Result<usize> {
catch(self.connection_info.clone(), async move {
write::delete_records(&self.inner, model, record_filter, trace_id).await
write::delete_records(&self.inner, model, record_filter, trace_id.as_deref()).await
})
.await
}
Expand All @@ -245,7 +252,7 @@ where
trace_id: Option<String>,
) -> connector::Result<SingleRecord> {
catch(self.connection_info.clone(), async move {
native_upsert(&self.inner, upsert, trace_id).await
native_upsert(&self.inner, upsert, trace_id.as_deref()).await
})
.await
}
Expand All @@ -270,7 +277,7 @@ where
trace_id: Option<String>,
) -> connector::Result<()> {
catch(self.connection_info.clone(), async move {
write::m2m_disconnect(&self.inner, field, parent_id, child_ids, trace_id).await
write::m2m_disconnect(&self.inner, field, parent_id, child_ids, trace_id.as_deref()).await
})
.await
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,21 +11,15 @@ use futures::stream::{FuturesUnordered, StreamExt};
use prisma_models::*;
use quaint::ast::*;

pub async fn get_single_record(
pub(crate) async fn get_single_record(
conn: &dyn QueryExt,
model: &ModelRef,
filter: &Filter,
selected_fields: &ModelProjection,
aggr_selections: &[RelAggregationSelection],
trace_id: Option<String>,
trace_id: Option<&str>,
) -> crate::Result<Option<SingleRecord>> {
let query = read::get_records(
model,
selected_fields.as_columns(),
aggr_selections,
filter,
trace_id.clone(),
);
let query = read::get_records(model, selected_fields.as_columns(), aggr_selections, filter, trace_id);

let mut field_names: Vec<_> = selected_fields.db_names().collect();
let mut aggr_field_names: Vec<_> = aggr_selections.iter().map(|aggr_sel| aggr_sel.db_alias()).collect();
Expand Down Expand Up @@ -61,7 +55,7 @@ pub async fn get_many_records(
selected_fields: &ModelProjection,
aggr_selections: &[RelAggregationSelection],
sql_info: SqlInfo,
trace_id: Option<String>,
trace_id: Option<&str>,
) -> crate::Result<ManyRecords> {
let reversed = query_arguments.needs_reversed_order();

Expand Down Expand Up @@ -112,15 +106,9 @@ pub async fn get_many_records(
let mut futures = FuturesUnordered::new();

for args in batches.into_iter() {
let query = read::get_records(
model,
selected_fields.as_columns(),
aggr_selections,
args,
trace_id.clone(),
);

futures.push(conn.filter(query.into(), meta.as_slice(), trace_id.clone()));
let query = read::get_records(model, selected_fields.as_columns(), aggr_selections, args, trace_id);

futures.push(conn.filter(query.into(), meta.as_slice(), trace_id));
}

while let Some(result) = futures.next().await {
Expand All @@ -139,7 +127,7 @@ pub async fn get_many_records(
selected_fields.as_columns(),
aggr_selections,
query_arguments,
trace_id.clone(),
trace_id,
);

for item in conn.filter(query.into(), meta.as_slice(), trace_id).await?.into_iter() {
Expand All @@ -159,7 +147,7 @@ pub async fn get_related_m2m_record_ids(
conn: &dyn QueryExt,
from_field: &RelationFieldRef,
from_record_ids: &[SelectionResult],
trace_id: Option<String>,
trace_id: Option<&str>,
) -> crate::Result<Vec<(SelectionResult, SelectionResult)>> {
let mut idents = vec![];
idents.extend(ModelProjection::from(from_field.model().primary_identifier()).type_identifiers_with_arities());
Expand Down Expand Up @@ -231,7 +219,7 @@ pub async fn aggregate(
selections: Vec<AggregationSelection>,
group_by: Vec<ScalarFieldRef>,
having: Option<Filter>,
trace_id: Option<String>,
trace_id: Option<&str>,
) -> crate::Result<Vec<AggregationRow>> {
if !group_by.is_empty() {
group_by_aggregate(conn, model, query_arguments, selections, group_by, having, trace_id).await
Expand All @@ -247,9 +235,9 @@ async fn plain_aggregate(
model: &ModelRef,
query_arguments: QueryArguments,
selections: Vec<AggregationSelection>,
trace_id: Option<String>,
trace_id: Option<&str>,
) -> crate::Result<Vec<AggregationResult>> {
let query = read::aggregate(model, &selections, query_arguments, trace_id.clone());
let query = read::aggregate(model, &selections, query_arguments, trace_id);

let idents: Vec<_> = selections
.iter()
Expand All @@ -274,9 +262,9 @@ async fn group_by_aggregate(
selections: Vec<AggregationSelection>,
group_by: Vec<ScalarFieldRef>,
having: Option<Filter>,
trace_id: Option<String>,
trace_id: Option<&str>,
) -> crate::Result<Vec<AggregationRow>> {
let query = read::group_by_aggregate(model, query_arguments, &selections, group_by, having, trace_id.clone());
let query = read::group_by_aggregate(model, query_arguments, &selections, group_by, having, trace_id);

let idents: Vec<_> = selections
.iter()
Expand Down
Original file line number Diff line number Diff line change
@@ -1,20 +1,19 @@
use connector_interface::NativeUpsert;
use prisma_models::{ModelProjection, Record, SingleRecord};
use quaint::prelude::{OnConflict, Query};

use crate::{
column_metadata,
filter_conversion::AliasedCondition,
model_extensions::AsColumns,
query_builder::{build_update_and_set_query, create_record},
query_builder::write::{build_update_and_set_query, create_record},
query_ext::QueryExt,
row::ToSqlRow,
};
use connector_interface::NativeUpsert;
use prisma_models::{ModelProjection, Record, SingleRecord};
use quaint::prelude::{OnConflict, Query};

pub async fn native_upsert(
pub(crate) async fn native_upsert(
conn: &dyn QueryExt,
upsert: NativeUpsert,
trace_id: Option<String>,
trace_id: Option<&str>,
) -> crate::Result<SingleRecord> {
let selected_fields: ModelProjection = upsert.selected_fields().into();
let field_names: Vec<_> = selected_fields.db_names().collect();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ use user_facing_errors::query_engine::DatabaseConstraint;
async fn generate_id(
conn: &dyn QueryExt,
primary_key: &FieldSelection,
trace_id: Option<String>,
trace_id: Option<&str>,
args: &WriteArgs,
) -> crate::Result<Option<SelectionResult>> {
// Go through all the values and generate a select statement with the correct MySQL function
Expand Down Expand Up @@ -67,12 +67,12 @@ pub async fn create_record(
sql_family: &SqlFamily,
model: &ModelRef,
mut args: WriteArgs,
trace_id: Option<String>,
trace_id: Option<&str>,
) -> crate::Result<SelectionResult> {
let pk = model.primary_identifier();

let returned_id = if *sql_family == SqlFamily::Mysql {
generate_id(conn, &pk, trace_id.clone(), &args).await?
generate_id(conn, &pk, trace_id, &args).await?
} else {
args.as_record_projection(pk.clone().into())
};
Expand Down Expand Up @@ -159,7 +159,7 @@ pub async fn create_records(
model: &ModelRef,
args: Vec<WriteArgs>,
skip_duplicates: bool,
trace_id: Option<String>,
trace_id: Option<&str>,
) -> crate::Result<usize> {
if args.is_empty() {
return Ok(0);
Expand Down Expand Up @@ -199,7 +199,7 @@ async fn create_many_nonempty(
args: Vec<WriteArgs>,
skip_duplicates: bool,
affected_fields: HashSet<ScalarFieldRef>,
trace_id: Option<String>,
trace_id: Option<&str>,
) -> crate::Result<usize> {
let batches = if let Some(max_params) = sql_info.max_bind_values {
// We need to split inserts if they are above a parameter threshold, as well as split based on number of rows.
Expand Down Expand Up @@ -273,7 +273,7 @@ async fn create_many_nonempty(

let mut count = 0;
for batch in partitioned_batches {
let stmt = write::create_records_nonempty(model, batch, skip_duplicates, &affected_fields, trace_id.clone());
let stmt = write::create_records_nonempty(model, batch, skip_duplicates, &affected_fields, trace_id);
count += conn.execute(stmt.into()).await?;
}

Expand All @@ -286,7 +286,7 @@ async fn create_many_empty(
model: &ModelRef,
num_records: usize,
skip_duplicates: bool,
trace_id: Option<String>,
trace_id: Option<&str>,
) -> crate::Result<usize> {
let stmt = write::create_records_empty(model, skip_duplicates, trace_id);
let mut count = 0;
Expand All @@ -306,16 +306,14 @@ pub async fn update_record(
model: &ModelRef,
record_filter: RecordFilter,
args: WriteArgs,
trace_id: Option<String>,
trace_id: Option<&str>,
) -> crate::Result<Vec<SelectionResult>> {
let id_args = pick_args(&model.primary_identifier().into(), &args);

// This is to match the behaviour expected but it seems a bit strange to me
// This comes across as if the update happened even if it didn't
if args.args.is_empty() {
let ids: Vec<SelectionResult> = conn
.filter_selectors(model, record_filter.clone(), trace_id.clone())
.await?;
let ids: Vec<SelectionResult> = conn.filter_selectors(model, record_filter.clone(), trace_id).await?;

return Ok(ids);
}
Expand All @@ -332,10 +330,10 @@ async fn update_records_from_ids_and_filter(
model: &ModelRef,
record_filter: RecordFilter,
args: WriteArgs,
trace_id: Option<String>,
trace_id: Option<&str>,
) -> crate::Result<(usize, Vec<SelectionResult>)> {
let filter_condition = record_filter.clone().filter.aliased_condition_from(None, false);
let ids: Vec<SelectionResult> = conn.filter_selectors(model, record_filter, trace_id.clone()).await?;
let ids: Vec<SelectionResult> = conn.filter_selectors(model, record_filter, trace_id).await?;

if ids.is_empty() {
return Ok((0, Vec::new()));
Expand Down Expand Up @@ -365,7 +363,7 @@ async fn update_records_from_filter(
model: &ModelRef,
record_filter: RecordFilter,
args: WriteArgs,
trace_id: Option<String>,
trace_id: Option<&str>,
) -> crate::Result<usize> {
let update = build_update_and_set_query(model, args, trace_id);
let filter_condition = record_filter.clone().filter.aliased_condition_from(None, false);
Expand All @@ -385,7 +383,7 @@ pub async fn update_records(
model: &ModelRef,
record_filter: RecordFilter,
args: WriteArgs,
trace_id: Option<String>,
trace_id: Option<&str>,
) -> crate::Result<usize> {
if args.args.is_empty() {
return Ok(0);
Expand All @@ -400,14 +398,14 @@ pub async fn update_records(
}

/// Delete multiple records in `conn`, defined in the `Filter`. Result is the number of items deleted.
pub async fn delete_records(
pub(crate) async fn delete_records(
conn: &dyn QueryExt,
model: &ModelRef,
record_filter: RecordFilter,
trace_id: Option<String>,
trace_id: Option<&str>,
) -> crate::Result<usize> {
let filter_condition = record_filter.clone().filter.aliased_condition_from(None, false);
let ids = conn.filter_selectors(model, record_filter, trace_id.clone()).await?;
let ids = conn.filter_selectors(model, record_filter, trace_id).await?;
let ids: Vec<&SelectionResult> = ids.iter().collect();
let count = ids.len();

Expand All @@ -428,7 +426,7 @@ pub async fn delete_records(

/// Connect relations defined in `child_ids` to a parent defined in `parent_id`.
/// The relation information is in the `RelationFieldRef`.
pub async fn m2m_connect(
pub(crate) async fn m2m_connect(
conn: &dyn QueryExt,
field: &RelationFieldRef,
parent_id: &SelectionResult,
Expand All @@ -442,12 +440,12 @@ pub async fn m2m_connect(

/// Disconnect relations defined in `child_ids` to a parent defined in `parent_id`.
/// The relation information is in the `RelationFieldRef`.
pub async fn m2m_disconnect(
pub(crate) async fn m2m_disconnect(
conn: &dyn QueryExt,
field: &RelationFieldRef,
parent_id: &SelectionResult,
child_ids: &[SelectionResult],
trace_id: Option<String>,
trace_id: Option<&str>,
) -> crate::Result<()> {
let query = write::delete_relation_table_records(field, parent_id, child_ids, trace_id);
conn.delete(query).await?;
Expand All @@ -457,7 +455,7 @@ pub async fn m2m_disconnect(

/// Execute a plain SQL query with the given parameters, returning the number of
/// affected rows.
pub async fn execute_raw(
pub(crate) async fn execute_raw(
conn: &dyn QueryExt,
features: psl::PreviewFeatures,
inputs: HashMap<String, PrismaValue>,
Expand Down
Loading

0 comments on commit ad36b5f

Please sign in to comment.