Commit 049df20

store: Make write failures easier to debug
1 parent 89cb4d2 commit 049df20

5 files changed, +47 -9 lines

graph/src/components/store/write.rs

Lines changed: 11 additions & 0 deletions
@@ -933,6 +933,17 @@ impl<'a> WriteChunk<'a> {
             count: 0,
         }
     }
+
+    pub fn as_vec(&self) -> Vec<Self> {
+        (0..self.len())
+            .into_iter()
+            .map(|position| WriteChunk {
+                group: self.group,
+                chunk_size: 1,
+                position: self.position + position,
+            })
+            .collect()
+    }
 }
 
 impl<'a> IntoIterator for &WriteChunk<'a> {
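The new `as_vec` helper turns a chunk that covers several rows into a vector of single-row chunks, one per position, which is what the row-by-row fallback in relational.rs relies on. Below is a minimal, self-contained sketch of that splitting logic; the `Chunk` type and its `total` field are hypothetical stand-ins, since the real `WriteChunk` borrows its `RowGroup` and derives `len` from it.

// Simplified, standalone model of WriteChunk::as_vec. `Chunk` and its `total`
// field are illustrative only; the real WriteChunk has `group`, `chunk_size`,
// and `position` fields as shown in the diff above.
#[derive(Debug)]
struct Chunk {
    position: usize,
    chunk_size: usize,
    total: usize, // assumed stand-in for the number of rows in the group
}

impl Chunk {
    // Number of rows this chunk actually covers (the last chunk may be short).
    fn len(&self) -> usize {
        self.chunk_size.min(self.total - self.position)
    }

    // Mirrors the new method: one chunk of size 1 for every row in `self`,
    // at consecutive positions.
    fn as_vec(&self) -> Vec<Chunk> {
        (0..self.len())
            .map(|offset| Chunk {
                position: self.position + offset,
                chunk_size: 1,
                total: self.total,
            })
            .collect()
    }
}

fn main() {
    let chunk = Chunk { position: 4, chunk_size: 3, total: 10 };
    let singles = chunk.as_vec();
    assert_eq!(singles.len(), 3);
    assert!(singles.iter().all(|c| c.chunk_size == 1));
    assert_eq!(singles[0].position, 4);
    assert_eq!(singles[2].position, 6);
}

One small note on the committed version: `(0..self.len())` is already an iterator, so the extra `.into_iter()` call is redundant, though harmless.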

store/postgres/src/deployment_store.rs

Lines changed: 1 addition & 1 deletion
@@ -346,7 +346,7 @@ impl DeploymentStore {
             section.end();
 
             let section = stopwatch.start_section("apply_entity_modifications_insert");
-            layout.insert(conn, group, stopwatch).await?;
+            layout.insert(&self.logger, conn, group, stopwatch).await?;
             section.end();
         }
 
store/postgres/src/relational.rs

Lines changed: 33 additions & 6 deletions
@@ -734,6 +734,7 @@ impl Layout {
 
     pub async fn insert<'a>(
         &'a self,
+        logger: &Logger,
        conn: &mut AsyncPgConnection,
        group: &'a RowGroup,
        stopwatch: &StopwatchMetrics,
@@ -767,13 +768,39 @@ impl Layout {
             for chunk in group.write_chunks(chunk_size) {
                 // Empty chunks would lead to invalid SQL
                 if !chunk.is_empty() {
-                    InsertQuery::new(table, &chunk)?
-                        .execute(conn)
-                        .await
-                        .map_err(|e| {
+                    if let Err(e) = InsertQuery::new(table, &chunk)?.execute(conn).await {
+                        // We occasionally get these errors but it's entirely
+                        // unclear what causes them. We work around that by
+                        // switching to row-by-row inserts until we can figure
+                        // out what the underlying cause is
+                        let err_msg = e.to_string();
+                        if !err_msg.contains("value too large to transmit") {
                             let (block, msg) = chunk_details(&chunk);
-                            StoreError::write_failure(e, table.object.as_str(), block, msg)
-                        })?;
+                            return Err(StoreError::write_failure(
+                                e,
+                                table.object.as_str(),
+                                block,
+                                msg,
+                            ));
+                        }
+                        let (block, msg) = chunk_details(&chunk);
+                        warn!(logger, "Insert of entire chunk failed. Trying row by row insert.";
+                              "table" => table.object.as_str(),
+                              "block" => block,
+                              "error" => err_msg,
+                              "details" => msg
+                        );
+                        for single_chunk in chunk.as_vec() {
+                            InsertQuery::new(table, &single_chunk)?
+                                .execute(conn)
+                                .await
+                                .map_err(|e| {
+                                    let (block, msg) = chunk_details(&single_chunk);
+                                    let msg = format!("{}: offending row {:?}", msg, single_chunk);
+                                    StoreError::write_failure(e, table.object.as_str(), block, msg)
+                                })?;
+                        }
+                    }
                 }
             }
             Ok(())
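For reference, here is a minimal, self-contained sketch of the fallback strategy this hunk introduces: attempt the bulk insert first, and only when the unexplained "value too large to transmit" error appears, retry the rows one at a time so the failing row can be named in the error. `Row`, `InsertError`, and `bulk_insert` are hypothetical stand-ins for the real `InsertQuery`, `StoreError`, and chunk machinery.

// Hypothetical, simplified model of the bulk-then-row-by-row fallback.
#[derive(Debug)]
struct Row(u32);

#[derive(Debug)]
enum InsertError {
    TooLarge,      // stands in for the "value too large to transmit" error
    Other(String), // every other failure
}

// Fake bulk insert: multi-row batches containing the value 42 trip the
// mysterious "too large" error; single rows never do.
fn bulk_insert(rows: &[Row]) -> Result<(), InsertError> {
    if rows.len() > 1 && rows.iter().any(|r| r.0 == 42) {
        Err(InsertError::TooLarge)
    } else {
        Ok(())
    }
}

fn insert_with_fallback(rows: &[Row]) -> Result<(), InsertError> {
    match bulk_insert(rows) {
        Ok(()) => Ok(()),
        // Only the unexplained error triggers the row-by-row retry; any other
        // error is reported immediately, as in the committed code.
        Err(InsertError::TooLarge) => {
            eprintln!("insert of entire chunk failed; trying row by row");
            for row in rows {
                // A failure here pinpoints the offending row in the message.
                bulk_insert(std::slice::from_ref(row)).map_err(|e| {
                    InsertError::Other(format!("{e:?}: offending row {row:?}"))
                })?;
            }
            Ok(())
        }
        Err(e) => Err(e),
    }
}

fn main() {
    // The batch trips the simulated error, but each row succeeds on its own.
    let rows = vec![Row(1), Row(42), Row(3)];
    assert!(insert_with_fallback(&rows).is_ok());
}

In the committed code the retry additionally emits a `warn!` with the chunk details, and the single-row error message embeds the offending `WriteChunk` via its Debug output.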

store/test-store/tests/postgres/relational.rs

Lines changed: 1 addition & 1 deletion
@@ -271,7 +271,7 @@ async fn insert_entity_at(
     );
     let group = row_group_insert(&entity_type, block, entities_with_keys_owned.clone());
     layout
-        .insert(conn, &group, &MOCK_STOPWATCH)
+        .insert(&LOGGER, conn, &group, &MOCK_STOPWATCH)
         .await
         .expect(&errmsg);
     assert_eq!(

store/test-store/tests/postgres/relational_bytes.rs

Lines changed: 1 addition & 1 deletion
@@ -134,7 +134,7 @@ async fn insert_entity(
     let group = row_group_insert(&entity_type, 0, entities);
     let errmsg = format!("Failed to insert entity {}[{}]", entity_type, key.entity_id);
     layout
-        .insert(conn, &group, &MOCK_STOPWATCH)
+        .insert(&LOGGER, conn, &group, &MOCK_STOPWATCH)
         .await
         .expect(&errmsg);
 }
