From d919a908da85b5703c07ffb5e0c9bd825240901a Mon Sep 17 00:00:00 2001 From: Oguz Kocer Date: Thu, 18 Dec 2025 10:59:50 -0500 Subject: [PATCH 01/25] Add list metadata repository to wp_mobile_cache Introduces database infrastructure for tracking list metadata, enabling efficient cache invalidation and stale detection for paginated lists. Changes: - Add migration for list metadata tables - Add DbListMetadata types for database operations - Add ListMetadata domain types - Add ListMetadataRepository with full CRUD operations - Update PostsRepository with stale detection helpers Extracted from prototype/metadata-collection branch. Original commits: - https://github.com/Automattic/wordpress-rs/commit/14b01d80 (Implement stale detection by comparing modified_gmt timestamps) - https://github.com/Automattic/wordpress-rs/commit/e47cec89 (Add database foundation for MetadataService) - https://github.com/Automattic/wordpress-rs/commit/2440a13f (Add list metadata repository concurrency helpers) - https://github.com/Automattic/wordpress-rs/commit/cc0c8a58 (Reset stale fetching states on app launch) - https://github.com/Automattic/wordpress-rs/commit/d64142fb (Split collection observers for data vs state updates) - https://github.com/Automattic/wordpress-rs/commit/fe7435c9 (make fmt-rust) - https://github.com/Automattic/wordpress-rs/commit/2918b339 (Add parent and menu_order fields to list metadata items) - https://github.com/Automattic/wordpress-rs/commit/25f88a49 (Rename last_updated_at to last_fetched_at in list metadata) - https://github.com/Automattic/wordpress-rs/commit/1d709e70 (Simplify ListMetadataReader trait with combined ListInfo query) Note: Since we use a rebase/squash merge strategy, these commits may show "does not belong to any branch" on GitHub but remain accessible via URL. 
--- .../0007-create-list-metadata-tables.sql | 46 + wp_mobile_cache/src/db_types.rs | 1 + .../src/db_types/db_list_metadata.rs | 166 ++ wp_mobile_cache/src/lib.rs | 107 +- wp_mobile_cache/src/list_metadata.rs | 131 ++ .../src/repository/list_metadata.rs | 1366 +++++++++++++++++ wp_mobile_cache/src/repository/mod.rs | 1 + wp_mobile_cache/src/repository/posts.rs | 58 +- 8 files changed, 1873 insertions(+), 3 deletions(-) create mode 100644 wp_mobile_cache/migrations/0007-create-list-metadata-tables.sql create mode 100644 wp_mobile_cache/src/db_types/db_list_metadata.rs create mode 100644 wp_mobile_cache/src/list_metadata.rs create mode 100644 wp_mobile_cache/src/repository/list_metadata.rs diff --git a/wp_mobile_cache/migrations/0007-create-list-metadata-tables.sql b/wp_mobile_cache/migrations/0007-create-list-metadata-tables.sql new file mode 100644 index 000000000..e69a7eb4b --- /dev/null +++ b/wp_mobile_cache/migrations/0007-create-list-metadata-tables.sql @@ -0,0 +1,46 @@ +-- Table 1: List header/pagination info +CREATE TABLE `list_metadata` ( + `rowid` INTEGER PRIMARY KEY AUTOINCREMENT, + `db_site_id` INTEGER NOT NULL, + `key` TEXT NOT NULL, -- e.g., "edit:posts:publish" + `total_pages` INTEGER, + `total_items` INTEGER, + `current_page` INTEGER NOT NULL DEFAULT 0, + `per_page` INTEGER NOT NULL DEFAULT 20, + `last_first_page_fetched_at` TEXT, + `last_fetched_at` TEXT, + `version` INTEGER NOT NULL DEFAULT 0, + + FOREIGN KEY (db_site_id) REFERENCES db_sites(id) ON DELETE CASCADE +) STRICT; + +CREATE UNIQUE INDEX idx_list_metadata_unique_key ON list_metadata(db_site_id, key); + +-- Table 2: List items (rowid = insertion order = display order) +CREATE TABLE `list_metadata_items` ( + `rowid` INTEGER PRIMARY KEY AUTOINCREMENT, + `db_site_id` INTEGER NOT NULL, + `key` TEXT NOT NULL, + `entity_id` INTEGER NOT NULL, -- post/comment/etc ID + `modified_gmt` TEXT, -- nullable for entities without it + `parent` INTEGER, -- parent post ID (for hierarchical post types like 
pages) + `menu_order` INTEGER, -- menu order (for hierarchical post types) + + FOREIGN KEY (db_site_id) REFERENCES db_sites(id) ON DELETE CASCADE +) STRICT; + +CREATE INDEX idx_list_metadata_items_key ON list_metadata_items(db_site_id, key); +CREATE INDEX idx_list_metadata_items_entity ON list_metadata_items(db_site_id, entity_id); + +-- Table 3: Sync state (FK to list_metadata, not duplicating key) +CREATE TABLE `list_metadata_state` ( + `rowid` INTEGER PRIMARY KEY AUTOINCREMENT, + `list_metadata_id` INTEGER NOT NULL, + `state` TEXT NOT NULL DEFAULT 'idle', -- idle, fetching_first_page, fetching_next_page, error + `error_message` TEXT, + `updated_at` TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')), + + FOREIGN KEY (list_metadata_id) REFERENCES list_metadata(rowid) ON DELETE CASCADE +) STRICT; + +CREATE UNIQUE INDEX idx_list_metadata_state_unique ON list_metadata_state(list_metadata_id); diff --git a/wp_mobile_cache/src/db_types.rs b/wp_mobile_cache/src/db_types.rs index 0a3874799..eabc434ff 100644 --- a/wp_mobile_cache/src/db_types.rs +++ b/wp_mobile_cache/src/db_types.rs @@ -1,3 +1,4 @@ +pub mod db_list_metadata; pub mod db_site; pub mod db_term_relationship; pub mod helpers; diff --git a/wp_mobile_cache/src/db_types/db_list_metadata.rs b/wp_mobile_cache/src/db_types/db_list_metadata.rs new file mode 100644 index 000000000..0fe12e00c --- /dev/null +++ b/wp_mobile_cache/src/db_types/db_list_metadata.rs @@ -0,0 +1,166 @@ +use crate::{ + SqliteDbError, + db_types::row_ext::{ColumnIndex, RowExt}, + list_metadata::{ + DbListHeaderWithState, DbListMetadata, DbListMetadataItem, DbListMetadataState, ListState, + }, +}; +use rusqlite::Row; + +/// Column indexes for list_metadata table. +/// These must match the order of columns in the CREATE TABLE statement. 
+#[repr(usize)] +#[derive(Debug, Clone, Copy)] +pub enum ListMetadataColumn { + Rowid = 0, + DbSiteId = 1, + Key = 2, + TotalPages = 3, + TotalItems = 4, + CurrentPage = 5, + PerPage = 6, + LastFirstPageFetchedAt = 7, + LastFetchedAt = 8, + Version = 9, +} + +impl ColumnIndex for ListMetadataColumn { + fn as_index(&self) -> usize { + *self as usize + } +} + +impl DbListMetadata { + /// Construct a list metadata entity from a database row. + pub fn from_row(row: &Row) -> Result { + use ListMetadataColumn as Col; + + Ok(Self { + row_id: row.get_column(Col::Rowid)?, + db_site_id: row.get_column(Col::DbSiteId)?, + key: row.get_column(Col::Key)?, + total_pages: row.get_column(Col::TotalPages)?, + total_items: row.get_column(Col::TotalItems)?, + current_page: row.get_column(Col::CurrentPage)?, + per_page: row.get_column(Col::PerPage)?, + last_first_page_fetched_at: row.get_column(Col::LastFirstPageFetchedAt)?, + last_fetched_at: row.get_column(Col::LastFetchedAt)?, + version: row.get_column(Col::Version)?, + }) + } +} + +/// Column indexes for list_metadata_items table. +/// These must match the order of columns in the CREATE TABLE statement. +#[repr(usize)] +#[derive(Debug, Clone, Copy)] +pub enum ListMetadataItemColumn { + Rowid = 0, + DbSiteId = 1, + Key = 2, + EntityId = 3, + ModifiedGmt = 4, + Parent = 5, + MenuOrder = 6, +} + +impl ColumnIndex for ListMetadataItemColumn { + fn as_index(&self) -> usize { + *self as usize + } +} + +impl DbListMetadataItem { + /// Construct a list metadata item from a database row. + pub fn from_row(row: &Row) -> Result { + use ListMetadataItemColumn as Col; + + Ok(Self { + row_id: row.get_column(Col::Rowid)?, + db_site_id: row.get_column(Col::DbSiteId)?, + key: row.get_column(Col::Key)?, + entity_id: row.get_column(Col::EntityId)?, + modified_gmt: row.get_column(Col::ModifiedGmt)?, + parent: row.get_column(Col::Parent)?, + menu_order: row.get_column(Col::MenuOrder)?, + }) + } +} + +/// Column indexes for list_metadata_state table. 
+/// These must match the order of columns in the CREATE TABLE statement. +#[repr(usize)] +#[derive(Debug, Clone, Copy)] +pub enum ListMetadataStateColumn { + Rowid = 0, + ListMetadataId = 1, + State = 2, + ErrorMessage = 3, + UpdatedAt = 4, +} + +impl ColumnIndex for ListMetadataStateColumn { + fn as_index(&self) -> usize { + *self as usize + } +} + +impl DbListMetadataState { + /// Construct a list metadata state from a database row. + pub fn from_row(row: &Row) -> Result { + use ListMetadataStateColumn as Col; + + let state_str: String = row.get_column(Col::State)?; + + Ok(Self { + row_id: row.get_column(Col::Rowid)?, + list_metadata_id: row.get_column(Col::ListMetadataId)?, + state: ListState::from(state_str), + error_message: row.get_column(Col::ErrorMessage)?, + updated_at: row.get_column(Col::UpdatedAt)?, + }) + } +} + +/// Column indexes for the header + state JOIN query. +/// +/// Query: SELECT m.total_pages, m.total_items, m.current_page, m.per_page, s.state, s.error_message +/// FROM list_metadata m LEFT JOIN list_metadata_state s ON ... +#[repr(usize)] +#[derive(Debug, Clone, Copy)] +pub enum ListHeaderWithStateColumn { + TotalPages = 0, + TotalItems = 1, + CurrentPage = 2, + PerPage = 3, + State = 4, + ErrorMessage = 5, +} + +impl ColumnIndex for ListHeaderWithStateColumn { + fn as_index(&self) -> usize { + *self as usize + } +} + +impl DbListHeaderWithState { + /// Construct from a JOIN query row. 
+ /// + /// Expects columns in order: total_pages, total_items, current_page, per_page, state, error_message + pub fn from_row(row: &Row) -> Result { + use ListHeaderWithStateColumn as Col; + + // state is nullable due to LEFT JOIN - default to "idle" + let state_str: Option = row.get_column(Col::State)?; + let state = state_str.map(ListState::from).unwrap_or(ListState::Idle); + + Ok(Self { + state, + error_message: row.get_column(Col::ErrorMessage)?, + current_page: row.get_column(Col::CurrentPage)?, + total_pages: row.get_column(Col::TotalPages)?, + total_items: row.get_column(Col::TotalItems)?, + per_page: row.get_column(Col::PerPage)?, + }) + } +} diff --git a/wp_mobile_cache/src/lib.rs b/wp_mobile_cache/src/lib.rs index cff397785..0d1c1dbd2 100644 --- a/wp_mobile_cache/src/lib.rs +++ b/wp_mobile_cache/src/lib.rs @@ -6,6 +6,7 @@ use std::sync::Mutex; pub mod context; pub mod db_types; pub mod entity; +pub mod list_metadata; pub mod repository; pub mod term_relationships; @@ -64,6 +65,12 @@ pub enum DbTable { DbSites, /// Term relationships (post-category, post-tag associations) TermRelationships, + /// List metadata headers (pagination, version) + ListMetadata, + /// List metadata items (entity IDs with ordering) + ListMetadataItems, + /// List metadata sync state (idle, fetching, error) + ListMetadataState, } impl DbTable { @@ -79,6 +86,9 @@ impl DbTable { DbTable::SelfHostedSites => "self_hosted_sites", DbTable::DbSites => "db_sites", DbTable::TermRelationships => "term_relationships", + DbTable::ListMetadata => "list_metadata", + DbTable::ListMetadataItems => "list_metadata_items", + DbTable::ListMetadataState => "list_metadata_state", } } } @@ -107,6 +117,9 @@ impl TryFrom<&str> for DbTable { "self_hosted_sites" => Ok(DbTable::SelfHostedSites), "db_sites" => Ok(DbTable::DbSites), "term_relationships" => Ok(DbTable::TermRelationships), + "list_metadata" => Ok(DbTable::ListMetadata), + "list_metadata_items" => Ok(DbTable::ListMetadataItems), + 
"list_metadata_state" => Ok(DbTable::ListMetadataState), _ => Err(DbTableError::UnknownTable(table_name.to_string())), } } @@ -249,7 +262,13 @@ impl WpApiCache { pub fn perform_migrations(&self) -> Result { self.execute(|connection| { let mut mgr = MigrationManager::new(connection)?; - mgr.perform_migrations().map_err(SqliteDbError::from) + let version = mgr.perform_migrations().map_err(SqliteDbError::from)?; + + // Reset stale fetching states after migrations complete. + // See `reset_stale_fetching_states` for rationale. + Self::reset_stale_fetching_states_internal(connection); + + Ok(version) }) } @@ -288,6 +307,89 @@ impl WpApiCache { } impl WpApiCache { + /// Resets stale fetching states (`FetchingFirstPage`, `FetchingNextPage`) to `Idle`. + /// + /// # Why This Is Needed + /// + /// The `ListState` enum includes transient states that represent in-progress operations: + /// - `FetchingFirstPage` - A refresh/pull-to-refresh is in progress + /// - `FetchingNextPage` - A "load more" pagination fetch is in progress + /// + /// If the app is killed, crashes, or the process terminates while a fetch is in progress, + /// these states will persist in the database. On the next app launch: + /// - UI might show a perpetual loading indicator + /// - New fetch attempts might be blocked if code checks "already fetching" + /// - State is inconsistent with reality (no fetch is actually in progress) + /// + /// # Why We Reset in `WpApiCache` Initialization + /// + /// We considered several approaches: + /// + /// 1. **Reset in `MetadataService::new()`** - Rejected because `MetadataService` is not + /// a singleton. Multiple services (PostService, CommentService, etc.) each create + /// their own `MetadataService` instance. Resetting on each instantiation would + /// incorrectly reset states when a new service is created mid-session. + /// + /// 2. 
**Reset in `WpApiCache` initialization** (this approach) - Chosen because + /// `WpApiCache` is typically created once at app startup, making it the right + /// timing for session-boundary cleanup. + /// + /// 3. **Session tokens** - More complex: tag states with a session ID and treat + /// mismatched sessions as stale. Adds schema complexity for minimal benefit. + /// + /// 4. **In-memory only for fetching states** - Keep transient states in memory, + /// only persist `Idle`/`Error`. Adds complexity in state management. + /// + /// # Theoretical Issues + /// + /// This approach assumes `WpApiCache` is created once per app session. If an app + /// architecture creates multiple `WpApiCache` instances during a session (e.g., + /// recreating it after a user logs out and back in), this would reset in-progress + /// fetches. In practice: + /// - Most apps create `WpApiCache` once at startup + /// - If your architecture differs, consider wrapping this in a "first launch" check + /// or using a session token approach + /// + /// # Note on `Error` State + /// + /// We intentionally do NOT reset `Error` states. These represent completed (failed) + /// operations, not in-progress ones. Preserving them allows: + /// - UI to show "last sync failed" on launch + /// - Debugging by inspecting `error_message` + /// + /// If you need a fresh start, the user can trigger a refresh which will overwrite + /// the error state. 
+ fn reset_stale_fetching_states_internal(connection: &mut Connection) { + use crate::list_metadata::ListState; + + // Reset both fetching states to idle + let result = connection.execute( + "UPDATE list_metadata_state SET state = ?1 WHERE state IN (?2, ?3)", + params![ + ListState::Idle.as_db_str(), + ListState::FetchingFirstPage.as_db_str(), + ListState::FetchingNextPage.as_db_str(), + ], + ); + + match result { + Ok(count) if count > 0 => { + eprintln!( + "WpApiCache: Reset {} stale fetching state(s) from previous session", + count + ); + } + Ok(_) => { + // No stale states found - normal case + } + Err(e) => { + // Log but don't fail - table might not exist yet on fresh install + // (though we run this after migrations, so it should exist) + eprintln!("WpApiCache: Failed to reset stale fetching states: {}", e); + } + } + } + /// Execute a database operation with scoped access to the connection. /// /// This is the **only** way to access the database. The provided closure @@ -350,13 +452,14 @@ impl From for WpApiCache { } } -static MIGRATION_QUERIES: [&str; 6] = [ +static MIGRATION_QUERIES: [&str; 7] = [ include_str!("../migrations/0001-create-sites-table.sql"), include_str!("../migrations/0002-create-posts-table.sql"), include_str!("../migrations/0003-create-term-relationships.sql"), include_str!("../migrations/0004-create-posts-view-context-table.sql"), include_str!("../migrations/0005-create-posts-embed-context-table.sql"), include_str!("../migrations/0006-create-self-hosted-sites-table.sql"), + include_str!("../migrations/0007-create-list-metadata-tables.sql"), ]; pub struct MigrationManager<'a> { diff --git a/wp_mobile_cache/src/list_metadata.rs b/wp_mobile_cache/src/list_metadata.rs new file mode 100644 index 000000000..616b89205 --- /dev/null +++ b/wp_mobile_cache/src/list_metadata.rs @@ -0,0 +1,131 @@ +use crate::RowId; + +/// Represents list metadata header in the database. 
+/// +/// Stores pagination info and version for a specific list (e.g., "edit:posts:publish"). +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct DbListMetadata { + /// SQLite rowid of this list metadata + pub row_id: RowId, + /// Database site ID (rowid from sites table) + pub db_site_id: RowId, + /// List key (e.g., "edit:posts:publish") + pub key: String, + /// Total number of pages from API response + pub total_pages: Option, + /// Total number of items from API response + pub total_items: Option, + /// Current page that has been loaded (0 = no pages loaded) + pub current_page: i64, + /// Items per page + pub per_page: i64, + /// ISO 8601 timestamp of when page 1 was last fetched + pub last_first_page_fetched_at: Option, + /// ISO 8601 timestamp of when any page was last fetched + pub last_fetched_at: Option, + /// Version number, incremented on page 1 refresh for concurrency control + pub version: i64, +} + +/// Represents a single item in a list metadata collection. +/// +/// Items are ordered by rowid (insertion order = display order). +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct DbListMetadataItem { + /// SQLite rowid (determines display order) + pub row_id: RowId, + /// Database site ID + pub db_site_id: RowId, + /// List key this item belongs to + pub key: String, + /// Entity ID (post ID, comment ID, etc.) + pub entity_id: i64, + /// Last modified timestamp (for staleness detection) + pub modified_gmt: Option, + /// Parent entity ID (for hierarchical post types like pages) + pub parent: Option, + /// Menu order (for hierarchical post types) + pub menu_order: Option, +} + +/// Represents sync state for a list metadata. 
+#[derive(Debug, Clone, PartialEq, Eq)] +pub struct DbListMetadataState { + /// SQLite rowid + pub row_id: RowId, + /// Foreign key to list_metadata.rowid + pub list_metadata_id: RowId, + /// Current sync state + pub state: ListState, + /// Error message if state is error + pub error_message: Option, + /// ISO 8601 timestamp of last state change + pub updated_at: String, +} + +/// Sync state for a list. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, uniffi::Enum)] +pub enum ListState { + /// No sync in progress + #[default] + Idle, + /// Fetching first page (pull-to-refresh) + FetchingFirstPage, + /// Fetching subsequent page (load more) + FetchingNextPage, + /// Last sync failed + Error, +} + +impl ListState { + /// Convert to database string representation. + pub fn as_db_str(&self) -> &'static str { + match self { + ListState::Idle => "idle", + ListState::FetchingFirstPage => "fetching_first_page", + ListState::FetchingNextPage => "fetching_next_page", + ListState::Error => "error", + } + } +} + +impl From<&str> for ListState { + fn from(s: &str) -> Self { + match s { + "idle" => ListState::Idle, + "fetching_first_page" => ListState::FetchingFirstPage, + "fetching_next_page" => ListState::FetchingNextPage, + "error" => ListState::Error, + _ => { + // Default to Idle for unknown states to avoid panics + eprintln!("Warning: Unknown ListState '{}', defaulting to Idle", s); + ListState::Idle + } + } + } +} + +impl From for ListState { + fn from(s: String) -> Self { + ListState::from(s.as_str()) + } +} + +/// Combined header + state from a JOIN query. +/// +/// Contains pagination info from `list_metadata` and sync state from `list_metadata_state`. 
+#[derive(Debug, Clone, PartialEq, Eq)] +pub struct DbListHeaderWithState { + /// Current sync state (defaults to Idle if no state record exists) + pub state: ListState, + /// Error message if state is Error + pub error_message: Option, + /// Current page that has been loaded (0 = no pages loaded) + pub current_page: i64, + /// Total number of pages from API response + pub total_pages: Option, + /// Total number of items from API response + pub total_items: Option, + /// Items per page + pub per_page: i64, +} diff --git a/wp_mobile_cache/src/repository/list_metadata.rs b/wp_mobile_cache/src/repository/list_metadata.rs new file mode 100644 index 000000000..1f0576115 --- /dev/null +++ b/wp_mobile_cache/src/repository/list_metadata.rs @@ -0,0 +1,1366 @@ +use crate::{ + DbTable, RowId, SqliteDbError, + db_types::db_site::DbSite, + list_metadata::{ + DbListHeaderWithState, DbListMetadata, DbListMetadataItem, DbListMetadataState, ListState, + }, + repository::QueryExecutor, +}; + +/// Repository for managing list metadata in the database. +/// +/// Provides methods for querying and managing list pagination, items, and sync state. +pub struct ListMetadataRepository; + +impl ListMetadataRepository { + /// Get the database table for list metadata headers + pub const fn header_table() -> DbTable { + DbTable::ListMetadata + } + + /// Get the database table for list metadata items + pub const fn items_table() -> DbTable { + DbTable::ListMetadataItems + } + + /// Get the database table for list metadata state + pub const fn state_table() -> DbTable { + DbTable::ListMetadataState + } + + // ============================================================ + // Read Operations + // ============================================================ + + /// Get list metadata header by site and key. + /// + /// Returns None if the list doesn't exist. 
+ pub fn get_header( + &self, + executor: &impl QueryExecutor, + site: &DbSite, + key: &str, + ) -> Result, SqliteDbError> { + let sql = format!( + "SELECT * FROM {} WHERE db_site_id = ? AND key = ?", + Self::header_table().table_name() + ); + let mut stmt = executor.prepare(&sql)?; + let mut rows = stmt.query_map(rusqlite::params![site.row_id, key], |row| { + DbListMetadata::from_row(row) + .map_err(|e| rusqlite::Error::ToSqlConversionFailure(Box::new(e))) + })?; + + match rows.next() { + Some(result) => Ok(Some(result.map_err(SqliteDbError::from)?)), + None => Ok(None), + } + } + + /// Get or create list metadata header. + /// + /// If the header doesn't exist, creates it with default values and returns its rowid. + /// If it exists, returns the existing rowid. + pub fn get_or_create( + &self, + executor: &impl QueryExecutor, + site: &DbSite, + key: &str, + ) -> Result { + // Try to get existing + if let Some(header) = self.get_header(executor, site, key)? { + return Ok(header.row_id); + } + + // Create new header with defaults + let sql = format!( + "INSERT INTO {} (db_site_id, key, current_page, per_page, version) VALUES (?, ?, 0, 20, 0)", + Self::header_table().table_name() + ); + executor.execute(&sql, rusqlite::params![site.row_id, key])?; + + Ok(executor.last_insert_rowid()) + } + + /// Get all items for a list, ordered by rowid (insertion order = display order). + pub fn get_items( + &self, + executor: &impl QueryExecutor, + site: &DbSite, + key: &str, + ) -> Result, SqliteDbError> { + let sql = format!( + "SELECT * FROM {} WHERE db_site_id = ? AND key = ? ORDER BY rowid", + Self::items_table().table_name() + ); + let mut stmt = executor.prepare(&sql)?; + let rows = stmt.query_map(rusqlite::params![site.row_id, key], |row| { + DbListMetadataItem::from_row(row) + .map_err(|e| rusqlite::Error::ToSqlConversionFailure(Box::new(e))) + })?; + + rows.collect::, _>>() + .map_err(SqliteDbError::from) + } + + /// Get the current sync state for a list. 
+ /// + /// Returns None if no state record exists (list not yet synced). + pub fn get_state( + &self, + executor: &impl QueryExecutor, + list_metadata_id: RowId, + ) -> Result, SqliteDbError> { + let sql = format!( + "SELECT * FROM {} WHERE list_metadata_id = ?", + Self::state_table().table_name() + ); + let mut stmt = executor.prepare(&sql)?; + let mut rows = stmt.query_map(rusqlite::params![list_metadata_id], |row| { + DbListMetadataState::from_row(row) + .map_err(|e| rusqlite::Error::ToSqlConversionFailure(Box::new(e))) + })?; + + match rows.next() { + Some(result) => Ok(Some(result.map_err(SqliteDbError::from)?)), + None => Ok(None), + } + } + + /// Get the current sync state for a list by site and key. + /// + /// Convenience method that looks up the list_metadata_id first. + /// Returns ListState::Idle if the list or state doesn't exist. + pub fn get_state_by_key( + &self, + executor: &impl QueryExecutor, + site: &DbSite, + key: &str, + ) -> Result { + let header = self.get_header(executor, site, key)?; + match header { + Some(h) => { + let state = self.get_state(executor, h.row_id)?; + Ok(state.map(|s| s.state).unwrap_or(ListState::Idle)) + } + None => Ok(ListState::Idle), + } + } + + /// Get header with state in a single JOIN query. + /// + /// Returns pagination info + sync state combined. More efficient than + /// calling `get_header()` and `get_state()` separately when both are needed. + /// + /// Returns `None` if the list doesn't exist. + pub fn get_header_with_state( + &self, + executor: &impl QueryExecutor, + site: &DbSite, + key: &str, + ) -> Result, SqliteDbError> { + let sql = format!( + "SELECT m.total_pages, m.total_items, m.current_page, m.per_page, s.state, s.error_message \ + FROM {} m \ + LEFT JOIN {} s ON s.list_metadata_id = m.rowid \ + WHERE m.db_site_id = ? 
AND m.key = ?", + Self::header_table().table_name(), + Self::state_table().table_name() + ); + let mut stmt = executor.prepare(&sql)?; + let mut rows = stmt.query_map(rusqlite::params![site.row_id, key], |row| { + DbListHeaderWithState::from_row(row) + .map_err(|e| rusqlite::Error::ToSqlConversionFailure(Box::new(e))) + })?; + + match rows.next() { + Some(result) => Ok(Some(result.map_err(SqliteDbError::from)?)), + None => Ok(None), + } + } + + /// Get the current version for a list. + /// + /// Returns 0 if the list doesn't exist. + pub fn get_version( + &self, + executor: &impl QueryExecutor, + site: &DbSite, + key: &str, + ) -> Result { + let header = self.get_header(executor, site, key)?; + Ok(header.map(|h| h.version).unwrap_or(0)) + } + + /// Check if the current version matches the expected version. + /// + /// Used for concurrency control to detect if a refresh happened + /// while a load-more operation was in progress. + pub fn check_version( + &self, + executor: &impl QueryExecutor, + site: &DbSite, + key: &str, + expected_version: i64, + ) -> Result { + let current_version = self.get_version(executor, site, key)?; + Ok(current_version == expected_version) + } + + /// Get the item count for a list. + pub fn get_item_count( + &self, + executor: &impl QueryExecutor, + site: &DbSite, + key: &str, + ) -> Result { + let sql = format!( + "SELECT COUNT(*) FROM {} WHERE db_site_id = ? AND key = ?", + Self::items_table().table_name() + ); + let mut stmt = executor.prepare(&sql)?; + stmt.query_row(rusqlite::params![site.row_id, key], |row| row.get(0)) + .map_err(SqliteDbError::from) + } + + // ============================================================ + // Write Operations + // ============================================================ + + /// Set items for a list, replacing any existing items. + /// + /// Used for refresh (page 1) - deletes all existing items and inserts new ones. + /// Items are inserted in order, so rowid determines display order. 
+ pub fn set_items( + &self, + executor: &impl QueryExecutor, + site: &DbSite, + key: &str, + items: &[ListMetadataItemInput], + ) -> Result<(), SqliteDbError> { + // Delete existing items + let delete_sql = format!( + "DELETE FROM {} WHERE db_site_id = ? AND key = ?", + Self::items_table().table_name() + ); + executor.execute(&delete_sql, rusqlite::params![site.row_id, key])?; + + // Insert new items + self.insert_items(executor, site, key, items)?; + + Ok(()) + } + + /// Append items to an existing list. + /// + /// Used for load-more (page 2+) - appends items without deleting existing ones. + /// Items are inserted in order, so they appear after existing items. + pub fn append_items( + &self, + executor: &impl QueryExecutor, + site: &DbSite, + key: &str, + items: &[ListMetadataItemInput], + ) -> Result<(), SqliteDbError> { + self.insert_items(executor, site, key, items) + } + + /// Internal helper to insert items. + fn insert_items( + &self, + executor: &impl QueryExecutor, + site: &DbSite, + key: &str, + items: &[ListMetadataItemInput], + ) -> Result<(), SqliteDbError> { + if items.is_empty() { + return Ok(()); + } + + let insert_sql = format!( + "INSERT INTO {} (db_site_id, key, entity_id, modified_gmt, parent, menu_order) VALUES (?, ?, ?, ?, ?, ?)", + Self::items_table().table_name() + ); + + for item in items { + executor.execute( + &insert_sql, + rusqlite::params![ + site.row_id, + key, + item.entity_id, + item.modified_gmt, + item.parent, + item.menu_order + ], + )?; + } + + Ok(()) + } + + /// Update header pagination info. + pub fn update_header( + &self, + executor: &impl QueryExecutor, + site: &DbSite, + key: &str, + update: &ListMetadataHeaderUpdate, + ) -> Result<(), SqliteDbError> { + // Ensure header exists + self.get_or_create(executor, site, key)?; + + let sql = format!( + "UPDATE {} SET total_pages = ?, total_items = ?, current_page = ?, per_page = ?, last_fetched_at = strftime('%Y-%m-%dT%H:%M:%fZ', 'now') WHERE db_site_id = ? 
AND key = ?", + Self::header_table().table_name() + ); + + executor.execute( + &sql, + rusqlite::params![ + update.total_pages, + update.total_items, + update.current_page, + update.per_page, + site.row_id, + key + ], + )?; + + Ok(()) + } + + /// Update sync state for a list. + /// + /// Creates the state record if it doesn't exist (upsert). + pub fn update_state( + &self, + executor: &impl QueryExecutor, + list_metadata_id: RowId, + state: ListState, + error_message: Option<&str>, + ) -> Result<(), SqliteDbError> { + // Use INSERT OR REPLACE for upsert behavior + let sql = format!( + "INSERT INTO {} (list_metadata_id, state, error_message, updated_at) VALUES (?, ?, ?, strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) + ON CONFLICT(list_metadata_id) DO UPDATE SET state = excluded.state, error_message = excluded.error_message, updated_at = excluded.updated_at", + Self::state_table().table_name() + ); + + executor.execute( + &sql, + rusqlite::params![list_metadata_id, state.as_db_str(), error_message], + )?; + + Ok(()) + } + + /// Update sync state for a list by site and key. + /// + /// Convenience method that looks up or creates the list_metadata_id first. + pub fn update_state_by_key( + &self, + executor: &impl QueryExecutor, + site: &DbSite, + key: &str, + state: ListState, + error_message: Option<&str>, + ) -> Result<(), SqliteDbError> { + let list_metadata_id = self.get_or_create(executor, site, key)?; + self.update_state(executor, list_metadata_id, state, error_message) + } + + /// Increment version and return the new value. + /// + /// Used when starting a refresh to invalidate any in-flight load-more operations. + pub fn increment_version( + &self, + executor: &impl QueryExecutor, + site: &DbSite, + key: &str, + ) -> Result { + // Ensure header exists + self.get_or_create(executor, site, key)?; + + let sql = format!( + "UPDATE {} SET version = version + 1, last_first_page_fetched_at = strftime('%Y-%m-%dT%H:%M:%fZ', 'now') WHERE db_site_id = ? 
AND key = ?", + Self::header_table().table_name() + ); + + executor.execute(&sql, rusqlite::params![site.row_id, key])?; + + // Return the new version + self.get_version(executor, site, key) + } + + /// Delete all data for a list (header, items, and state). + pub fn delete_list( + &self, + executor: &impl QueryExecutor, + site: &DbSite, + key: &str, + ) -> Result<(), SqliteDbError> { + // Delete items first (no FK constraint to header) + let delete_items_sql = format!( + "DELETE FROM {} WHERE db_site_id = ? AND key = ?", + Self::items_table().table_name() + ); + executor.execute(&delete_items_sql, rusqlite::params![site.row_id, key])?; + + // Delete header (state will be cascade deleted via FK) + let delete_header_sql = format!( + "DELETE FROM {} WHERE db_site_id = ? AND key = ?", + Self::header_table().table_name() + ); + executor.execute(&delete_header_sql, rusqlite::params![site.row_id, key])?; + + Ok(()) + } + + // ============================================================ + // Concurrency Helpers + // ============================================================ + + /// Begin a refresh operation (fetch first page). + /// + /// Atomically: + /// 1. Creates header if needed + /// 2. Increments version (invalidates any in-flight load-more) + /// 3. Updates state to FetchingFirstPage + /// 4. Returns info needed for the fetch + /// + /// Call this before starting an API fetch for page 1. 
+ pub fn begin_refresh( + &self, + executor: &impl QueryExecutor, + site: &DbSite, + key: &str, + ) -> Result<RefreshInfo, SqliteDbError> { + // Ensure header exists and get its ID + let list_metadata_id = self.get_or_create(executor, site, key)?; + + // Increment version (invalidates any in-flight load-more) + let version = self.increment_version(executor, site, key)?; + + // Update state to fetching + self.update_state( + executor, + list_metadata_id, + ListState::FetchingFirstPage, + None, + )?; + + // Get header for pagination info + let header = self.get_header(executor, site, key)?.unwrap(); + + Ok(RefreshInfo { + list_metadata_id, + version, + per_page: header.per_page, + }) + } + + /// Begin a load-next-page operation. + /// + /// Atomically: + /// 1. Gets current pagination state + /// 2. Checks if there are more pages to load + /// 3. Updates state to FetchingNextPage + /// 4. Returns info needed for the fetch (including version for later check) + /// + /// Returns None if already at the last page or no pages loaded yet. + /// Call this before starting an API fetch for page N+1. + pub fn begin_fetch_next_page( + &self, + executor: &impl QueryExecutor, + site: &DbSite, + key: &str, + ) -> Result<Option<FetchNextPageInfo>, SqliteDbError> { + let header = match self.get_header(executor, site, key)? { + Some(h) => h, + None => return Ok(None), // List doesn't exist + }; + + // Check if we have pages loaded and more to fetch + if header.current_page == 0 { + return Ok(None); // No pages loaded yet, need refresh first + } + + if let Some(total_pages) = header.total_pages + && header.current_page >= total_pages + { + return Ok(None); // Already at last page + } + + let next_page = header.current_page + 1; + + // Update state to fetching + self.update_state(executor, header.row_id, ListState::FetchingNextPage, None)?; + + Ok(Some(FetchNextPageInfo { + list_metadata_id: header.row_id, + page: next_page, + version: header.version, + per_page: header.per_page, + })) + } + + /// Complete a sync operation successfully. 
+ /// + /// Updates state to Idle and clears any error message. + pub fn complete_sync( + &self, + executor: &impl QueryExecutor, + list_metadata_id: RowId, + ) -> Result<(), SqliteDbError> { + self.update_state(executor, list_metadata_id, ListState::Idle, None) + } + + /// Complete a sync operation with an error. + /// + /// Updates state to Error with the provided message. + pub fn complete_sync_with_error( + &self, + executor: &impl QueryExecutor, + list_metadata_id: RowId, + error_message: &str, + ) -> Result<(), SqliteDbError> { + self.update_state( + executor, + list_metadata_id, + ListState::Error, + Some(error_message), + ) + } + + // ============================================ + // Relevance checking for update hooks + // ============================================ + + /// Get the list_metadata_id (rowid) for a given key. + /// + /// Returns None if no list exists for this key yet. + /// Used by collections to cache the ID for relevance checking. + pub fn get_list_metadata_id( + &self, + executor: &impl QueryExecutor, + site: &DbSite, + key: &str, + ) -> Result<Option<RowId>, SqliteDbError> { + self.get_header(executor, site, key) + .map(|opt| opt.map(|h| h.row_id)) + } + + /// Get the list_metadata_id that a state row belongs to. + /// + /// Given a rowid from the list_metadata_state table, returns the + /// list_metadata_id (FK to list_metadata) that this state belongs to. + /// Returns None if the state row doesn't exist. 
+ pub fn get_list_metadata_id_for_state_row( + &self, + executor: &impl QueryExecutor, + state_row_id: RowId, + ) -> Result<Option<RowId>, SqliteDbError> { + let sql = format!( + "SELECT list_metadata_id FROM {} WHERE rowid = ?", + Self::state_table().table_name() + ); + let mut stmt = executor.prepare(&sql)?; + let result = stmt.query_row([state_row_id], |row| row.get::<_, RowId>(0)); + + match result { + Ok(id) => Ok(Some(id)), + Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None), + Err(e) => Err(SqliteDbError::from(e)), + } + } + + /// Check if a list_metadata_items row belongs to a specific key. + /// + /// Given a rowid from the list_metadata_items table, checks if the item + /// belongs to the list identified by (site, key). + pub fn is_item_row_for_key( + &self, + executor: &impl QueryExecutor, + site: &DbSite, + key: &str, + item_row_id: RowId, + ) -> Result<bool, SqliteDbError> { + let sql = format!( + "SELECT 1 FROM {} WHERE rowid = ? AND db_site_id = ? AND key = ?", + Self::items_table().table_name() + ); + let mut stmt = executor.prepare(&sql)?; + let result = stmt.query_row(rusqlite::params![item_row_id, site.row_id, key], |_| Ok(())); + + match result { + Ok(()) => Ok(true), + Err(rusqlite::Error::QueryReturnedNoRows) => Ok(false), + Err(e) => Err(SqliteDbError::from(e)), + } + } +} + +/// Information returned when starting a refresh operation. +#[derive(Debug, Clone)] +pub struct RefreshInfo { + /// Row ID of the list_metadata record + pub list_metadata_id: RowId, + /// New version number (for concurrency checking) + pub version: i64, + /// Items per page setting + pub per_page: i64, +} + +/// Information returned when starting a load-next-page operation. 
+#[derive(Debug, Clone)] +pub struct FetchNextPageInfo { + /// Row ID of the list_metadata record + pub list_metadata_id: RowId, + /// Page number to fetch + pub page: i64, + /// Version at start (check before storing results) + pub version: i64, + /// Items per page setting + pub per_page: i64, +} + +/// Input for creating a list metadata item. +#[derive(Debug, Clone)] +pub struct ListMetadataItemInput { + /// Entity ID (post ID, comment ID, etc.) + pub entity_id: i64, + /// Last modified timestamp (for staleness detection) + pub modified_gmt: Option<String>, + /// Parent entity ID (for hierarchical post types like pages) + pub parent: Option<i64>, + /// Menu order (for hierarchical post types) + pub menu_order: Option<i64>, +} + +/// Update parameters for list metadata header. +#[derive(Debug, Clone, Default)] +pub struct ListMetadataHeaderUpdate { + /// Total number of pages from API response + pub total_pages: Option<i64>, + /// Total number of items from API response + pub total_items: Option<i64>, + /// Current page that has been loaded + pub current_page: i64, + /// Items per page + pub per_page: i64, +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_fixtures::{TestContext, test_ctx}; + use rstest::*; + + fn list_metadata_repo() -> ListMetadataRepository { + ListMetadataRepository + } + + #[rstest] + fn test_get_header_returns_none_for_non_existent(test_ctx: TestContext) { + let repo = list_metadata_repo(); + let result = repo + .get_header(&test_ctx.conn, &test_ctx.site, "nonexistent:key") + .unwrap(); + assert!(result.is_none()); + } + + #[rstest] + fn test_get_or_create_creates_new_header(test_ctx: TestContext) { + let repo = list_metadata_repo(); + let key = "edit:posts:publish"; + + // Create new header + let row_id = repo + .get_or_create(&test_ctx.conn, &test_ctx.site, key) + .unwrap(); + + // Verify it was created with defaults + let header = repo + .get_header(&test_ctx.conn, &test_ctx.site, key) + .unwrap() + .unwrap(); + assert_eq!(header.row_id, row_id); + 
assert_eq!(header.key, key); + assert_eq!(header.current_page, 0); + assert_eq!(header.per_page, 20); + assert_eq!(header.version, 0); + assert!(header.total_pages.is_none()); + assert!(header.total_items.is_none()); + } + + #[rstest] + fn test_get_or_create_returns_existing_header(test_ctx: TestContext) { + let repo = list_metadata_repo(); + let key = "edit:posts:draft"; + + // Create initial header + let first_row_id = repo + .get_or_create(&test_ctx.conn, &test_ctx.site, key) + .unwrap(); + + // Get or create again should return same rowid + let second_row_id = repo + .get_or_create(&test_ctx.conn, &test_ctx.site, key) + .unwrap(); + + assert_eq!(first_row_id, second_row_id); + } + + #[rstest] + fn test_get_items_returns_empty_for_non_existent_list(test_ctx: TestContext) { + let repo = list_metadata_repo(); + let items = repo + .get_items(&test_ctx.conn, &test_ctx.site, "nonexistent:key") + .unwrap(); + assert!(items.is_empty()); + } + + #[rstest] + fn test_get_state_returns_none_for_non_existent(test_ctx: TestContext) { + let repo = list_metadata_repo(); + let result = repo.get_state(&test_ctx.conn, RowId(999999)).unwrap(); + assert!(result.is_none()); + } + + #[rstest] + fn test_get_state_by_key_returns_idle_for_non_existent_list(test_ctx: TestContext) { + let repo = list_metadata_repo(); + let state = repo + .get_state_by_key(&test_ctx.conn, &test_ctx.site, "nonexistent:key") + .unwrap(); + assert_eq!(state, ListState::Idle); + } + + #[rstest] + fn test_get_version_returns_zero_for_non_existent_list(test_ctx: TestContext) { + let repo = list_metadata_repo(); + let version = repo + .get_version(&test_ctx.conn, &test_ctx.site, "nonexistent:key") + .unwrap(); + assert_eq!(version, 0); + } + + #[rstest] + fn test_check_version_returns_true_for_matching_version(test_ctx: TestContext) { + let repo = list_metadata_repo(); + let key = "edit:posts:publish"; + + // Create header (version = 0) + repo.get_or_create(&test_ctx.conn, &test_ctx.site, key) + .unwrap(); + + // 
Check version matches + let matches = repo + .check_version(&test_ctx.conn, &test_ctx.site, key, 0) + .unwrap(); + assert!(matches); + + // Check version doesn't match + let matches = repo + .check_version(&test_ctx.conn, &test_ctx.site, key, 1) + .unwrap(); + assert!(!matches); + } + + #[rstest] + fn test_get_item_count_returns_zero_for_empty_list(test_ctx: TestContext) { + let repo = list_metadata_repo(); + let count = repo + .get_item_count(&test_ctx.conn, &test_ctx.site, "empty:list") + .unwrap(); + assert_eq!(count, 0); + } + + #[rstest] + fn test_list_metadata_column_enum_matches_schema(test_ctx: TestContext) { + // Verify column order by selecting specific columns and checking positions + let sql = format!( + "SELECT rowid, db_site_id, key, total_pages, total_items, current_page, per_page, last_first_page_fetched_at, last_fetched_at, version FROM {}", + ListMetadataRepository::header_table().table_name() + ); + let stmt = test_ctx.conn.prepare(&sql); + assert!( + stmt.is_ok(), + "Column order mismatch - SELECT with explicit columns failed" + ); + } + + #[rstest] + fn test_list_metadata_items_column_enum_matches_schema(test_ctx: TestContext) { + let sql = format!( + "SELECT rowid, db_site_id, key, entity_id, modified_gmt, parent, menu_order FROM {}", + ListMetadataRepository::items_table().table_name() + ); + let stmt = test_ctx.conn.prepare(&sql); + assert!( + stmt.is_ok(), + "Column order mismatch - SELECT with explicit columns failed" + ); + } + + #[rstest] + fn test_list_metadata_state_column_enum_matches_schema(test_ctx: TestContext) { + let sql = format!( + "SELECT rowid, list_metadata_id, state, error_message, updated_at FROM {}", + ListMetadataRepository::state_table().table_name() + ); + let stmt = test_ctx.conn.prepare(&sql); + assert!( + stmt.is_ok(), + "Column order mismatch - SELECT with explicit columns failed" + ); + } + + // ============================================================ + // Write Operation Tests + // 
============================================================ + + #[rstest] + fn test_set_items_inserts_new_items(test_ctx: TestContext) { + let repo = list_metadata_repo(); + let key = "edit:posts:publish"; + + let items = vec![ + ListMetadataItemInput { + entity_id: 100, + modified_gmt: Some("2024-01-01T00:00:00Z".to_string()), + parent: Some(50), + menu_order: Some(1), + }, + ListMetadataItemInput { + entity_id: 200, + modified_gmt: Some("2024-01-02T00:00:00Z".to_string()), + parent: Some(50), + menu_order: Some(2), + }, + ListMetadataItemInput { + entity_id: 300, + modified_gmt: None, + parent: None, + menu_order: None, + }, + ]; + + repo.set_items(&test_ctx.conn, &test_ctx.site, key, &items) + .unwrap(); + + let retrieved = repo.get_items(&test_ctx.conn, &test_ctx.site, key).unwrap(); + assert_eq!(retrieved.len(), 3); + assert_eq!(retrieved[0].entity_id, 100); + assert_eq!(retrieved[0].parent, Some(50)); + assert_eq!(retrieved[0].menu_order, Some(1)); + assert_eq!(retrieved[1].entity_id, 200); + assert_eq!(retrieved[2].entity_id, 300); + assert!(retrieved[2].modified_gmt.is_none()); + assert!(retrieved[2].parent.is_none()); + assert!(retrieved[2].menu_order.is_none()); + } + + #[rstest] + fn test_set_items_replaces_existing_items(test_ctx: TestContext) { + let repo = list_metadata_repo(); + let key = "edit:posts:draft"; + + // Insert initial items + let initial_items = vec![ + ListMetadataItemInput { + entity_id: 1, + modified_gmt: None, + parent: None, + menu_order: None, + }, + ListMetadataItemInput { + entity_id: 2, + modified_gmt: None, + parent: None, + menu_order: None, + }, + ]; + repo.set_items(&test_ctx.conn, &test_ctx.site, key, &initial_items) + .unwrap(); + + // Replace with new items + let new_items = vec![ + ListMetadataItemInput { + entity_id: 10, + modified_gmt: None, + parent: None, + menu_order: None, + }, + ListMetadataItemInput { + entity_id: 20, + modified_gmt: None, + parent: None, + menu_order: None, + }, + ListMetadataItemInput { + 
entity_id: 30, + modified_gmt: None, + parent: None, + menu_order: None, + }, + ]; + repo.set_items(&test_ctx.conn, &test_ctx.site, key, &new_items) + .unwrap(); + + let retrieved = repo.get_items(&test_ctx.conn, &test_ctx.site, key).unwrap(); + assert_eq!(retrieved.len(), 3); + assert_eq!(retrieved[0].entity_id, 10); + assert_eq!(retrieved[1].entity_id, 20); + assert_eq!(retrieved[2].entity_id, 30); + } + + #[rstest] + fn test_append_items_adds_to_existing(test_ctx: TestContext) { + let repo = list_metadata_repo(); + let key = "edit:posts:pending"; + + // Insert initial items + let initial_items = vec![ + ListMetadataItemInput { + entity_id: 1, + modified_gmt: None, + parent: None, + menu_order: None, + }, + ListMetadataItemInput { + entity_id: 2, + modified_gmt: None, + parent: None, + menu_order: None, + }, + ]; + repo.set_items(&test_ctx.conn, &test_ctx.site, key, &initial_items) + .unwrap(); + + // Append more items + let more_items = vec![ + ListMetadataItemInput { + entity_id: 3, + modified_gmt: None, + parent: None, + menu_order: None, + }, + ListMetadataItemInput { + entity_id: 4, + modified_gmt: None, + parent: None, + menu_order: None, + }, + ]; + repo.append_items(&test_ctx.conn, &test_ctx.site, key, &more_items) + .unwrap(); + + let retrieved = repo.get_items(&test_ctx.conn, &test_ctx.site, key).unwrap(); + assert_eq!(retrieved.len(), 4); + assert_eq!(retrieved[0].entity_id, 1); + assert_eq!(retrieved[1].entity_id, 2); + assert_eq!(retrieved[2].entity_id, 3); + assert_eq!(retrieved[3].entity_id, 4); + } + + #[rstest] + fn test_update_header_updates_pagination(test_ctx: TestContext) { + let repo = list_metadata_repo(); + let key = "edit:posts:publish"; + + let update = ListMetadataHeaderUpdate { + total_pages: Some(5), + total_items: Some(100), + current_page: 1, + per_page: 20, + }; + + repo.update_header(&test_ctx.conn, &test_ctx.site, key, &update) + .unwrap(); + + let header = repo + .get_header(&test_ctx.conn, &test_ctx.site, key) + .unwrap() + 
.unwrap(); + assert_eq!(header.total_pages, Some(5)); + assert_eq!(header.total_items, Some(100)); + assert_eq!(header.current_page, 1); + assert_eq!(header.per_page, 20); + assert!(header.last_fetched_at.is_some()); + } + + #[rstest] + fn test_update_state_creates_new_state(test_ctx: TestContext) { + let repo = list_metadata_repo(); + let key = "edit:posts:publish"; + + let list_id = repo + .get_or_create(&test_ctx.conn, &test_ctx.site, key) + .unwrap(); + repo.update_state(&test_ctx.conn, list_id, ListState::FetchingFirstPage, None) + .unwrap(); + + let state = repo.get_state(&test_ctx.conn, list_id).unwrap().unwrap(); + assert_eq!(state.state, ListState::FetchingFirstPage); + assert!(state.error_message.is_none()); + } + + #[rstest] + fn test_update_state_updates_existing_state(test_ctx: TestContext) { + let repo = list_metadata_repo(); + let key = "edit:posts:draft"; + + let list_id = repo + .get_or_create(&test_ctx.conn, &test_ctx.site, key) + .unwrap(); + + // Set initial state + repo.update_state(&test_ctx.conn, list_id, ListState::FetchingFirstPage, None) + .unwrap(); + + // Update to error state + repo.update_state( + &test_ctx.conn, + list_id, + ListState::Error, + Some("Network error"), + ) + .unwrap(); + + let state = repo.get_state(&test_ctx.conn, list_id).unwrap().unwrap(); + assert_eq!(state.state, ListState::Error); + assert_eq!(state.error_message.as_deref(), Some("Network error")); + } + + #[rstest] + fn test_update_state_by_key(test_ctx: TestContext) { + let repo = list_metadata_repo(); + let key = "edit:posts:pending"; + + repo.update_state_by_key( + &test_ctx.conn, + &test_ctx.site, + key, + ListState::FetchingNextPage, + None, + ) + .unwrap(); + + let state = repo + .get_state_by_key(&test_ctx.conn, &test_ctx.site, key) + .unwrap(); + assert_eq!(state, ListState::FetchingNextPage); + } + + #[rstest] + fn test_increment_version(test_ctx: TestContext) { + let repo = list_metadata_repo(); + let key = "edit:posts:publish"; + + // Create header 
(version starts at 0) + repo.get_or_create(&test_ctx.conn, &test_ctx.site, key) + .unwrap(); + let initial_version = repo + .get_version(&test_ctx.conn, &test_ctx.site, key) + .unwrap(); + assert_eq!(initial_version, 0); + + // Increment version + let new_version = repo + .increment_version(&test_ctx.conn, &test_ctx.site, key) + .unwrap(); + assert_eq!(new_version, 1); + + // Increment again + let newer_version = repo + .increment_version(&test_ctx.conn, &test_ctx.site, key) + .unwrap(); + assert_eq!(newer_version, 2); + + // Verify last_first_page_fetched_at is set + let header = repo + .get_header(&test_ctx.conn, &test_ctx.site, key) + .unwrap() + .unwrap(); + assert!(header.last_first_page_fetched_at.is_some()); + } + + #[rstest] + fn test_delete_list_removes_all_data(test_ctx: TestContext) { + let repo = list_metadata_repo(); + let key = "edit:posts:publish"; + + // Create header and add items and state + let list_id = repo + .get_or_create(&test_ctx.conn, &test_ctx.site, key) + .unwrap(); + let items = vec![ListMetadataItemInput { + entity_id: 1, + modified_gmt: None, + parent: None, + menu_order: None, + }]; + repo.set_items(&test_ctx.conn, &test_ctx.site, key, &items) + .unwrap(); + repo.update_state(&test_ctx.conn, list_id, ListState::Idle, None) + .unwrap(); + + // Verify data exists + assert!( + repo.get_header(&test_ctx.conn, &test_ctx.site, key) + .unwrap() + .is_some() + ); + assert_eq!( + repo.get_item_count(&test_ctx.conn, &test_ctx.site, key) + .unwrap(), + 1 + ); + + // Delete the list + repo.delete_list(&test_ctx.conn, &test_ctx.site, key) + .unwrap(); + + // Verify everything is deleted + assert!( + repo.get_header(&test_ctx.conn, &test_ctx.site, key) + .unwrap() + .is_none() + ); + assert_eq!( + repo.get_item_count(&test_ctx.conn, &test_ctx.site, key) + .unwrap(), + 0 + ); + } + + #[rstest] + fn test_items_preserve_order(test_ctx: TestContext) { + let repo = list_metadata_repo(); + let key = "edit:posts:ordered"; + + // Insert items in specific 
order + let items: Vec = (1..=10) + .map(|i| ListMetadataItemInput { + entity_id: i * 100, + modified_gmt: None, + parent: None, + menu_order: None, + }) + .collect(); + + repo.set_items(&test_ctx.conn, &test_ctx.site, key, &items) + .unwrap(); + + let retrieved = repo.get_items(&test_ctx.conn, &test_ctx.site, key).unwrap(); + assert_eq!(retrieved.len(), 10); + + // Verify order is preserved (rowid ordering) + for (i, item) in retrieved.iter().enumerate() { + assert_eq!(item.entity_id, ((i + 1) * 100) as i64); + } + } + + // ============================================================ + // Concurrency Helper Tests + // ============================================================ + + #[rstest] + fn test_begin_refresh_creates_header_and_sets_state(test_ctx: TestContext) { + let repo = list_metadata_repo(); + let key = "edit:posts:publish"; + + let info = repo + .begin_refresh(&test_ctx.conn, &test_ctx.site, key) + .unwrap(); + + // Verify version was incremented (from 0 to 1) + assert_eq!(info.version, 1); + assert_eq!(info.per_page, 20); // default + + // Verify state is FetchingFirstPage + let state = repo + .get_state_by_key(&test_ctx.conn, &test_ctx.site, key) + .unwrap(); + assert_eq!(state, ListState::FetchingFirstPage); + } + + #[rstest] + fn test_begin_refresh_increments_version_each_time(test_ctx: TestContext) { + let repo = list_metadata_repo(); + let key = "edit:posts:draft"; + + let info1 = repo + .begin_refresh(&test_ctx.conn, &test_ctx.site, key) + .unwrap(); + assert_eq!(info1.version, 1); + + // Complete the first refresh + repo.complete_sync(&test_ctx.conn, info1.list_metadata_id) + .unwrap(); + + let info2 = repo + .begin_refresh(&test_ctx.conn, &test_ctx.site, key) + .unwrap(); + assert_eq!(info2.version, 2); + } + + #[rstest] + fn test_begin_fetch_next_page_returns_none_for_non_existent_list(test_ctx: TestContext) { + let repo = list_metadata_repo(); + + let result = repo + .begin_fetch_next_page(&test_ctx.conn, &test_ctx.site, "nonexistent") + 
.unwrap(); + assert!(result.is_none()); + } + + #[rstest] + fn test_begin_fetch_next_page_returns_none_when_no_pages_loaded(test_ctx: TestContext) { + let repo = list_metadata_repo(); + let key = "edit:posts:publish"; + + // Create header but don't set current_page + repo.get_or_create(&test_ctx.conn, &test_ctx.site, key) + .unwrap(); + + let result = repo + .begin_fetch_next_page(&test_ctx.conn, &test_ctx.site, key) + .unwrap(); + assert!(result.is_none()); + } + + #[rstest] + fn test_begin_fetch_next_page_returns_none_at_last_page(test_ctx: TestContext) { + let repo = list_metadata_repo(); + let key = "edit:posts:publish"; + + // Set up header with current_page = total_pages + let update = ListMetadataHeaderUpdate { + total_pages: Some(3), + total_items: Some(60), + current_page: 3, + per_page: 20, + }; + repo.update_header(&test_ctx.conn, &test_ctx.site, key, &update) + .unwrap(); + + let result = repo + .begin_fetch_next_page(&test_ctx.conn, &test_ctx.site, key) + .unwrap(); + assert!(result.is_none()); + } + + #[rstest] + fn test_begin_fetch_next_page_returns_info_when_more_pages(test_ctx: TestContext) { + let repo = list_metadata_repo(); + let key = "edit:posts:publish"; + + // Set up header with more pages available + let update = ListMetadataHeaderUpdate { + total_pages: Some(5), + total_items: Some(100), + current_page: 2, + per_page: 20, + }; + repo.update_header(&test_ctx.conn, &test_ctx.site, key, &update) + .unwrap(); + + let result = repo + .begin_fetch_next_page(&test_ctx.conn, &test_ctx.site, key) + .unwrap(); + assert!(result.is_some()); + + let info = result.unwrap(); + assert_eq!(info.page, 3); // next page + assert_eq!(info.per_page, 20); + + // Verify state changed to FetchingNextPage + let state = repo + .get_state_by_key(&test_ctx.conn, &test_ctx.site, key) + .unwrap(); + assert_eq!(state, ListState::FetchingNextPage); + } + + #[rstest] + fn test_complete_sync_sets_state_to_idle(test_ctx: TestContext) { + let repo = list_metadata_repo(); + 
let key = "edit:posts:publish"; + + let info = repo + .begin_refresh(&test_ctx.conn, &test_ctx.site, key) + .unwrap(); + repo.complete_sync(&test_ctx.conn, info.list_metadata_id) + .unwrap(); + + let state = repo + .get_state_by_key(&test_ctx.conn, &test_ctx.site, key) + .unwrap(); + assert_eq!(state, ListState::Idle); + } + + #[rstest] + fn test_complete_sync_with_error_sets_state_and_message(test_ctx: TestContext) { + let repo = list_metadata_repo(); + let key = "edit:posts:publish"; + + let info = repo + .begin_refresh(&test_ctx.conn, &test_ctx.site, key) + .unwrap(); + repo.complete_sync_with_error(&test_ctx.conn, info.list_metadata_id, "Network timeout") + .unwrap(); + + let state_record = repo + .get_state(&test_ctx.conn, info.list_metadata_id) + .unwrap() + .unwrap(); + assert_eq!(state_record.state, ListState::Error); + assert_eq!( + state_record.error_message.as_deref(), + Some("Network timeout") + ); + } + + #[rstest] + fn test_version_check_detects_stale_operation(test_ctx: TestContext) { + let repo = list_metadata_repo(); + let key = "edit:posts:publish"; + + // Start a refresh (version becomes 1) + let refresh_info = repo + .begin_refresh(&test_ctx.conn, &test_ctx.site, key) + .unwrap(); + assert_eq!(refresh_info.version, 1); + + // Update header to simulate page 1 loaded + let update = ListMetadataHeaderUpdate { + total_pages: Some(5), + total_items: Some(100), + current_page: 1, + per_page: 20, + }; + repo.update_header(&test_ctx.conn, &test_ctx.site, key, &update) + .unwrap(); + repo.complete_sync(&test_ctx.conn, refresh_info.list_metadata_id) + .unwrap(); + + // Start load-next-page (captures version = 1) + let next_page_info = repo + .begin_fetch_next_page(&test_ctx.conn, &test_ctx.site, key) + .unwrap() + .unwrap(); + let captured_version = next_page_info.version; + + // Another refresh happens (version becomes 2) + repo.begin_refresh(&test_ctx.conn, &test_ctx.site, key) + .unwrap(); + + // Version check should fail (stale) + let is_valid = repo 
+ .check_version(&test_ctx.conn, &test_ctx.site, key, captured_version) + .unwrap(); + assert!(!is_valid, "Version should not match after refresh"); + } +} diff --git a/wp_mobile_cache/src/repository/mod.rs b/wp_mobile_cache/src/repository/mod.rs index b2d4f9122..84e16a1f8 100644 --- a/wp_mobile_cache/src/repository/mod.rs +++ b/wp_mobile_cache/src/repository/mod.rs @@ -1,6 +1,7 @@ use crate::{RowId, SqliteDbError}; use rusqlite::Connection; +pub mod list_metadata; pub mod posts; pub mod sites; pub mod term_relationships; diff --git a/wp_mobile_cache/src/repository/posts.rs b/wp_mobile_cache/src/repository/posts.rs index 910162403..4d6a76a9f 100644 --- a/wp_mobile_cache/src/repository/posts.rs +++ b/wp_mobile_cache/src/repository/posts.rs @@ -20,7 +20,7 @@ use crate::{ term_relationships::DbTermRelationship, }; use rusqlite::{OptionalExtension, Row}; -use std::{marker::PhantomData, sync::Arc}; +use std::{collections::HashMap, marker::PhantomData, sync::Arc}; use wp_api::{ posts::{ AnyPostWithEditContext, AnyPostWithEmbedContext, AnyPostWithViewContext, @@ -28,6 +28,7 @@ use wp_api::{ PostGuidWithViewContext, PostId, PostTitleWithEditContext, PostTitleWithEmbedContext, PostTitleWithViewContext, SparsePostExcerpt, }, + prelude::WpGmtDateTime, taxonomies::TaxonomyType, terms::TermId, }; @@ -307,6 +308,61 @@ impl PostRepository { })) } + /// Select `modified_gmt` timestamps for multiple posts by their WordPress post IDs. + /// + /// This is a lightweight query used for staleness detection - it only fetches + /// the `id` and `modified_gmt` columns without loading the full post data. + /// + /// Returns a HashMap mapping post IDs to their cached `modified_gmt` timestamps. + /// Posts not found in the cache are simply omitted from the result. 
+ /// + /// # Arguments + /// * `executor` - Database connection or transaction + /// * `site` - The site to query posts for + /// * `post_ids` - WordPress post IDs to look up + /// + /// # Returns + /// HashMap where keys are post IDs and values are their `modified_gmt` timestamps. + pub fn select_modified_gmt_by_ids( + &self, + executor: &impl QueryExecutor, + site: &DbSite, + post_ids: &[i64], + ) -> Result, SqliteDbError> { + if post_ids.is_empty() { + return Ok(HashMap::new()); + } + + let ids_str = post_ids + .iter() + .map(|id| id.to_string()) + .collect::>() + .join(", "); + + let sql = format!( + "SELECT id, modified_gmt FROM {} WHERE db_site_id = ? AND id IN ({})", + Self::table_name(), + ids_str + ); + + let mut stmt = executor.prepare(&sql)?; + let rows = stmt.query_map([site.row_id], |row| { + let id: i64 = row.get(0)?; + let modified_gmt_str: String = row.get(1)?; + Ok((id, modified_gmt_str)) + })?; + + let mut result = HashMap::new(); + for row_result in rows { + let (id, modified_gmt_str) = row_result.map_err(SqliteDbError::from)?; + if let Ok(modified_gmt) = modified_gmt_str.parse::() { + result.insert(id, modified_gmt); + } + } + + Ok(result) + } + /// Delete a post by its EntityId for a given site. /// /// Returns the number of rows deleted (0 or 1). From dd6d45df4317a8c2d77a6c0a5b6a85df3231a8a7 Mon Sep 17 00:00:00 2001 From: Oguz Kocer Date: Thu, 18 Dec 2025 11:45:25 -0500 Subject: [PATCH 02/25] Store ListState as INTEGER instead of TEXT Changes the database storage for `ListState` from TEXT strings to INTEGER values for better performance and type safety. 
Changes: - Update migration to use `INTEGER NOT NULL DEFAULT 0` for state column - Add `#[repr(i32)]` to `ListState` enum with explicit discriminant values - Implement `ToSql` and `FromSql` traits for direct rusqlite integration - Remove string-based `as_db_str()` and `From<&str>` implementations - Update all callers to use the enum directly with rusqlite params --- .../0007-create-list-metadata-tables.sql | 2 +- .../src/db_types/db_list_metadata.rs | 11 ++-- wp_mobile_cache/src/lib.rs | 6 +-- wp_mobile_cache/src/list_metadata.rs | 50 ++++++++----------- .../src/repository/list_metadata.rs | 2 +- 5 files changed, 30 insertions(+), 41 deletions(-) diff --git a/wp_mobile_cache/migrations/0007-create-list-metadata-tables.sql b/wp_mobile_cache/migrations/0007-create-list-metadata-tables.sql index e69a7eb4b..f51d9f43c 100644 --- a/wp_mobile_cache/migrations/0007-create-list-metadata-tables.sql +++ b/wp_mobile_cache/migrations/0007-create-list-metadata-tables.sql @@ -36,7 +36,7 @@ CREATE INDEX idx_list_metadata_items_entity ON list_metadata_items(db_site_id, e CREATE TABLE `list_metadata_state` ( `rowid` INTEGER PRIMARY KEY AUTOINCREMENT, `list_metadata_id` INTEGER NOT NULL, - `state` TEXT NOT NULL DEFAULT 'idle', -- idle, fetching_first_page, fetching_next_page, error + `state` INTEGER NOT NULL DEFAULT 0, -- 0=idle, 1=fetching_first_page, 2=fetching_next_page, 3=error `error_message` TEXT, `updated_at` TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now')), diff --git a/wp_mobile_cache/src/db_types/db_list_metadata.rs b/wp_mobile_cache/src/db_types/db_list_metadata.rs index 0fe12e00c..bb448b17e 100644 --- a/wp_mobile_cache/src/db_types/db_list_metadata.rs +++ b/wp_mobile_cache/src/db_types/db_list_metadata.rs @@ -110,12 +110,10 @@ impl DbListMetadataState { pub fn from_row(row: &Row) -> Result { use ListMetadataStateColumn as Col; - let state_str: String = row.get_column(Col::State)?; - Ok(Self { row_id: row.get_column(Col::Rowid)?, list_metadata_id: 
row.get_column(Col::ListMetadataId)?, - state: ListState::from(state_str), + state: row.get_column(Col::State)?, error_message: row.get_column(Col::ErrorMessage)?, updated_at: row.get_column(Col::UpdatedAt)?, }) @@ -150,12 +148,11 @@ impl DbListHeaderWithState { pub fn from_row(row: &Row) -> Result { use ListHeaderWithStateColumn as Col; - // state is nullable due to LEFT JOIN - default to "idle" - let state_str: Option = row.get_column(Col::State)?; - let state = state_str.map(ListState::from).unwrap_or(ListState::Idle); + // state is nullable due to LEFT JOIN - default to Idle + let state: Option = row.get_column(Col::State)?; Ok(Self { - state, + state: state.unwrap_or(ListState::Idle), error_message: row.get_column(Col::ErrorMessage)?, current_page: row.get_column(Col::CurrentPage)?, total_pages: row.get_column(Col::TotalPages)?, diff --git a/wp_mobile_cache/src/lib.rs b/wp_mobile_cache/src/lib.rs index 0d1c1dbd2..f5519874c 100644 --- a/wp_mobile_cache/src/lib.rs +++ b/wp_mobile_cache/src/lib.rs @@ -366,9 +366,9 @@ impl WpApiCache { let result = connection.execute( "UPDATE list_metadata_state SET state = ?1 WHERE state IN (?2, ?3)", params![ - ListState::Idle.as_db_str(), - ListState::FetchingFirstPage.as_db_str(), - ListState::FetchingNextPage.as_db_str(), + ListState::Idle as i32, + ListState::FetchingFirstPage as i32, + ListState::FetchingNextPage as i32, ], ); diff --git a/wp_mobile_cache/src/list_metadata.rs b/wp_mobile_cache/src/list_metadata.rs index 616b89205..f950a5ef8 100644 --- a/wp_mobile_cache/src/list_metadata.rs +++ b/wp_mobile_cache/src/list_metadata.rs @@ -1,4 +1,5 @@ use crate::RowId; +use rusqlite::types::{FromSql, FromSqlResult, ToSql, ToSqlOutput}; /// Represents list metadata header in the database. /// @@ -64,50 +65,41 @@ pub struct DbListMetadataState { } /// Sync state for a list. +/// +/// Stored as INTEGER in the database. The repr(i32) ensures stable values +/// even if the enum definition order changes. 
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, uniffi::Enum)] +#[repr(i32)] pub enum ListState { /// No sync in progress #[default] - Idle, + Idle = 0, /// Fetching first page (pull-to-refresh) - FetchingFirstPage, + FetchingFirstPage = 1, /// Fetching subsequent page (load more) - FetchingNextPage, + FetchingNextPage = 2, /// Last sync failed - Error, + Error = 3, } -impl ListState { - /// Convert to database string representation. - pub fn as_db_str(&self) -> &'static str { - match self { - ListState::Idle => "idle", - ListState::FetchingFirstPage => "fetching_first_page", - ListState::FetchingNextPage => "fetching_next_page", - ListState::Error => "error", - } +impl ToSql for ListState { + fn to_sql(&self) -> rusqlite::Result> { + Ok(ToSqlOutput::from(*self as i32)) } } -impl From<&str> for ListState { - fn from(s: &str) -> Self { - match s { - "idle" => ListState::Idle, - "fetching_first_page" => ListState::FetchingFirstPage, - "fetching_next_page" => ListState::FetchingNextPage, - "error" => ListState::Error, +impl FromSql for ListState { + fn column_result(value: rusqlite::types::ValueRef<'_>) -> FromSqlResult { + i32::column_result(value).map(|i| match i { + 0 => ListState::Idle, + 1 => ListState::FetchingFirstPage, + 2 => ListState::FetchingNextPage, + 3 => ListState::Error, _ => { - // Default to Idle for unknown states to avoid panics - eprintln!("Warning: Unknown ListState '{}', defaulting to Idle", s); + debug_assert!(false, "Unknown ListState value: {}", i); ListState::Idle } - } - } -} - -impl From for ListState { - fn from(s: String) -> Self { - ListState::from(s.as_str()) + }) } } diff --git a/wp_mobile_cache/src/repository/list_metadata.rs b/wp_mobile_cache/src/repository/list_metadata.rs index 1f0576115..d971bd33c 100644 --- a/wp_mobile_cache/src/repository/list_metadata.rs +++ b/wp_mobile_cache/src/repository/list_metadata.rs @@ -349,7 +349,7 @@ impl ListMetadataRepository { executor.execute( &sql, - rusqlite::params![list_metadata_id, 
state.as_db_str(), error_message], + rusqlite::params![list_metadata_id, state, error_message], )?; Ok(()) From f3a0187477c7e0a810a410b54d8c9ccddb997b2d Mon Sep 17 00:00:00 2001 From: Oguz Kocer Date: Thu, 18 Dec 2025 11:59:05 -0500 Subject: [PATCH 03/25] Return error for invalid ListState values instead of silent fallback The `FromSql` implementation now returns a proper error when encountering an unknown integer value, rather than silently defaulting to `Idle`. This makes data corruption issues visible instead of hiding them. --- wp_mobile_cache/src/list_metadata.rs | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/wp_mobile_cache/src/list_metadata.rs b/wp_mobile_cache/src/list_metadata.rs index f950a5ef8..97c757f13 100644 --- a/wp_mobile_cache/src/list_metadata.rs +++ b/wp_mobile_cache/src/list_metadata.rs @@ -90,15 +90,14 @@ impl ToSql for ListState { impl FromSql for ListState { fn column_result(value: rusqlite::types::ValueRef<'_>) -> FromSqlResult { - i32::column_result(value).map(|i| match i { - 0 => ListState::Idle, - 1 => ListState::FetchingFirstPage, - 2 => ListState::FetchingNextPage, - 3 => ListState::Error, - _ => { - debug_assert!(false, "Unknown ListState value: {}", i); - ListState::Idle - } + i32::column_result(value).and_then(|i| match i { + 0 => Ok(ListState::Idle), + 1 => Ok(ListState::FetchingFirstPage), + 2 => Ok(ListState::FetchingNextPage), + 3 => Ok(ListState::Error), + _ => Err(rusqlite::types::FromSqlError::Other( + format!("Invalid ListState value: {}", i).into(), + )), }) } } From 49ab6c96d61d7f3f52ed4ec81bbef608670eb973 Mon Sep 17 00:00:00 2001 From: Oguz Kocer Date: Thu, 18 Dec 2025 12:04:07 -0500 Subject: [PATCH 04/25] Convert ListMetadataRepository methods to associated functions The repository struct has no state, so methods are converted from instance methods (&self) to associated functions. This removes the need to instantiate the struct before calling methods. 
Before: ListMetadataRepository.get_header(&conn, &site, key) After: ListMetadataRepository::get_header(&conn, &site, key) --- .../src/repository/list_metadata.rs | 411 ++++++++---------- 1 file changed, 174 insertions(+), 237 deletions(-) diff --git a/wp_mobile_cache/src/repository/list_metadata.rs b/wp_mobile_cache/src/repository/list_metadata.rs index d971bd33c..b0bb58654 100644 --- a/wp_mobile_cache/src/repository/list_metadata.rs +++ b/wp_mobile_cache/src/repository/list_metadata.rs @@ -9,7 +9,8 @@ use crate::{ /// Repository for managing list metadata in the database. /// -/// Provides methods for querying and managing list pagination, items, and sync state. +/// Provides associated functions for querying and managing list pagination, +/// items, and sync state. All functions are stateless. pub struct ListMetadataRepository; impl ListMetadataRepository { @@ -36,7 +37,6 @@ impl ListMetadataRepository { /// /// Returns None if the list doesn't exist. pub fn get_header( - &self, executor: &impl QueryExecutor, site: &DbSite, key: &str, @@ -62,13 +62,12 @@ impl ListMetadataRepository { /// If the header doesn't exist, creates it with default values and returns its rowid. /// If it exists, returns the existing rowid. pub fn get_or_create( - &self, executor: &impl QueryExecutor, site: &DbSite, key: &str, ) -> Result { // Try to get existing - if let Some(header) = self.get_header(executor, site, key)? { + if let Some(header) = Self::get_header(executor, site, key)? { return Ok(header.row_id); } @@ -84,7 +83,6 @@ impl ListMetadataRepository { /// Get all items for a list, ordered by rowid (insertion order = display order). pub fn get_items( - &self, executor: &impl QueryExecutor, site: &DbSite, key: &str, @@ -107,7 +105,6 @@ impl ListMetadataRepository { /// /// Returns None if no state record exists (list not yet synced). 
pub fn get_state( - &self, executor: &impl QueryExecutor, list_metadata_id: RowId, ) -> Result, SqliteDbError> { @@ -132,15 +129,14 @@ impl ListMetadataRepository { /// Convenience method that looks up the list_metadata_id first. /// Returns ListState::Idle if the list or state doesn't exist. pub fn get_state_by_key( - &self, executor: &impl QueryExecutor, site: &DbSite, key: &str, ) -> Result { - let header = self.get_header(executor, site, key)?; + let header = Self::get_header(executor, site, key)?; match header { Some(h) => { - let state = self.get_state(executor, h.row_id)?; + let state = Self::get_state(executor, h.row_id)?; Ok(state.map(|s| s.state).unwrap_or(ListState::Idle)) } None => Ok(ListState::Idle), @@ -154,7 +150,6 @@ impl ListMetadataRepository { /// /// Returns `None` if the list doesn't exist. pub fn get_header_with_state( - &self, executor: &impl QueryExecutor, site: &DbSite, key: &str, @@ -183,12 +178,11 @@ impl ListMetadataRepository { /// /// Returns 0 if the list doesn't exist. pub fn get_version( - &self, executor: &impl QueryExecutor, site: &DbSite, key: &str, ) -> Result { - let header = self.get_header(executor, site, key)?; + let header = Self::get_header(executor, site, key)?; Ok(header.map(|h| h.version).unwrap_or(0)) } @@ -197,19 +191,17 @@ impl ListMetadataRepository { /// Used for concurrency control to detect if a refresh happened /// while a load-more operation was in progress. pub fn check_version( - &self, executor: &impl QueryExecutor, site: &DbSite, key: &str, expected_version: i64, ) -> Result { - let current_version = self.get_version(executor, site, key)?; + let current_version = Self::get_version(executor, site, key)?; Ok(current_version == expected_version) } /// Get the item count for a list. pub fn get_item_count( - &self, executor: &impl QueryExecutor, site: &DbSite, key: &str, @@ -232,7 +224,6 @@ impl ListMetadataRepository { /// Used for refresh (page 1) - deletes all existing items and inserts new ones. 
/// Items are inserted in order, so rowid determines display order. pub fn set_items( - &self, executor: &impl QueryExecutor, site: &DbSite, key: &str, @@ -246,7 +237,7 @@ impl ListMetadataRepository { executor.execute(&delete_sql, rusqlite::params![site.row_id, key])?; // Insert new items - self.insert_items(executor, site, key, items)?; + Self::insert_items(executor, site, key, items)?; Ok(()) } @@ -256,18 +247,16 @@ impl ListMetadataRepository { /// Used for load-more (page 2+) - appends items without deleting existing ones. /// Items are inserted in order, so they appear after existing items. pub fn append_items( - &self, executor: &impl QueryExecutor, site: &DbSite, key: &str, items: &[ListMetadataItemInput], ) -> Result<(), SqliteDbError> { - self.insert_items(executor, site, key, items) + Self::insert_items(executor, site, key, items) } /// Internal helper to insert items. fn insert_items( - &self, executor: &impl QueryExecutor, site: &DbSite, key: &str, @@ -301,14 +290,13 @@ impl ListMetadataRepository { /// Update header pagination info. pub fn update_header( - &self, executor: &impl QueryExecutor, site: &DbSite, key: &str, update: &ListMetadataHeaderUpdate, ) -> Result<(), SqliteDbError> { // Ensure header exists - self.get_or_create(executor, site, key)?; + Self::get_or_create(executor, site, key)?; let sql = format!( "UPDATE {} SET total_pages = ?, total_items = ?, current_page = ?, per_page = ?, last_fetched_at = strftime('%Y-%m-%dT%H:%M:%fZ', 'now') WHERE db_site_id = ? AND key = ?", @@ -334,7 +322,6 @@ impl ListMetadataRepository { /// /// Creates the state record if it doesn't exist (upsert). pub fn update_state( - &self, executor: &impl QueryExecutor, list_metadata_id: RowId, state: ListState, @@ -359,28 +346,26 @@ impl ListMetadataRepository { /// /// Convenience method that looks up or creates the list_metadata_id first. 
pub fn update_state_by_key( - &self, executor: &impl QueryExecutor, site: &DbSite, key: &str, state: ListState, error_message: Option<&str>, ) -> Result<(), SqliteDbError> { - let list_metadata_id = self.get_or_create(executor, site, key)?; - self.update_state(executor, list_metadata_id, state, error_message) + let list_metadata_id = Self::get_or_create(executor, site, key)?; + Self::update_state(executor, list_metadata_id, state, error_message) } /// Increment version and return the new value. /// /// Used when starting a refresh to invalidate any in-flight load-more operations. pub fn increment_version( - &self, executor: &impl QueryExecutor, site: &DbSite, key: &str, ) -> Result { // Ensure header exists - self.get_or_create(executor, site, key)?; + Self::get_or_create(executor, site, key)?; let sql = format!( "UPDATE {} SET version = version + 1, last_first_page_fetched_at = strftime('%Y-%m-%dT%H:%M:%fZ', 'now') WHERE db_site_id = ? AND key = ?", @@ -390,12 +375,11 @@ impl ListMetadataRepository { executor.execute(&sql, rusqlite::params![site.row_id, key])?; // Return the new version - self.get_version(executor, site, key) + Self::get_version(executor, site, key) } /// Delete all data for a list (header, items, and state). pub fn delete_list( - &self, executor: &impl QueryExecutor, site: &DbSite, key: &str, @@ -431,19 +415,18 @@ impl ListMetadataRepository { /// /// Call this before starting an API fetch for page 1. 
pub fn begin_refresh( - &self, executor: &impl QueryExecutor, site: &DbSite, key: &str, ) -> Result { // Ensure header exists and get its ID - let list_metadata_id = self.get_or_create(executor, site, key)?; + let list_metadata_id = Self::get_or_create(executor, site, key)?; // Increment version (invalidates any in-flight load-more) - let version = self.increment_version(executor, site, key)?; + let version = Self::increment_version(executor, site, key)?; // Update state to fetching - self.update_state( + Self::update_state( executor, list_metadata_id, ListState::FetchingFirstPage, @@ -451,7 +434,7 @@ impl ListMetadataRepository { )?; // Get header for pagination info - let header = self.get_header(executor, site, key)?.unwrap(); + let header = Self::get_header(executor, site, key)?.unwrap(); Ok(RefreshInfo { list_metadata_id, @@ -471,12 +454,11 @@ impl ListMetadataRepository { /// Returns None if already at the last page or no pages loaded yet. /// Call this before starting an API fetch for page N+1. pub fn begin_fetch_next_page( - &self, executor: &impl QueryExecutor, site: &DbSite, key: &str, ) -> Result, SqliteDbError> { - let header = match self.get_header(executor, site, key)? { + let header = match Self::get_header(executor, site, key)? { Some(h) => h, None => return Ok(None), // List doesn't exist }; @@ -495,7 +477,7 @@ impl ListMetadataRepository { let next_page = header.current_page + 1; // Update state to fetching - self.update_state(executor, header.row_id, ListState::FetchingNextPage, None)?; + Self::update_state(executor, header.row_id, ListState::FetchingNextPage, None)?; Ok(Some(FetchNextPageInfo { list_metadata_id: header.row_id, @@ -509,23 +491,21 @@ impl ListMetadataRepository { /// /// Updates state to Idle and clears any error message. 
pub fn complete_sync( - &self, executor: &impl QueryExecutor, list_metadata_id: RowId, ) -> Result<(), SqliteDbError> { - self.update_state(executor, list_metadata_id, ListState::Idle, None) + Self::update_state(executor, list_metadata_id, ListState::Idle, None) } /// Complete a sync operation with an error. /// /// Updates state to Error with the provided message. pub fn complete_sync_with_error( - &self, executor: &impl QueryExecutor, list_metadata_id: RowId, error_message: &str, ) -> Result<(), SqliteDbError> { - self.update_state( + Self::update_state( executor, list_metadata_id, ListState::Error, @@ -542,13 +522,11 @@ impl ListMetadataRepository { /// Returns None if no list exists for this key yet. /// Used by collections to cache the ID for relevance checking. pub fn get_list_metadata_id( - &self, executor: &impl QueryExecutor, site: &DbSite, key: &str, ) -> Result, SqliteDbError> { - self.get_header(executor, site, key) - .map(|opt| opt.map(|h| h.row_id)) + Self::get_header(executor, site, key).map(|opt| opt.map(|h| h.row_id)) } /// Get the list_metadata_id that a state row belongs to. @@ -557,7 +535,6 @@ impl ListMetadataRepository { /// list_metadata_id (FK to list_metadata) that this state belongs to. /// Returns None if the state row doesn't exist. pub fn get_list_metadata_id_for_state_row( - &self, executor: &impl QueryExecutor, state_row_id: RowId, ) -> Result, SqliteDbError> { @@ -580,7 +557,6 @@ impl ListMetadataRepository { /// Given a rowid from the list_metadata_items table, checks if the item /// belongs to the list identified by (site, key). 
pub fn is_item_row_for_key( - &self, executor: &impl QueryExecutor, site: &DbSite, key: &str, @@ -657,32 +633,24 @@ mod tests { use crate::test_fixtures::{TestContext, test_ctx}; use rstest::*; - fn list_metadata_repo() -> ListMetadataRepository { - ListMetadataRepository - } - #[rstest] fn test_get_header_returns_none_for_non_existent(test_ctx: TestContext) { - let repo = list_metadata_repo(); - let result = repo - .get_header(&test_ctx.conn, &test_ctx.site, "nonexistent:key") - .unwrap(); + let result = + ListMetadataRepository::get_header(&test_ctx.conn, &test_ctx.site, "nonexistent:key") + .unwrap(); assert!(result.is_none()); } #[rstest] fn test_get_or_create_creates_new_header(test_ctx: TestContext) { - let repo = list_metadata_repo(); let key = "edit:posts:publish"; // Create new header - let row_id = repo - .get_or_create(&test_ctx.conn, &test_ctx.site, key) - .unwrap(); + let row_id = + ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, key).unwrap(); // Verify it was created with defaults - let header = repo - .get_header(&test_ctx.conn, &test_ctx.site, key) + let header = ListMetadataRepository::get_header(&test_ctx.conn, &test_ctx.site, key) .unwrap() .unwrap(); assert_eq!(header.row_id, row_id); @@ -696,84 +664,75 @@ mod tests { #[rstest] fn test_get_or_create_returns_existing_header(test_ctx: TestContext) { - let repo = list_metadata_repo(); let key = "edit:posts:draft"; // Create initial header - let first_row_id = repo - .get_or_create(&test_ctx.conn, &test_ctx.site, key) - .unwrap(); + let first_row_id = + ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, key).unwrap(); // Get or create again should return same rowid - let second_row_id = repo - .get_or_create(&test_ctx.conn, &test_ctx.site, key) - .unwrap(); + let second_row_id = + ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, key).unwrap(); assert_eq!(first_row_id, second_row_id); } #[rstest] fn 
test_get_items_returns_empty_for_non_existent_list(test_ctx: TestContext) { - let repo = list_metadata_repo(); - let items = repo - .get_items(&test_ctx.conn, &test_ctx.site, "nonexistent:key") - .unwrap(); + let items = + ListMetadataRepository::get_items(&test_ctx.conn, &test_ctx.site, "nonexistent:key") + .unwrap(); assert!(items.is_empty()); } #[rstest] fn test_get_state_returns_none_for_non_existent(test_ctx: TestContext) { - let repo = list_metadata_repo(); - let result = repo.get_state(&test_ctx.conn, RowId(999999)).unwrap(); + let result = ListMetadataRepository::get_state(&test_ctx.conn, RowId(999999)).unwrap(); assert!(result.is_none()); } #[rstest] fn test_get_state_by_key_returns_idle_for_non_existent_list(test_ctx: TestContext) { - let repo = list_metadata_repo(); - let state = repo - .get_state_by_key(&test_ctx.conn, &test_ctx.site, "nonexistent:key") - .unwrap(); + let state = ListMetadataRepository::get_state_by_key( + &test_ctx.conn, + &test_ctx.site, + "nonexistent:key", + ) + .unwrap(); assert_eq!(state, ListState::Idle); } #[rstest] fn test_get_version_returns_zero_for_non_existent_list(test_ctx: TestContext) { - let repo = list_metadata_repo(); - let version = repo - .get_version(&test_ctx.conn, &test_ctx.site, "nonexistent:key") - .unwrap(); + let version = + ListMetadataRepository::get_version(&test_ctx.conn, &test_ctx.site, "nonexistent:key") + .unwrap(); assert_eq!(version, 0); } #[rstest] fn test_check_version_returns_true_for_matching_version(test_ctx: TestContext) { - let repo = list_metadata_repo(); let key = "edit:posts:publish"; // Create header (version = 0) - repo.get_or_create(&test_ctx.conn, &test_ctx.site, key) - .unwrap(); + ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, key).unwrap(); // Check version matches - let matches = repo - .check_version(&test_ctx.conn, &test_ctx.site, key, 0) - .unwrap(); + let matches = + ListMetadataRepository::check_version(&test_ctx.conn, &test_ctx.site, key, 0).unwrap(); 
assert!(matches); // Check version doesn't match - let matches = repo - .check_version(&test_ctx.conn, &test_ctx.site, key, 1) - .unwrap(); + let matches = + ListMetadataRepository::check_version(&test_ctx.conn, &test_ctx.site, key, 1).unwrap(); assert!(!matches); } #[rstest] fn test_get_item_count_returns_zero_for_empty_list(test_ctx: TestContext) { - let repo = list_metadata_repo(); - let count = repo - .get_item_count(&test_ctx.conn, &test_ctx.site, "empty:list") - .unwrap(); + let count = + ListMetadataRepository::get_item_count(&test_ctx.conn, &test_ctx.site, "empty:list") + .unwrap(); assert_eq!(count, 0); } @@ -823,7 +782,6 @@ mod tests { #[rstest] fn test_set_items_inserts_new_items(test_ctx: TestContext) { - let repo = list_metadata_repo(); let key = "edit:posts:publish"; let items = vec![ @@ -847,10 +805,10 @@ mod tests { }, ]; - repo.set_items(&test_ctx.conn, &test_ctx.site, key, &items) - .unwrap(); + ListMetadataRepository::set_items(&test_ctx.conn, &test_ctx.site, key, &items).unwrap(); - let retrieved = repo.get_items(&test_ctx.conn, &test_ctx.site, key).unwrap(); + let retrieved = + ListMetadataRepository::get_items(&test_ctx.conn, &test_ctx.site, key).unwrap(); assert_eq!(retrieved.len(), 3); assert_eq!(retrieved[0].entity_id, 100); assert_eq!(retrieved[0].parent, Some(50)); @@ -864,7 +822,6 @@ mod tests { #[rstest] fn test_set_items_replaces_existing_items(test_ctx: TestContext) { - let repo = list_metadata_repo(); let key = "edit:posts:draft"; // Insert initial items @@ -882,7 +839,7 @@ mod tests { menu_order: None, }, ]; - repo.set_items(&test_ctx.conn, &test_ctx.site, key, &initial_items) + ListMetadataRepository::set_items(&test_ctx.conn, &test_ctx.site, key, &initial_items) .unwrap(); // Replace with new items @@ -906,10 +863,10 @@ mod tests { menu_order: None, }, ]; - repo.set_items(&test_ctx.conn, &test_ctx.site, key, &new_items) - .unwrap(); + ListMetadataRepository::set_items(&test_ctx.conn, &test_ctx.site, key, &new_items).unwrap(); - 
let retrieved = repo.get_items(&test_ctx.conn, &test_ctx.site, key).unwrap(); + let retrieved = + ListMetadataRepository::get_items(&test_ctx.conn, &test_ctx.site, key).unwrap(); assert_eq!(retrieved.len(), 3); assert_eq!(retrieved[0].entity_id, 10); assert_eq!(retrieved[1].entity_id, 20); @@ -918,7 +875,6 @@ mod tests { #[rstest] fn test_append_items_adds_to_existing(test_ctx: TestContext) { - let repo = list_metadata_repo(); let key = "edit:posts:pending"; // Insert initial items @@ -936,7 +892,7 @@ mod tests { menu_order: None, }, ]; - repo.set_items(&test_ctx.conn, &test_ctx.site, key, &initial_items) + ListMetadataRepository::set_items(&test_ctx.conn, &test_ctx.site, key, &initial_items) .unwrap(); // Append more items @@ -954,10 +910,11 @@ mod tests { menu_order: None, }, ]; - repo.append_items(&test_ctx.conn, &test_ctx.site, key, &more_items) + ListMetadataRepository::append_items(&test_ctx.conn, &test_ctx.site, key, &more_items) .unwrap(); - let retrieved = repo.get_items(&test_ctx.conn, &test_ctx.site, key).unwrap(); + let retrieved = + ListMetadataRepository::get_items(&test_ctx.conn, &test_ctx.site, key).unwrap(); assert_eq!(retrieved.len(), 4); assert_eq!(retrieved[0].entity_id, 1); assert_eq!(retrieved[1].entity_id, 2); @@ -967,7 +924,6 @@ mod tests { #[rstest] fn test_update_header_updates_pagination(test_ctx: TestContext) { - let repo = list_metadata_repo(); let key = "edit:posts:publish"; let update = ListMetadataHeaderUpdate { @@ -977,11 +933,10 @@ mod tests { per_page: 20, }; - repo.update_header(&test_ctx.conn, &test_ctx.site, key, &update) + ListMetadataRepository::update_header(&test_ctx.conn, &test_ctx.site, key, &update) .unwrap(); - let header = repo - .get_header(&test_ctx.conn, &test_ctx.site, key) + let header = ListMetadataRepository::get_header(&test_ctx.conn, &test_ctx.site, key) .unwrap() .unwrap(); assert_eq!(header.total_pages, Some(5)); @@ -993,35 +948,43 @@ mod tests { #[rstest] fn test_update_state_creates_new_state(test_ctx: 
TestContext) { - let repo = list_metadata_repo(); let key = "edit:posts:publish"; - let list_id = repo - .get_or_create(&test_ctx.conn, &test_ctx.site, key) - .unwrap(); - repo.update_state(&test_ctx.conn, list_id, ListState::FetchingFirstPage, None) - .unwrap(); + let list_id = + ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, key).unwrap(); + ListMetadataRepository::update_state( + &test_ctx.conn, + list_id, + ListState::FetchingFirstPage, + None, + ) + .unwrap(); - let state = repo.get_state(&test_ctx.conn, list_id).unwrap().unwrap(); + let state = ListMetadataRepository::get_state(&test_ctx.conn, list_id) + .unwrap() + .unwrap(); assert_eq!(state.state, ListState::FetchingFirstPage); assert!(state.error_message.is_none()); } #[rstest] fn test_update_state_updates_existing_state(test_ctx: TestContext) { - let repo = list_metadata_repo(); let key = "edit:posts:draft"; - let list_id = repo - .get_or_create(&test_ctx.conn, &test_ctx.site, key) - .unwrap(); + let list_id = + ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, key).unwrap(); // Set initial state - repo.update_state(&test_ctx.conn, list_id, ListState::FetchingFirstPage, None) - .unwrap(); + ListMetadataRepository::update_state( + &test_ctx.conn, + list_id, + ListState::FetchingFirstPage, + None, + ) + .unwrap(); // Update to error state - repo.update_state( + ListMetadataRepository::update_state( &test_ctx.conn, list_id, ListState::Error, @@ -1029,17 +992,18 @@ mod tests { ) .unwrap(); - let state = repo.get_state(&test_ctx.conn, list_id).unwrap().unwrap(); + let state = ListMetadataRepository::get_state(&test_ctx.conn, list_id) + .unwrap() + .unwrap(); assert_eq!(state.state, ListState::Error); assert_eq!(state.error_message.as_deref(), Some("Network error")); } #[rstest] fn test_update_state_by_key(test_ctx: TestContext) { - let repo = list_metadata_repo(); let key = "edit:posts:pending"; - repo.update_state_by_key( + 
ListMetadataRepository::update_state_by_key( &test_ctx.conn, &test_ctx.site, key, @@ -1048,40 +1012,33 @@ mod tests { ) .unwrap(); - let state = repo - .get_state_by_key(&test_ctx.conn, &test_ctx.site, key) - .unwrap(); + let state = + ListMetadataRepository::get_state_by_key(&test_ctx.conn, &test_ctx.site, key).unwrap(); assert_eq!(state, ListState::FetchingNextPage); } #[rstest] fn test_increment_version(test_ctx: TestContext) { - let repo = list_metadata_repo(); let key = "edit:posts:publish"; // Create header (version starts at 0) - repo.get_or_create(&test_ctx.conn, &test_ctx.site, key) - .unwrap(); - let initial_version = repo - .get_version(&test_ctx.conn, &test_ctx.site, key) - .unwrap(); + ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, key).unwrap(); + let initial_version = + ListMetadataRepository::get_version(&test_ctx.conn, &test_ctx.site, key).unwrap(); assert_eq!(initial_version, 0); // Increment version - let new_version = repo - .increment_version(&test_ctx.conn, &test_ctx.site, key) - .unwrap(); + let new_version = + ListMetadataRepository::increment_version(&test_ctx.conn, &test_ctx.site, key).unwrap(); assert_eq!(new_version, 1); // Increment again - let newer_version = repo - .increment_version(&test_ctx.conn, &test_ctx.site, key) - .unwrap(); + let newer_version = + ListMetadataRepository::increment_version(&test_ctx.conn, &test_ctx.site, key).unwrap(); assert_eq!(newer_version, 2); // Verify last_first_page_fetched_at is set - let header = repo - .get_header(&test_ctx.conn, &test_ctx.site, key) + let header = ListMetadataRepository::get_header(&test_ctx.conn, &test_ctx.site, key) .unwrap() .unwrap(); assert!(header.last_first_page_fetched_at.is_some()); @@ -1089,56 +1046,49 @@ mod tests { #[rstest] fn test_delete_list_removes_all_data(test_ctx: TestContext) { - let repo = list_metadata_repo(); let key = "edit:posts:publish"; // Create header and add items and state - let list_id = repo - .get_or_create(&test_ctx.conn, 
&test_ctx.site, key) - .unwrap(); + let list_id = + ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, key).unwrap(); let items = vec![ListMetadataItemInput { entity_id: 1, modified_gmt: None, parent: None, menu_order: None, }]; - repo.set_items(&test_ctx.conn, &test_ctx.site, key, &items) - .unwrap(); - repo.update_state(&test_ctx.conn, list_id, ListState::Idle, None) + ListMetadataRepository::set_items(&test_ctx.conn, &test_ctx.site, key, &items).unwrap(); + ListMetadataRepository::update_state(&test_ctx.conn, list_id, ListState::Idle, None) .unwrap(); // Verify data exists assert!( - repo.get_header(&test_ctx.conn, &test_ctx.site, key) + ListMetadataRepository::get_header(&test_ctx.conn, &test_ctx.site, key) .unwrap() .is_some() ); assert_eq!( - repo.get_item_count(&test_ctx.conn, &test_ctx.site, key) - .unwrap(), + ListMetadataRepository::get_item_count(&test_ctx.conn, &test_ctx.site, key).unwrap(), 1 ); // Delete the list - repo.delete_list(&test_ctx.conn, &test_ctx.site, key) - .unwrap(); + ListMetadataRepository::delete_list(&test_ctx.conn, &test_ctx.site, key).unwrap(); // Verify everything is deleted assert!( - repo.get_header(&test_ctx.conn, &test_ctx.site, key) + ListMetadataRepository::get_header(&test_ctx.conn, &test_ctx.site, key) .unwrap() .is_none() ); assert_eq!( - repo.get_item_count(&test_ctx.conn, &test_ctx.site, key) - .unwrap(), + ListMetadataRepository::get_item_count(&test_ctx.conn, &test_ctx.site, key).unwrap(), 0 ); } #[rstest] fn test_items_preserve_order(test_ctx: TestContext) { - let repo = list_metadata_repo(); let key = "edit:posts:ordered"; // Insert items in specific order @@ -1151,10 +1101,10 @@ mod tests { }) .collect(); - repo.set_items(&test_ctx.conn, &test_ctx.site, key, &items) - .unwrap(); + ListMetadataRepository::set_items(&test_ctx.conn, &test_ctx.site, key, &items).unwrap(); - let retrieved = repo.get_items(&test_ctx.conn, &test_ctx.site, key).unwrap(); + let retrieved = + 
ListMetadataRepository::get_items(&test_ctx.conn, &test_ctx.site, key).unwrap(); assert_eq!(retrieved.len(), 10); // Verify order is preserved (rowid ordering) @@ -1169,72 +1119,63 @@ mod tests { #[rstest] fn test_begin_refresh_creates_header_and_sets_state(test_ctx: TestContext) { - let repo = list_metadata_repo(); let key = "edit:posts:publish"; - let info = repo - .begin_refresh(&test_ctx.conn, &test_ctx.site, key) - .unwrap(); + let info = + ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, key).unwrap(); // Verify version was incremented (from 0 to 1) assert_eq!(info.version, 1); assert_eq!(info.per_page, 20); // default // Verify state is FetchingFirstPage - let state = repo - .get_state_by_key(&test_ctx.conn, &test_ctx.site, key) - .unwrap(); + let state = + ListMetadataRepository::get_state_by_key(&test_ctx.conn, &test_ctx.site, key).unwrap(); assert_eq!(state, ListState::FetchingFirstPage); } #[rstest] fn test_begin_refresh_increments_version_each_time(test_ctx: TestContext) { - let repo = list_metadata_repo(); let key = "edit:posts:draft"; - let info1 = repo - .begin_refresh(&test_ctx.conn, &test_ctx.site, key) - .unwrap(); + let info1 = + ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, key).unwrap(); assert_eq!(info1.version, 1); // Complete the first refresh - repo.complete_sync(&test_ctx.conn, info1.list_metadata_id) - .unwrap(); + ListMetadataRepository::complete_sync(&test_ctx.conn, info1.list_metadata_id).unwrap(); - let info2 = repo - .begin_refresh(&test_ctx.conn, &test_ctx.site, key) - .unwrap(); + let info2 = + ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, key).unwrap(); assert_eq!(info2.version, 2); } #[rstest] fn test_begin_fetch_next_page_returns_none_for_non_existent_list(test_ctx: TestContext) { - let repo = list_metadata_repo(); - - let result = repo - .begin_fetch_next_page(&test_ctx.conn, &test_ctx.site, "nonexistent") - .unwrap(); + let result = 
ListMetadataRepository::begin_fetch_next_page( + &test_ctx.conn, + &test_ctx.site, + "nonexistent", + ) + .unwrap(); assert!(result.is_none()); } #[rstest] fn test_begin_fetch_next_page_returns_none_when_no_pages_loaded(test_ctx: TestContext) { - let repo = list_metadata_repo(); let key = "edit:posts:publish"; // Create header but don't set current_page - repo.get_or_create(&test_ctx.conn, &test_ctx.site, key) - .unwrap(); + ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, key).unwrap(); - let result = repo - .begin_fetch_next_page(&test_ctx.conn, &test_ctx.site, key) - .unwrap(); + let result = + ListMetadataRepository::begin_fetch_next_page(&test_ctx.conn, &test_ctx.site, key) + .unwrap(); assert!(result.is_none()); } #[rstest] fn test_begin_fetch_next_page_returns_none_at_last_page(test_ctx: TestContext) { - let repo = list_metadata_repo(); let key = "edit:posts:publish"; // Set up header with current_page = total_pages @@ -1244,18 +1185,17 @@ mod tests { current_page: 3, per_page: 20, }; - repo.update_header(&test_ctx.conn, &test_ctx.site, key, &update) + ListMetadataRepository::update_header(&test_ctx.conn, &test_ctx.site, key, &update) .unwrap(); - let result = repo - .begin_fetch_next_page(&test_ctx.conn, &test_ctx.site, key) - .unwrap(); + let result = + ListMetadataRepository::begin_fetch_next_page(&test_ctx.conn, &test_ctx.site, key) + .unwrap(); assert!(result.is_none()); } #[rstest] fn test_begin_fetch_next_page_returns_info_when_more_pages(test_ctx: TestContext) { - let repo = list_metadata_repo(); let key = "edit:posts:publish"; // Set up header with more pages available @@ -1265,12 +1205,12 @@ mod tests { current_page: 2, per_page: 20, }; - repo.update_header(&test_ctx.conn, &test_ctx.site, key, &update) + ListMetadataRepository::update_header(&test_ctx.conn, &test_ctx.site, key, &update) .unwrap(); - let result = repo - .begin_fetch_next_page(&test_ctx.conn, &test_ctx.site, key) - .unwrap(); + let result = + 
ListMetadataRepository::begin_fetch_next_page(&test_ctx.conn, &test_ctx.site, key) + .unwrap(); assert!(result.is_some()); let info = result.unwrap(); @@ -1278,42 +1218,38 @@ mod tests { assert_eq!(info.per_page, 20); // Verify state changed to FetchingNextPage - let state = repo - .get_state_by_key(&test_ctx.conn, &test_ctx.site, key) - .unwrap(); + let state = + ListMetadataRepository::get_state_by_key(&test_ctx.conn, &test_ctx.site, key).unwrap(); assert_eq!(state, ListState::FetchingNextPage); } #[rstest] fn test_complete_sync_sets_state_to_idle(test_ctx: TestContext) { - let repo = list_metadata_repo(); let key = "edit:posts:publish"; - let info = repo - .begin_refresh(&test_ctx.conn, &test_ctx.site, key) - .unwrap(); - repo.complete_sync(&test_ctx.conn, info.list_metadata_id) - .unwrap(); + let info = + ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, key).unwrap(); + ListMetadataRepository::complete_sync(&test_ctx.conn, info.list_metadata_id).unwrap(); - let state = repo - .get_state_by_key(&test_ctx.conn, &test_ctx.site, key) - .unwrap(); + let state = + ListMetadataRepository::get_state_by_key(&test_ctx.conn, &test_ctx.site, key).unwrap(); assert_eq!(state, ListState::Idle); } #[rstest] fn test_complete_sync_with_error_sets_state_and_message(test_ctx: TestContext) { - let repo = list_metadata_repo(); let key = "edit:posts:publish"; - let info = repo - .begin_refresh(&test_ctx.conn, &test_ctx.site, key) - .unwrap(); - repo.complete_sync_with_error(&test_ctx.conn, info.list_metadata_id, "Network timeout") - .unwrap(); + let info = + ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, key).unwrap(); + ListMetadataRepository::complete_sync_with_error( + &test_ctx.conn, + info.list_metadata_id, + "Network timeout", + ) + .unwrap(); - let state_record = repo - .get_state(&test_ctx.conn, info.list_metadata_id) + let state_record = ListMetadataRepository::get_state(&test_ctx.conn, info.list_metadata_id) .unwrap() .unwrap(); 
assert_eq!(state_record.state, ListState::Error); @@ -1325,13 +1261,11 @@ mod tests { #[rstest] fn test_version_check_detects_stale_operation(test_ctx: TestContext) { - let repo = list_metadata_repo(); let key = "edit:posts:publish"; // Start a refresh (version becomes 1) - let refresh_info = repo - .begin_refresh(&test_ctx.conn, &test_ctx.site, key) - .unwrap(); + let refresh_info = + ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, key).unwrap(); assert_eq!(refresh_info.version, 1); // Update header to simulate page 1 loaded @@ -1341,26 +1275,29 @@ mod tests { current_page: 1, per_page: 20, }; - repo.update_header(&test_ctx.conn, &test_ctx.site, key, &update) + ListMetadataRepository::update_header(&test_ctx.conn, &test_ctx.site, key, &update) .unwrap(); - repo.complete_sync(&test_ctx.conn, refresh_info.list_metadata_id) + ListMetadataRepository::complete_sync(&test_ctx.conn, refresh_info.list_metadata_id) .unwrap(); // Start load-next-page (captures version = 1) - let next_page_info = repo - .begin_fetch_next_page(&test_ctx.conn, &test_ctx.site, key) - .unwrap() - .unwrap(); + let next_page_info = + ListMetadataRepository::begin_fetch_next_page(&test_ctx.conn, &test_ctx.site, key) + .unwrap() + .unwrap(); let captured_version = next_page_info.version; // Another refresh happens (version becomes 2) - repo.begin_refresh(&test_ctx.conn, &test_ctx.site, key) - .unwrap(); + ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, key).unwrap(); // Version check should fail (stale) - let is_valid = repo - .check_version(&test_ctx.conn, &test_ctx.site, key, captured_version) - .unwrap(); + let is_valid = ListMetadataRepository::check_version( + &test_ctx.conn, + &test_ctx.site, + key, + captured_version, + ) + .unwrap(); assert!(!is_valid, "Version should not match after refresh"); } } From 4b1e6cb85a19224bd1b24f4667461773549b372f Mon Sep 17 00:00:00 2001 From: Oguz Kocer Date: Thu, 18 Dec 2025 12:07:04 -0500 Subject: [PATCH 05/25] Use 
batch insert for list metadata items Replaces individual INSERT statements with a single batch INSERT for better performance when inserting multiple items. Uses functional style with try_for_each and flat_map. Items are chunked to stay under SQLite's variable limit (999). --- .../src/repository/list_metadata.rs | 53 +++++++++++-------- 1 file changed, 32 insertions(+), 21 deletions(-) diff --git a/wp_mobile_cache/src/repository/list_metadata.rs b/wp_mobile_cache/src/repository/list_metadata.rs index b0bb58654..9a03f01c7 100644 --- a/wp_mobile_cache/src/repository/list_metadata.rs +++ b/wp_mobile_cache/src/repository/list_metadata.rs @@ -255,7 +255,7 @@ impl ListMetadataRepository { Self::insert_items(executor, site, key, items) } - /// Internal helper to insert items. + /// Internal helper to insert items using batch insert for better performance. fn insert_items( executor: &impl QueryExecutor, site: &DbSite, @@ -266,26 +266,37 @@ impl ListMetadataRepository { return Ok(()); } - let insert_sql = format!( - "INSERT INTO {} (db_site_id, key, entity_id, modified_gmt, parent, menu_order) VALUES (?, ?, ?, ?, ?, ?)", - Self::items_table().table_name() - ); - - for item in items { - executor.execute( - &insert_sql, - rusqlite::params![ - site.row_id, - key, - item.entity_id, - item.modified_gmt, - item.parent, - item.menu_order - ], - )?; - } - - Ok(()) + // SQLite has a variable limit (default 999). Each item uses 6 variables, + // so batch in chunks of ~150 items to stay well under the limit. 
+ const BATCH_SIZE: usize = 150; + + items.chunks(BATCH_SIZE).try_for_each(|chunk| { + let placeholders = vec!["(?, ?, ?, ?, ?, ?)"; chunk.len()].join(", "); + let sql = format!( + "INSERT INTO {} (db_site_id, key, entity_id, modified_gmt, parent, menu_order) VALUES {}", + Self::items_table().table_name(), + placeholders + ); + + let params: Vec<Box<dyn rusqlite::ToSql>> = chunk + .iter() + .flat_map(|item| -> [Box<dyn rusqlite::ToSql>; 6] { + [ + Box::new(site.row_id), + Box::new(key.to_string()), + Box::new(item.entity_id), + Box::new(item.modified_gmt.clone()), + Box::new(item.parent), + Box::new(item.menu_order), + ] + }) + .collect(); + + let param_refs: Vec<&dyn rusqlite::ToSql> = + params.iter().map(|p| p.as_ref()).collect(); + executor.execute(&sql, param_refs.as_slice())?; + Ok(()) + }) } /// Update header pagination info. From b271746b0cb4930970a84339a5809fd490e59505 Mon Sep 17 00:00:00 2001 From: Oguz Kocer Date: Thu, 18 Dec 2025 12:22:40 -0500 Subject: [PATCH 06/25] Use JOIN query internally in get_state_by_key Replaces two separate queries (get_header + get_state) with a single JOIN query via get_header_with_state for better efficiency. --- wp_mobile_cache/src/repository/list_metadata.rs | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/wp_mobile_cache/src/repository/list_metadata.rs b/wp_mobile_cache/src/repository/list_metadata.rs index 9a03f01c7..f29eb9eb4 100644 --- a/wp_mobile_cache/src/repository/list_metadata.rs +++ b/wp_mobile_cache/src/repository/list_metadata.rs @@ -126,21 +126,15 @@ impl ListMetadataRepository { /// Get the current sync state for a list by site and key. /// - /// Convenience method that looks up the list_metadata_id first. + /// Uses a JOIN query internally for efficiency. /// Returns ListState::Idle if the list or state doesn't exist.
pub fn get_state_by_key( executor: &impl QueryExecutor, site: &DbSite, key: &str, ) -> Result<ListState, SqliteDbError> { - let header = Self::get_header(executor, site, key)?; - match header { - Some(h) => { - let state = Self::get_state(executor, h.row_id)?; - Ok(state.map(|s| s.state).unwrap_or(ListState::Idle)) - } - None => Ok(ListState::Idle), - } + Self::get_header_with_state(executor, site, key) + .map(|opt| opt.map(|h| h.state).unwrap_or(ListState::Idle)) } /// Get header with state in a single JOIN query. From 6fc0eb068f8cfe09befaef8a738f3c914e79ba8f Mon Sep 17 00:00:00 2001 From: Oguz Kocer Date: Thu, 18 Dec 2025 12:31:53 -0500 Subject: [PATCH 07/25] Add ListKey newtype for type-safe list key handling Replaces raw `&str` parameters with `&ListKey` throughout the repository API. This prevents accidental misuse of arbitrary strings as list keys and makes the API more self-documenting. The ListKey type provides: - Type safety at compile time - Conversion from &str and String via From trait - as_str() for SQL parameter usage - Display impl for debug output --- wp_mobile_cache/src/list_metadata.rs | 44 +++ .../src/repository/list_metadata.rs | 273 ++++++++++-------- 2 files changed, 194 insertions(+), 123 deletions(-) diff --git a/wp_mobile_cache/src/list_metadata.rs b/wp_mobile_cache/src/list_metadata.rs index 97c757f13..6a88a31a0 100644 --- a/wp_mobile_cache/src/list_metadata.rs +++ b/wp_mobile_cache/src/list_metadata.rs @@ -1,5 +1,49 @@ use crate::RowId; use rusqlite::types::{FromSql, FromSqlResult, ToSql, ToSqlOutput}; +use std::fmt; + +/// Type-safe wrapper for list keys. +/// +/// List keys identify specific lists, e.g., `"edit:posts:publish"` or `"view:comments"`. +/// Using a newtype prevents accidental misuse of arbitrary strings as keys. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct ListKey(String); + +impl ListKey { + /// Create a new ListKey from a string. + pub fn new(key: impl Into<String>) -> Self { + Self(key.into()) + } + + /// Get the key as a string slice.
+ pub fn as_str(&self) -> &str { + &self.0 + } +} + +impl AsRef for ListKey { + fn as_ref(&self) -> &str { + &self.0 + } +} + +impl fmt::Display for ListKey { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +impl From<&str> for ListKey { + fn from(s: &str) -> Self { + Self(s.to_string()) + } +} + +impl From for ListKey { + fn from(s: String) -> Self { + Self(s) + } +} /// Represents list metadata header in the database. /// diff --git a/wp_mobile_cache/src/repository/list_metadata.rs b/wp_mobile_cache/src/repository/list_metadata.rs index f29eb9eb4..fbf879706 100644 --- a/wp_mobile_cache/src/repository/list_metadata.rs +++ b/wp_mobile_cache/src/repository/list_metadata.rs @@ -2,7 +2,8 @@ use crate::{ DbTable, RowId, SqliteDbError, db_types::db_site::DbSite, list_metadata::{ - DbListHeaderWithState, DbListMetadata, DbListMetadataItem, DbListMetadataState, ListState, + DbListHeaderWithState, DbListMetadata, DbListMetadataItem, DbListMetadataState, ListKey, + ListState, }, repository::QueryExecutor, }; @@ -39,14 +40,14 @@ impl ListMetadataRepository { pub fn get_header( executor: &impl QueryExecutor, site: &DbSite, - key: &str, + key: &ListKey, ) -> Result, SqliteDbError> { let sql = format!( "SELECT * FROM {} WHERE db_site_id = ? AND key = ?", Self::header_table().table_name() ); let mut stmt = executor.prepare(&sql)?; - let mut rows = stmt.query_map(rusqlite::params![site.row_id, key], |row| { + let mut rows = stmt.query_map(rusqlite::params![site.row_id, key.as_str()], |row| { DbListMetadata::from_row(row) .map_err(|e| rusqlite::Error::ToSqlConversionFailure(Box::new(e))) })?; @@ -64,7 +65,7 @@ impl ListMetadataRepository { pub fn get_or_create( executor: &impl QueryExecutor, site: &DbSite, - key: &str, + key: &ListKey, ) -> Result { // Try to get existing if let Some(header) = Self::get_header(executor, site, key)? 
{ @@ -76,7 +77,7 @@ impl ListMetadataRepository { "INSERT INTO {} (db_site_id, key, current_page, per_page, version) VALUES (?, ?, 0, 20, 0)", Self::header_table().table_name() ); - executor.execute(&sql, rusqlite::params![site.row_id, key])?; + executor.execute(&sql, rusqlite::params![site.row_id, key.as_str()])?; Ok(executor.last_insert_rowid()) } @@ -85,14 +86,14 @@ impl ListMetadataRepository { pub fn get_items( executor: &impl QueryExecutor, site: &DbSite, - key: &str, + key: &ListKey, ) -> Result, SqliteDbError> { let sql = format!( "SELECT * FROM {} WHERE db_site_id = ? AND key = ? ORDER BY rowid", Self::items_table().table_name() ); let mut stmt = executor.prepare(&sql)?; - let rows = stmt.query_map(rusqlite::params![site.row_id, key], |row| { + let rows = stmt.query_map(rusqlite::params![site.row_id, key.as_str()], |row| { DbListMetadataItem::from_row(row) .map_err(|e| rusqlite::Error::ToSqlConversionFailure(Box::new(e))) })?; @@ -131,7 +132,7 @@ impl ListMetadataRepository { pub fn get_state_by_key( executor: &impl QueryExecutor, site: &DbSite, - key: &str, + key: &ListKey, ) -> Result { Self::get_header_with_state(executor, site, key) .map(|opt| opt.map(|h| h.state).unwrap_or(ListState::Idle)) @@ -146,7 +147,7 @@ impl ListMetadataRepository { pub fn get_header_with_state( executor: &impl QueryExecutor, site: &DbSite, - key: &str, + key: &ListKey, ) -> Result, SqliteDbError> { let sql = format!( "SELECT m.total_pages, m.total_items, m.current_page, m.per_page, s.state, s.error_message \ @@ -157,7 +158,7 @@ impl ListMetadataRepository { Self::state_table().table_name() ); let mut stmt = executor.prepare(&sql)?; - let mut rows = stmt.query_map(rusqlite::params![site.row_id, key], |row| { + let mut rows = stmt.query_map(rusqlite::params![site.row_id, key.as_str()], |row| { DbListHeaderWithState::from_row(row) .map_err(|e| rusqlite::Error::ToSqlConversionFailure(Box::new(e))) })?; @@ -174,7 +175,7 @@ impl ListMetadataRepository { pub fn get_version( executor: 
&impl QueryExecutor, site: &DbSite, - key: &str, + key: &ListKey, ) -> Result { let header = Self::get_header(executor, site, key)?; Ok(header.map(|h| h.version).unwrap_or(0)) @@ -187,7 +188,7 @@ impl ListMetadataRepository { pub fn check_version( executor: &impl QueryExecutor, site: &DbSite, - key: &str, + key: &ListKey, expected_version: i64, ) -> Result { let current_version = Self::get_version(executor, site, key)?; @@ -198,15 +199,17 @@ impl ListMetadataRepository { pub fn get_item_count( executor: &impl QueryExecutor, site: &DbSite, - key: &str, + key: &ListKey, ) -> Result { let sql = format!( "SELECT COUNT(*) FROM {} WHERE db_site_id = ? AND key = ?", Self::items_table().table_name() ); let mut stmt = executor.prepare(&sql)?; - stmt.query_row(rusqlite::params![site.row_id, key], |row| row.get(0)) - .map_err(SqliteDbError::from) + stmt.query_row(rusqlite::params![site.row_id, key.as_str()], |row| { + row.get(0) + }) + .map_err(SqliteDbError::from) } // ============================================================ @@ -220,7 +223,7 @@ impl ListMetadataRepository { pub fn set_items( executor: &impl QueryExecutor, site: &DbSite, - key: &str, + key: &ListKey, items: &[ListMetadataItemInput], ) -> Result<(), SqliteDbError> { // Delete existing items @@ -228,7 +231,7 @@ impl ListMetadataRepository { "DELETE FROM {} WHERE db_site_id = ? 
AND key = ?", Self::items_table().table_name() ); - executor.execute(&delete_sql, rusqlite::params![site.row_id, key])?; + executor.execute(&delete_sql, rusqlite::params![site.row_id, key.as_str()])?; // Insert new items Self::insert_items(executor, site, key, items)?; @@ -243,7 +246,7 @@ impl ListMetadataRepository { pub fn append_items( executor: &impl QueryExecutor, site: &DbSite, - key: &str, + key: &ListKey, items: &[ListMetadataItemInput], ) -> Result<(), SqliteDbError> { Self::insert_items(executor, site, key, items) @@ -253,7 +256,7 @@ impl ListMetadataRepository { fn insert_items( executor: &impl QueryExecutor, site: &DbSite, - key: &str, + key: &ListKey, items: &[ListMetadataItemInput], ) -> Result<(), SqliteDbError> { if items.is_empty() { @@ -277,7 +280,7 @@ impl ListMetadataRepository { .flat_map(|item| -> [Box; 6] { [ Box::new(site.row_id), - Box::new(key.to_string()), + Box::new(key.as_str().to_string()), Box::new(item.entity_id), Box::new(item.modified_gmt.clone()), Box::new(item.parent), @@ -297,7 +300,7 @@ impl ListMetadataRepository { pub fn update_header( executor: &impl QueryExecutor, site: &DbSite, - key: &str, + key: &ListKey, update: &ListMetadataHeaderUpdate, ) -> Result<(), SqliteDbError> { // Ensure header exists @@ -316,7 +319,7 @@ impl ListMetadataRepository { update.current_page, update.per_page, site.row_id, - key + key.as_str() ], )?; @@ -353,7 +356,7 @@ impl ListMetadataRepository { pub fn update_state_by_key( executor: &impl QueryExecutor, site: &DbSite, - key: &str, + key: &ListKey, state: ListState, error_message: Option<&str>, ) -> Result<(), SqliteDbError> { @@ -367,7 +370,7 @@ impl ListMetadataRepository { pub fn increment_version( executor: &impl QueryExecutor, site: &DbSite, - key: &str, + key: &ListKey, ) -> Result { // Ensure header exists Self::get_or_create(executor, site, key)?; @@ -377,7 +380,7 @@ impl ListMetadataRepository { Self::header_table().table_name() ); - executor.execute(&sql, rusqlite::params![site.row_id, 
key])?; + executor.execute(&sql, rusqlite::params![site.row_id, key.as_str()])?; // Return the new version Self::get_version(executor, site, key) @@ -387,21 +390,27 @@ impl ListMetadataRepository { pub fn delete_list( executor: &impl QueryExecutor, site: &DbSite, - key: &str, + key: &ListKey, ) -> Result<(), SqliteDbError> { // Delete items first (no FK constraint to header) let delete_items_sql = format!( "DELETE FROM {} WHERE db_site_id = ? AND key = ?", Self::items_table().table_name() ); - executor.execute(&delete_items_sql, rusqlite::params![site.row_id, key])?; + executor.execute( + &delete_items_sql, + rusqlite::params![site.row_id, key.as_str()], + )?; // Delete header (state will be cascade deleted via FK) let delete_header_sql = format!( "DELETE FROM {} WHERE db_site_id = ? AND key = ?", Self::header_table().table_name() ); - executor.execute(&delete_header_sql, rusqlite::params![site.row_id, key])?; + executor.execute( + &delete_header_sql, + rusqlite::params![site.row_id, key.as_str()], + )?; Ok(()) } @@ -422,7 +431,7 @@ impl ListMetadataRepository { pub fn begin_refresh( executor: &impl QueryExecutor, site: &DbSite, - key: &str, + key: &ListKey, ) -> Result { // Ensure header exists and get its ID let list_metadata_id = Self::get_or_create(executor, site, key)?; @@ -461,7 +470,7 @@ impl ListMetadataRepository { pub fn begin_fetch_next_page( executor: &impl QueryExecutor, site: &DbSite, - key: &str, + key: &ListKey, ) -> Result, SqliteDbError> { let header = match Self::get_header(executor, site, key)? 
{ Some(h) => h, @@ -529,7 +538,7 @@ impl ListMetadataRepository { pub fn get_list_metadata_id( executor: &impl QueryExecutor, site: &DbSite, - key: &str, + key: &ListKey, ) -> Result, SqliteDbError> { Self::get_header(executor, site, key).map(|opt| opt.map(|h| h.row_id)) } @@ -564,7 +573,7 @@ impl ListMetadataRepository { pub fn is_item_row_for_key( executor: &impl QueryExecutor, site: &DbSite, - key: &str, + key: &ListKey, item_row_id: RowId, ) -> Result { let sql = format!( @@ -572,7 +581,10 @@ impl ListMetadataRepository { Self::items_table().table_name() ); let mut stmt = executor.prepare(&sql)?; - let result = stmt.query_row(rusqlite::params![item_row_id, site.row_id, key], |_| Ok(())); + let result = stmt.query_row( + rusqlite::params![item_row_id, site.row_id, key.as_str()], + |_| Ok(()), + ); match result { Ok(()) => Ok(true), @@ -640,26 +652,29 @@ mod tests { #[rstest] fn test_get_header_returns_none_for_non_existent(test_ctx: TestContext) { - let result = - ListMetadataRepository::get_header(&test_ctx.conn, &test_ctx.site, "nonexistent:key") - .unwrap(); + let result = ListMetadataRepository::get_header( + &test_ctx.conn, + &test_ctx.site, + &ListKey::from("nonexistent:key"), + ) + .unwrap(); assert!(result.is_none()); } #[rstest] fn test_get_or_create_creates_new_header(test_ctx: TestContext) { - let key = "edit:posts:publish"; + let key = ListKey::from("edit:posts:publish"); // Create new header let row_id = - ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, key).unwrap(); + ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key).unwrap(); // Verify it was created with defaults - let header = ListMetadataRepository::get_header(&test_ctx.conn, &test_ctx.site, key) + let header = ListMetadataRepository::get_header(&test_ctx.conn, &test_ctx.site, &key) .unwrap() .unwrap(); assert_eq!(header.row_id, row_id); - assert_eq!(header.key, key); + assert_eq!(header.key, key.as_str()); assert_eq!(header.current_page, 0); 
assert_eq!(header.per_page, 20); assert_eq!(header.version, 0); @@ -669,24 +684,27 @@ mod tests { #[rstest] fn test_get_or_create_returns_existing_header(test_ctx: TestContext) { - let key = "edit:posts:draft"; + let key = ListKey::from("edit:posts:draft"); // Create initial header let first_row_id = - ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, key).unwrap(); + ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key).unwrap(); // Get or create again should return same rowid let second_row_id = - ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, key).unwrap(); + ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key).unwrap(); assert_eq!(first_row_id, second_row_id); } #[rstest] fn test_get_items_returns_empty_for_non_existent_list(test_ctx: TestContext) { - let items = - ListMetadataRepository::get_items(&test_ctx.conn, &test_ctx.site, "nonexistent:key") - .unwrap(); + let items = ListMetadataRepository::get_items( + &test_ctx.conn, + &test_ctx.site, + &ListKey::from("nonexistent:key"), + ) + .unwrap(); assert!(items.is_empty()); } @@ -701,7 +719,7 @@ mod tests { let state = ListMetadataRepository::get_state_by_key( &test_ctx.conn, &test_ctx.site, - "nonexistent:key", + &ListKey::from("nonexistent:key"), ) .unwrap(); assert_eq!(state, ListState::Idle); @@ -709,35 +727,41 @@ mod tests { #[rstest] fn test_get_version_returns_zero_for_non_existent_list(test_ctx: TestContext) { - let version = - ListMetadataRepository::get_version(&test_ctx.conn, &test_ctx.site, "nonexistent:key") - .unwrap(); + let version = ListMetadataRepository::get_version( + &test_ctx.conn, + &test_ctx.site, + &ListKey::from("nonexistent:key"), + ) + .unwrap(); assert_eq!(version, 0); } #[rstest] fn test_check_version_returns_true_for_matching_version(test_ctx: TestContext) { - let key = "edit:posts:publish"; + let key = ListKey::from("edit:posts:publish"); // Create header (version = 0) - 
ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, key).unwrap(); + ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key).unwrap(); // Check version matches let matches = - ListMetadataRepository::check_version(&test_ctx.conn, &test_ctx.site, key, 0).unwrap(); + ListMetadataRepository::check_version(&test_ctx.conn, &test_ctx.site, &key, 0).unwrap(); assert!(matches); // Check version doesn't match let matches = - ListMetadataRepository::check_version(&test_ctx.conn, &test_ctx.site, key, 1).unwrap(); + ListMetadataRepository::check_version(&test_ctx.conn, &test_ctx.site, &key, 1).unwrap(); assert!(!matches); } #[rstest] fn test_get_item_count_returns_zero_for_empty_list(test_ctx: TestContext) { - let count = - ListMetadataRepository::get_item_count(&test_ctx.conn, &test_ctx.site, "empty:list") - .unwrap(); + let count = ListMetadataRepository::get_item_count( + &test_ctx.conn, + &test_ctx.site, + &ListKey::from("empty:list"), + ) + .unwrap(); assert_eq!(count, 0); } @@ -787,7 +811,7 @@ mod tests { #[rstest] fn test_set_items_inserts_new_items(test_ctx: TestContext) { - let key = "edit:posts:publish"; + let key = ListKey::from("edit:posts:publish"); let items = vec![ ListMetadataItemInput { @@ -810,10 +834,10 @@ mod tests { }, ]; - ListMetadataRepository::set_items(&test_ctx.conn, &test_ctx.site, key, &items).unwrap(); + ListMetadataRepository::set_items(&test_ctx.conn, &test_ctx.site, &key, &items).unwrap(); let retrieved = - ListMetadataRepository::get_items(&test_ctx.conn, &test_ctx.site, key).unwrap(); + ListMetadataRepository::get_items(&test_ctx.conn, &test_ctx.site, &key).unwrap(); assert_eq!(retrieved.len(), 3); assert_eq!(retrieved[0].entity_id, 100); assert_eq!(retrieved[0].parent, Some(50)); @@ -827,7 +851,7 @@ mod tests { #[rstest] fn test_set_items_replaces_existing_items(test_ctx: TestContext) { - let key = "edit:posts:draft"; + let key = ListKey::from("edit:posts:draft"); // Insert initial items let 
initial_items = vec![ @@ -844,7 +868,7 @@ mod tests { menu_order: None, }, ]; - ListMetadataRepository::set_items(&test_ctx.conn, &test_ctx.site, key, &initial_items) + ListMetadataRepository::set_items(&test_ctx.conn, &test_ctx.site, &key, &initial_items) .unwrap(); // Replace with new items @@ -868,10 +892,11 @@ mod tests { menu_order: None, }, ]; - ListMetadataRepository::set_items(&test_ctx.conn, &test_ctx.site, key, &new_items).unwrap(); + ListMetadataRepository::set_items(&test_ctx.conn, &test_ctx.site, &key, &new_items) + .unwrap(); let retrieved = - ListMetadataRepository::get_items(&test_ctx.conn, &test_ctx.site, key).unwrap(); + ListMetadataRepository::get_items(&test_ctx.conn, &test_ctx.site, &key).unwrap(); assert_eq!(retrieved.len(), 3); assert_eq!(retrieved[0].entity_id, 10); assert_eq!(retrieved[1].entity_id, 20); @@ -880,7 +905,7 @@ mod tests { #[rstest] fn test_append_items_adds_to_existing(test_ctx: TestContext) { - let key = "edit:posts:pending"; + let key = ListKey::from("edit:posts:pending"); // Insert initial items let initial_items = vec![ @@ -897,7 +922,7 @@ mod tests { menu_order: None, }, ]; - ListMetadataRepository::set_items(&test_ctx.conn, &test_ctx.site, key, &initial_items) + ListMetadataRepository::set_items(&test_ctx.conn, &test_ctx.site, &key, &initial_items) .unwrap(); // Append more items @@ -915,11 +940,11 @@ mod tests { menu_order: None, }, ]; - ListMetadataRepository::append_items(&test_ctx.conn, &test_ctx.site, key, &more_items) + ListMetadataRepository::append_items(&test_ctx.conn, &test_ctx.site, &key, &more_items) .unwrap(); let retrieved = - ListMetadataRepository::get_items(&test_ctx.conn, &test_ctx.site, key).unwrap(); + ListMetadataRepository::get_items(&test_ctx.conn, &test_ctx.site, &key).unwrap(); assert_eq!(retrieved.len(), 4); assert_eq!(retrieved[0].entity_id, 1); assert_eq!(retrieved[1].entity_id, 2); @@ -929,7 +954,7 @@ mod tests { #[rstest] fn test_update_header_updates_pagination(test_ctx: TestContext) { - 
let key = "edit:posts:publish"; + let key = ListKey::from("edit:posts:publish"); let update = ListMetadataHeaderUpdate { total_pages: Some(5), @@ -938,10 +963,10 @@ mod tests { per_page: 20, }; - ListMetadataRepository::update_header(&test_ctx.conn, &test_ctx.site, key, &update) + ListMetadataRepository::update_header(&test_ctx.conn, &test_ctx.site, &key, &update) .unwrap(); - let header = ListMetadataRepository::get_header(&test_ctx.conn, &test_ctx.site, key) + let header = ListMetadataRepository::get_header(&test_ctx.conn, &test_ctx.site, &key) .unwrap() .unwrap(); assert_eq!(header.total_pages, Some(5)); @@ -953,10 +978,10 @@ mod tests { #[rstest] fn test_update_state_creates_new_state(test_ctx: TestContext) { - let key = "edit:posts:publish"; + let key = ListKey::from("edit:posts:publish"); let list_id = - ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, key).unwrap(); + ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key).unwrap(); ListMetadataRepository::update_state( &test_ctx.conn, list_id, @@ -974,10 +999,10 @@ mod tests { #[rstest] fn test_update_state_updates_existing_state(test_ctx: TestContext) { - let key = "edit:posts:draft"; + let key = ListKey::from("edit:posts:draft"); let list_id = - ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, key).unwrap(); + ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key).unwrap(); // Set initial state ListMetadataRepository::update_state( @@ -1006,44 +1031,46 @@ mod tests { #[rstest] fn test_update_state_by_key(test_ctx: TestContext) { - let key = "edit:posts:pending"; + let key = ListKey::from("edit:posts:pending"); ListMetadataRepository::update_state_by_key( &test_ctx.conn, &test_ctx.site, - key, + &key, ListState::FetchingNextPage, None, ) .unwrap(); let state = - ListMetadataRepository::get_state_by_key(&test_ctx.conn, &test_ctx.site, key).unwrap(); + ListMetadataRepository::get_state_by_key(&test_ctx.conn, &test_ctx.site, 
&key).unwrap(); assert_eq!(state, ListState::FetchingNextPage); } #[rstest] fn test_increment_version(test_ctx: TestContext) { - let key = "edit:posts:publish"; + let key = ListKey::from("edit:posts:publish"); // Create header (version starts at 0) - ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, key).unwrap(); + ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key).unwrap(); let initial_version = - ListMetadataRepository::get_version(&test_ctx.conn, &test_ctx.site, key).unwrap(); + ListMetadataRepository::get_version(&test_ctx.conn, &test_ctx.site, &key).unwrap(); assert_eq!(initial_version, 0); // Increment version let new_version = - ListMetadataRepository::increment_version(&test_ctx.conn, &test_ctx.site, key).unwrap(); + ListMetadataRepository::increment_version(&test_ctx.conn, &test_ctx.site, &key) + .unwrap(); assert_eq!(new_version, 1); // Increment again let newer_version = - ListMetadataRepository::increment_version(&test_ctx.conn, &test_ctx.site, key).unwrap(); + ListMetadataRepository::increment_version(&test_ctx.conn, &test_ctx.site, &key) + .unwrap(); assert_eq!(newer_version, 2); // Verify last_first_page_fetched_at is set - let header = ListMetadataRepository::get_header(&test_ctx.conn, &test_ctx.site, key) + let header = ListMetadataRepository::get_header(&test_ctx.conn, &test_ctx.site, &key) .unwrap() .unwrap(); assert!(header.last_first_page_fetched_at.is_some()); @@ -1051,50 +1078,50 @@ mod tests { #[rstest] fn test_delete_list_removes_all_data(test_ctx: TestContext) { - let key = "edit:posts:publish"; + let key = ListKey::from("edit:posts:publish"); // Create header and add items and state let list_id = - ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, key).unwrap(); + ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key).unwrap(); let items = vec![ListMetadataItemInput { entity_id: 1, modified_gmt: None, parent: None, menu_order: None, }]; - 
ListMetadataRepository::set_items(&test_ctx.conn, &test_ctx.site, key, &items).unwrap(); + ListMetadataRepository::set_items(&test_ctx.conn, &test_ctx.site, &key, &items).unwrap(); ListMetadataRepository::update_state(&test_ctx.conn, list_id, ListState::Idle, None) .unwrap(); // Verify data exists assert!( - ListMetadataRepository::get_header(&test_ctx.conn, &test_ctx.site, key) + ListMetadataRepository::get_header(&test_ctx.conn, &test_ctx.site, &key) .unwrap() .is_some() ); assert_eq!( - ListMetadataRepository::get_item_count(&test_ctx.conn, &test_ctx.site, key).unwrap(), + ListMetadataRepository::get_item_count(&test_ctx.conn, &test_ctx.site, &key).unwrap(), 1 ); // Delete the list - ListMetadataRepository::delete_list(&test_ctx.conn, &test_ctx.site, key).unwrap(); + ListMetadataRepository::delete_list(&test_ctx.conn, &test_ctx.site, &key).unwrap(); // Verify everything is deleted assert!( - ListMetadataRepository::get_header(&test_ctx.conn, &test_ctx.site, key) + ListMetadataRepository::get_header(&test_ctx.conn, &test_ctx.site, &key) .unwrap() .is_none() ); assert_eq!( - ListMetadataRepository::get_item_count(&test_ctx.conn, &test_ctx.site, key).unwrap(), + ListMetadataRepository::get_item_count(&test_ctx.conn, &test_ctx.site, &key).unwrap(), 0 ); } #[rstest] fn test_items_preserve_order(test_ctx: TestContext) { - let key = "edit:posts:ordered"; + let key = ListKey::from("edit:posts:ordered"); // Insert items in specific order let items: Vec = (1..=10) @@ -1106,10 +1133,10 @@ mod tests { }) .collect(); - ListMetadataRepository::set_items(&test_ctx.conn, &test_ctx.site, key, &items).unwrap(); + ListMetadataRepository::set_items(&test_ctx.conn, &test_ctx.site, &key, &items).unwrap(); let retrieved = - ListMetadataRepository::get_items(&test_ctx.conn, &test_ctx.site, key).unwrap(); + ListMetadataRepository::get_items(&test_ctx.conn, &test_ctx.site, &key).unwrap(); assert_eq!(retrieved.len(), 10); // Verify order is preserved (rowid ordering) @@ -1124,10 +1151,10 
@@ mod tests { #[rstest] fn test_begin_refresh_creates_header_and_sets_state(test_ctx: TestContext) { - let key = "edit:posts:publish"; + let key = ListKey::from("edit:posts:publish"); let info = - ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, key).unwrap(); + ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, &key).unwrap(); // Verify version was incremented (from 0 to 1) assert_eq!(info.version, 1); @@ -1135,23 +1162,23 @@ mod tests { // Verify state is FetchingFirstPage let state = - ListMetadataRepository::get_state_by_key(&test_ctx.conn, &test_ctx.site, key).unwrap(); + ListMetadataRepository::get_state_by_key(&test_ctx.conn, &test_ctx.site, &key).unwrap(); assert_eq!(state, ListState::FetchingFirstPage); } #[rstest] fn test_begin_refresh_increments_version_each_time(test_ctx: TestContext) { - let key = "edit:posts:draft"; + let key = ListKey::from("edit:posts:draft"); let info1 = - ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, key).unwrap(); + ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, &key).unwrap(); assert_eq!(info1.version, 1); // Complete the first refresh ListMetadataRepository::complete_sync(&test_ctx.conn, info1.list_metadata_id).unwrap(); let info2 = - ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, key).unwrap(); + ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, &key).unwrap(); assert_eq!(info2.version, 2); } @@ -1160,7 +1187,7 @@ mod tests { let result = ListMetadataRepository::begin_fetch_next_page( &test_ctx.conn, &test_ctx.site, - "nonexistent", + &ListKey::from("nonexistent"), ) .unwrap(); assert!(result.is_none()); @@ -1168,20 +1195,20 @@ mod tests { #[rstest] fn test_begin_fetch_next_page_returns_none_when_no_pages_loaded(test_ctx: TestContext) { - let key = "edit:posts:publish"; + let key = ListKey::from("edit:posts:publish"); // Create header but don't set current_page - 
ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, key).unwrap(); + ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key).unwrap(); let result = - ListMetadataRepository::begin_fetch_next_page(&test_ctx.conn, &test_ctx.site, key) + ListMetadataRepository::begin_fetch_next_page(&test_ctx.conn, &test_ctx.site, &key) .unwrap(); assert!(result.is_none()); } #[rstest] fn test_begin_fetch_next_page_returns_none_at_last_page(test_ctx: TestContext) { - let key = "edit:posts:publish"; + let key = ListKey::from("edit:posts:publish"); // Set up header with current_page = total_pages let update = ListMetadataHeaderUpdate { @@ -1190,18 +1217,18 @@ mod tests { current_page: 3, per_page: 20, }; - ListMetadataRepository::update_header(&test_ctx.conn, &test_ctx.site, key, &update) + ListMetadataRepository::update_header(&test_ctx.conn, &test_ctx.site, &key, &update) .unwrap(); let result = - ListMetadataRepository::begin_fetch_next_page(&test_ctx.conn, &test_ctx.site, key) + ListMetadataRepository::begin_fetch_next_page(&test_ctx.conn, &test_ctx.site, &key) .unwrap(); assert!(result.is_none()); } #[rstest] fn test_begin_fetch_next_page_returns_info_when_more_pages(test_ctx: TestContext) { - let key = "edit:posts:publish"; + let key = ListKey::from("edit:posts:publish"); // Set up header with more pages available let update = ListMetadataHeaderUpdate { @@ -1210,11 +1237,11 @@ mod tests { current_page: 2, per_page: 20, }; - ListMetadataRepository::update_header(&test_ctx.conn, &test_ctx.site, key, &update) + ListMetadataRepository::update_header(&test_ctx.conn, &test_ctx.site, &key, &update) .unwrap(); let result = - ListMetadataRepository::begin_fetch_next_page(&test_ctx.conn, &test_ctx.site, key) + ListMetadataRepository::begin_fetch_next_page(&test_ctx.conn, &test_ctx.site, &key) .unwrap(); assert!(result.is_some()); @@ -1224,29 +1251,29 @@ mod tests { // Verify state changed to FetchingNextPage let state = - 
ListMetadataRepository::get_state_by_key(&test_ctx.conn, &test_ctx.site, key).unwrap(); + ListMetadataRepository::get_state_by_key(&test_ctx.conn, &test_ctx.site, &key).unwrap(); assert_eq!(state, ListState::FetchingNextPage); } #[rstest] fn test_complete_sync_sets_state_to_idle(test_ctx: TestContext) { - let key = "edit:posts:publish"; + let key = ListKey::from("edit:posts:publish"); let info = - ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, key).unwrap(); + ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, &key).unwrap(); ListMetadataRepository::complete_sync(&test_ctx.conn, info.list_metadata_id).unwrap(); let state = - ListMetadataRepository::get_state_by_key(&test_ctx.conn, &test_ctx.site, key).unwrap(); + ListMetadataRepository::get_state_by_key(&test_ctx.conn, &test_ctx.site, &key).unwrap(); assert_eq!(state, ListState::Idle); } #[rstest] fn test_complete_sync_with_error_sets_state_and_message(test_ctx: TestContext) { - let key = "edit:posts:publish"; + let key = ListKey::from("edit:posts:publish"); let info = - ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, key).unwrap(); + ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, &key).unwrap(); ListMetadataRepository::complete_sync_with_error( &test_ctx.conn, info.list_metadata_id, @@ -1266,11 +1293,11 @@ mod tests { #[rstest] fn test_version_check_detects_stale_operation(test_ctx: TestContext) { - let key = "edit:posts:publish"; + let key = ListKey::from("edit:posts:publish"); // Start a refresh (version becomes 1) let refresh_info = - ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, key).unwrap(); + ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, &key).unwrap(); assert_eq!(refresh_info.version, 1); // Update header to simulate page 1 loaded @@ -1280,26 +1307,26 @@ mod tests { current_page: 1, per_page: 20, }; - ListMetadataRepository::update_header(&test_ctx.conn, &test_ctx.site, 
key, &update) + ListMetadataRepository::update_header(&test_ctx.conn, &test_ctx.site, &key, &update) .unwrap(); ListMetadataRepository::complete_sync(&test_ctx.conn, refresh_info.list_metadata_id) .unwrap(); // Start load-next-page (captures version = 1) let next_page_info = - ListMetadataRepository::begin_fetch_next_page(&test_ctx.conn, &test_ctx.site, key) + ListMetadataRepository::begin_fetch_next_page(&test_ctx.conn, &test_ctx.site, &key) .unwrap() .unwrap(); let captured_version = next_page_info.version; // Another refresh happens (version becomes 2) - ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, key).unwrap(); + ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, &key).unwrap(); // Version check should fail (stale) let is_valid = ListMetadataRepository::check_version( &test_ctx.conn, &test_ctx.site, - key, + &key, captured_version, ) .unwrap(); From 4e307dc1f16d815910d6cfcd921352a08f6e87db Mon Sep 17 00:00:00 2001 From: Oguz Kocer Date: Thu, 18 Dec 2025 12:37:40 -0500 Subject: [PATCH 08/25] Simplify reset_stale_fetching_states and return Result - Condense doc comment, remove references to non-existent MetadataService - Return Result instead of logging internally - Ignore errors at call site with explanatory comment --- wp_mobile_cache/src/lib.rs | 99 ++++++++------------------------------ 1 file changed, 21 insertions(+), 78 deletions(-) diff --git a/wp_mobile_cache/src/lib.rs b/wp_mobile_cache/src/lib.rs index f5519874c..25f2e04bc 100644 --- a/wp_mobile_cache/src/lib.rs +++ b/wp_mobile_cache/src/lib.rs @@ -265,8 +265,9 @@ impl WpApiCache { let version = mgr.perform_migrations().map_err(SqliteDbError::from)?; // Reset stale fetching states after migrations complete. - // See `reset_stale_fetching_states` for rationale. 
- Self::reset_stale_fetching_states_internal(connection); + // Errors are ignored: this is a best-effort cleanup, and failure doesn't + // affect core functionality (worst case: UI shows stale loading state). + let _ = Self::reset_stale_fetching_states_internal(connection); Ok(version) }) @@ -309,85 +310,27 @@ impl WpApiCache { impl WpApiCache { /// Resets stale fetching states (`FetchingFirstPage`, `FetchingNextPage`) to `Idle`. /// - /// # Why This Is Needed + /// If the app terminates while a fetch is in progress, these transient states persist + /// in the database. On next launch, this could cause perpetual loading indicators or + /// blocked fetches. We reset them during `WpApiCache` initialization since it's + /// typically created once at app startup. /// - /// The `ListState` enum includes transient states that represent in-progress operations: - /// - `FetchingFirstPage` - A refresh/pull-to-refresh is in progress - /// - `FetchingNextPage` - A "load more" pagination fetch is in progress - /// - /// If the app is killed, crashes, or the process terminates while a fetch is in progress, - /// these states will persist in the database. On the next app launch: - /// - UI might show a perpetual loading indicator - /// - New fetch attempts might be blocked if code checks "already fetching" - /// - State is inconsistent with reality (no fetch is actually in progress) - /// - /// # Why We Reset in `WpApiCache` Initialization - /// - /// We considered several approaches: - /// - /// 1. **Reset in `MetadataService::new()`** - Rejected because `MetadataService` is not - /// a singleton. Multiple services (PostService, CommentService, etc.) each create - /// their own `MetadataService` instance. Resetting on each instantiation would - /// incorrectly reset states when a new service is created mid-session. - /// - /// 2. 
**Reset in `WpApiCache` initialization** (this approach) - Chosen because - /// `WpApiCache` is typically created once at app startup, making it the right - /// timing for session-boundary cleanup. - /// - /// 3. **Session tokens** - More complex: tag states with a session ID and treat - /// mismatched sessions as stale. Adds schema complexity for minimal benefit. - /// - /// 4. **In-memory only for fetching states** - Keep transient states in memory, - /// only persist `Idle`/`Error`. Adds complexity in state management. - /// - /// # Theoretical Issues - /// - /// This approach assumes `WpApiCache` is created once per app session. If an app - /// architecture creates multiple `WpApiCache` instances during a session (e.g., - /// recreating it after a user logs out and back in), this would reset in-progress - /// fetches. In practice: - /// - Most apps create `WpApiCache` once at startup - /// - If your architecture differs, consider wrapping this in a "first launch" check - /// or using a session token approach - /// - /// # Note on `Error` State - /// - /// We intentionally do NOT reset `Error` states. These represent completed (failed) - /// operations, not in-progress ones. Preserving them allows: - /// - UI to show "last sync failed" on launch - /// - Debugging by inspecting `error_message` - /// - /// If you need a fresh start, the user can trigger a refresh which will overwrite - /// the error state. - fn reset_stale_fetching_states_internal(connection: &mut Connection) { + /// Note: `Error` states are intentionally preserved for UI feedback and debugging. 
+ fn reset_stale_fetching_states_internal( + connection: &mut Connection, + ) -> Result { use crate::list_metadata::ListState; - // Reset both fetching states to idle - let result = connection.execute( - "UPDATE list_metadata_state SET state = ?1 WHERE state IN (?2, ?3)", - params![ - ListState::Idle as i32, - ListState::FetchingFirstPage as i32, - ListState::FetchingNextPage as i32, - ], - ); - - match result { - Ok(count) if count > 0 => { - eprintln!( - "WpApiCache: Reset {} stale fetching state(s) from previous session", - count - ); - } - Ok(_) => { - // No stale states found - normal case - } - Err(e) => { - // Log but don't fail - table might not exist yet on fresh install - // (though we run this after migrations, so it should exist) - eprintln!("WpApiCache: Failed to reset stale fetching states: {}", e); - } - } + connection + .execute( + "UPDATE list_metadata_state SET state = ?1 WHERE state IN (?2, ?3)", + params![ + ListState::Idle as i32, + ListState::FetchingFirstPage as i32, + ListState::FetchingNextPage as i32, + ], + ) + .map_err(SqliteDbError::from) } /// Execute a database operation with scoped access to the connection. From 94e05aae587a875776a184a9e868dd8b4b1399ff Mon Sep 17 00:00:00 2001 From: Oguz Kocer Date: Thu, 18 Dec 2025 12:47:44 -0500 Subject: [PATCH 09/25] Add FK from list_metadata_items to list_metadata Normalizes the schema by replacing (db_site_id, key) in items table with a foreign key to list_metadata. 
This: - Ensures referential integrity - Enables cascade delete (simplifies delete_list) - Reduces storage per item --- .../0007-create-list-metadata-tables.sql | 9 +-- .../src/db_types/db_list_metadata.rs | 14 ++-- wp_mobile_cache/src/list_metadata.rs | 6 +- .../src/repository/list_metadata.rs | 79 +++++++++---------- 4 files changed, 51 insertions(+), 57 deletions(-) diff --git a/wp_mobile_cache/migrations/0007-create-list-metadata-tables.sql b/wp_mobile_cache/migrations/0007-create-list-metadata-tables.sql index f51d9f43c..71b70fa14 100644 --- a/wp_mobile_cache/migrations/0007-create-list-metadata-tables.sql +++ b/wp_mobile_cache/migrations/0007-create-list-metadata-tables.sql @@ -19,18 +19,17 @@ CREATE UNIQUE INDEX idx_list_metadata_unique_key ON list_metadata(db_site_id, ke -- Table 2: List items (rowid = insertion order = display order) CREATE TABLE `list_metadata_items` ( `rowid` INTEGER PRIMARY KEY AUTOINCREMENT, - `db_site_id` INTEGER NOT NULL, - `key` TEXT NOT NULL, + `list_metadata_id` INTEGER NOT NULL, `entity_id` INTEGER NOT NULL, -- post/comment/etc ID `modified_gmt` TEXT, -- nullable for entities without it `parent` INTEGER, -- parent post ID (for hierarchical post types like pages) `menu_order` INTEGER, -- menu order (for hierarchical post types) - FOREIGN KEY (db_site_id) REFERENCES db_sites(id) ON DELETE CASCADE + FOREIGN KEY (list_metadata_id) REFERENCES list_metadata(rowid) ON DELETE CASCADE ) STRICT; -CREATE INDEX idx_list_metadata_items_key ON list_metadata_items(db_site_id, key); -CREATE INDEX idx_list_metadata_items_entity ON list_metadata_items(db_site_id, entity_id); +CREATE INDEX idx_list_metadata_items_list ON list_metadata_items(list_metadata_id); +CREATE INDEX idx_list_metadata_items_entity ON list_metadata_items(list_metadata_id, entity_id); -- Table 3: Sync state (FK to list_metadata, not duplicating key) CREATE TABLE `list_metadata_state` ( diff --git a/wp_mobile_cache/src/db_types/db_list_metadata.rs 
b/wp_mobile_cache/src/db_types/db_list_metadata.rs index bb448b17e..71915d9fb 100644 --- a/wp_mobile_cache/src/db_types/db_list_metadata.rs +++ b/wp_mobile_cache/src/db_types/db_list_metadata.rs @@ -56,12 +56,11 @@ impl DbListMetadata { #[derive(Debug, Clone, Copy)] pub enum ListMetadataItemColumn { Rowid = 0, - DbSiteId = 1, - Key = 2, - EntityId = 3, - ModifiedGmt = 4, - Parent = 5, - MenuOrder = 6, + ListMetadataId = 1, + EntityId = 2, + ModifiedGmt = 3, + Parent = 4, + MenuOrder = 5, } impl ColumnIndex for ListMetadataItemColumn { @@ -77,8 +76,7 @@ impl DbListMetadataItem { Ok(Self { row_id: row.get_column(Col::Rowid)?, - db_site_id: row.get_column(Col::DbSiteId)?, - key: row.get_column(Col::Key)?, + list_metadata_id: row.get_column(Col::ListMetadataId)?, entity_id: row.get_column(Col::EntityId)?, modified_gmt: row.get_column(Col::ModifiedGmt)?, parent: row.get_column(Col::Parent)?, diff --git a/wp_mobile_cache/src/list_metadata.rs b/wp_mobile_cache/src/list_metadata.rs index 6a88a31a0..cb23fd5b6 100644 --- a/wp_mobile_cache/src/list_metadata.rs +++ b/wp_mobile_cache/src/list_metadata.rs @@ -79,10 +79,8 @@ pub struct DbListMetadata { pub struct DbListMetadataItem { /// SQLite rowid (determines display order) pub row_id: RowId, - /// Database site ID - pub db_site_id: RowId, - /// List key this item belongs to - pub key: String, + /// Foreign key to list_metadata table + pub list_metadata_id: RowId, /// Entity ID (post ID, comment ID, etc.) pub entity_id: i64, /// Last modified timestamp (for staleness detection) diff --git a/wp_mobile_cache/src/repository/list_metadata.rs b/wp_mobile_cache/src/repository/list_metadata.rs index fbf879706..57ffacf77 100644 --- a/wp_mobile_cache/src/repository/list_metadata.rs +++ b/wp_mobile_cache/src/repository/list_metadata.rs @@ -88,12 +88,17 @@ impl ListMetadataRepository { site: &DbSite, key: &ListKey, ) -> Result, SqliteDbError> { + let list_metadata_id = match Self::get_header(executor, site, key)? 
{ + Some(header) => header.row_id, + None => return Ok(Vec::new()), + }; + let sql = format!( - "SELECT * FROM {} WHERE db_site_id = ? AND key = ? ORDER BY rowid", + "SELECT * FROM {} WHERE list_metadata_id = ? ORDER BY rowid", Self::items_table().table_name() ); let mut stmt = executor.prepare(&sql)?; - let rows = stmt.query_map(rusqlite::params![site.row_id, key.as_str()], |row| { + let rows = stmt.query_map(rusqlite::params![list_metadata_id], |row| { DbListMetadataItem::from_row(row) .map_err(|e| rusqlite::Error::ToSqlConversionFailure(Box::new(e))) })?; @@ -201,15 +206,18 @@ impl ListMetadataRepository { site: &DbSite, key: &ListKey, ) -> Result { + let list_metadata_id = match Self::get_header(executor, site, key)? { + Some(header) => header.row_id, + None => return Ok(0), + }; + let sql = format!( - "SELECT COUNT(*) FROM {} WHERE db_site_id = ? AND key = ?", + "SELECT COUNT(*) FROM {} WHERE list_metadata_id = ?", Self::items_table().table_name() ); let mut stmt = executor.prepare(&sql)?; - stmt.query_row(rusqlite::params![site.row_id, key.as_str()], |row| { - row.get(0) - }) - .map_err(SqliteDbError::from) + stmt.query_row(rusqlite::params![list_metadata_id], |row| row.get(0)) + .map_err(SqliteDbError::from) } // ============================================================ @@ -226,15 +234,17 @@ impl ListMetadataRepository { key: &ListKey, items: &[ListMetadataItemInput], ) -> Result<(), SqliteDbError> { + let list_metadata_id = Self::get_or_create(executor, site, key)?; + // Delete existing items let delete_sql = format!( - "DELETE FROM {} WHERE db_site_id = ? 
AND key = ?", + "DELETE FROM {} WHERE list_metadata_id = ?", Self::items_table().table_name() ); - executor.execute(&delete_sql, rusqlite::params![site.row_id, key.as_str()])?; + executor.execute(&delete_sql, rusqlite::params![list_metadata_id])?; // Insert new items - Self::insert_items(executor, site, key, items)?; + Self::insert_items(executor, list_metadata_id, items)?; Ok(()) } @@ -249,38 +259,37 @@ impl ListMetadataRepository { key: &ListKey, items: &[ListMetadataItemInput], ) -> Result<(), SqliteDbError> { - Self::insert_items(executor, site, key, items) + let list_metadata_id = Self::get_or_create(executor, site, key)?; + Self::insert_items(executor, list_metadata_id, items) } /// Internal helper to insert items using batch insert for better performance. fn insert_items( executor: &impl QueryExecutor, - site: &DbSite, - key: &ListKey, + list_metadata_id: RowId, items: &[ListMetadataItemInput], ) -> Result<(), SqliteDbError> { if items.is_empty() { return Ok(()); } - // SQLite has a variable limit (default 999). Each item uses 6 variables, - // so batch in chunks of ~150 items to stay well under the limit. - const BATCH_SIZE: usize = 150; + // SQLite has a variable limit (default 999). Each item uses 5 variables, + // so batch in chunks of ~180 items to stay well under the limit. 
+ const BATCH_SIZE: usize = 180; items.chunks(BATCH_SIZE).try_for_each(|chunk| { - let placeholders = vec!["(?, ?, ?, ?, ?, ?)"; chunk.len()].join(", "); + let placeholders = vec!["(?, ?, ?, ?, ?)"; chunk.len()].join(", "); let sql = format!( - "INSERT INTO {} (db_site_id, key, entity_id, modified_gmt, parent, menu_order) VALUES {}", + "INSERT INTO {} (list_metadata_id, entity_id, modified_gmt, parent, menu_order) VALUES {}", Self::items_table().table_name(), placeholders ); let params: Vec> = chunk .iter() - .flat_map(|item| -> [Box; 6] { + .flat_map(|item| -> [Box; 5] { [ - Box::new(site.row_id), - Box::new(key.as_str().to_string()), + Box::new(list_metadata_id), Box::new(item.entity_id), Box::new(item.modified_gmt.clone()), Box::new(item.parent), @@ -392,25 +401,12 @@ impl ListMetadataRepository { site: &DbSite, key: &ListKey, ) -> Result<(), SqliteDbError> { - // Delete items first (no FK constraint to header) - let delete_items_sql = format!( - "DELETE FROM {} WHERE db_site_id = ? AND key = ?", - Self::items_table().table_name() - ); - executor.execute( - &delete_items_sql, - rusqlite::params![site.row_id, key.as_str()], - )?; - - // Delete header (state will be cascade deleted via FK) - let delete_header_sql = format!( + // Delete header - items and state are cascade deleted via FK + let sql = format!( "DELETE FROM {} WHERE db_site_id = ? AND key = ?", Self::header_table().table_name() ); - executor.execute( - &delete_header_sql, - rusqlite::params![site.row_id, key.as_str()], - )?; + executor.execute(&sql, rusqlite::params![site.row_id, key.as_str()])?; Ok(()) } @@ -577,8 +573,11 @@ impl ListMetadataRepository { item_row_id: RowId, ) -> Result { let sql = format!( - "SELECT 1 FROM {} WHERE rowid = ? AND db_site_id = ? AND key = ?", - Self::items_table().table_name() + "SELECT 1 FROM {} i \ + JOIN {} m ON i.list_metadata_id = m.rowid \ + WHERE i.rowid = ? AND m.db_site_id = ? 
AND m.key = ?", + Self::items_table().table_name(), + Self::header_table().table_name() ); let mut stmt = executor.prepare(&sql)?; let result = stmt.query_row( @@ -782,7 +781,7 @@ mod tests { #[rstest] fn test_list_metadata_items_column_enum_matches_schema(test_ctx: TestContext) { let sql = format!( - "SELECT rowid, db_site_id, key, entity_id, modified_gmt, parent, menu_order FROM {}", + "SELECT rowid, list_metadata_id, entity_id, modified_gmt, parent, menu_order FROM {}", ListMetadataRepository::items_table().table_name() ); let stmt = test_ctx.conn.prepare(&sql); From bb3f2ddf8bfd7c3277a0b832a4d7ca4f16ca08bc Mon Sep 17 00:00:00 2001 From: Oguz Kocer Date: Thu, 18 Dec 2025 12:55:18 -0500 Subject: [PATCH 10/25] Add log crate for structured logging Adds the `log` facade crate to enable proper logging. Debug logs added to key list metadata operations: - begin_refresh, begin_fetch_next_page - complete_sync, complete_sync_with_error - set_items, append_items, delete_list Replaces eprintln! with log::warn! for unknown table warnings. 
--- Cargo.lock | 1 + Cargo.toml | 1 + wp_mobile_cache/Cargo.toml | 1 + wp_mobile_cache/src/lib.rs | 2 +- wp_mobile_cache/src/repository/list_metadata.rs | 16 ++++++++++++++++ 5 files changed, 20 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 6750f3ee4..e0794b38e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5306,6 +5306,7 @@ version = "0.1.0" dependencies = [ "chrono", "integration_test_credentials", + "log", "rstest", "rusqlite", "serde", diff --git a/Cargo.toml b/Cargo.toml index 4fc9bf349..73756858c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -45,6 +45,7 @@ hyper-util = "0.1.19" indoc = "2.0" itertools = "0.14.0" linkify = "0.10.0" +log = "0.4" parse_link_header = "0.4" paste = "1.0" proc-macro-crate = "3.4.0" diff --git a/wp_mobile_cache/Cargo.toml b/wp_mobile_cache/Cargo.toml index 13736a7cb..45b9009ee 100644 --- a/wp_mobile_cache/Cargo.toml +++ b/wp_mobile_cache/Cargo.toml @@ -8,6 +8,7 @@ default = ["rusqlite/bundled"] test-helpers = ["dep:chrono", "dep:integration_test_credentials", "dep:rstest"] [dependencies] +log = { workspace = true } rusqlite = { workspace = true, features = ["hooks"] } serde = { workspace = true } serde_json = { workspace = true } diff --git a/wp_mobile_cache/src/lib.rs b/wp_mobile_cache/src/lib.rs index 25f2e04bc..22005e0dd 100644 --- a/wp_mobile_cache/src/lib.rs +++ b/wp_mobile_cache/src/lib.rs @@ -291,7 +291,7 @@ impl WpApiCache { // Ignore SQLite system tables (sqlite_sequence, sqlite_master, etc.) 
// and migration tracking table (_migrations) if !table_name.starts_with("sqlite_") && table_name != "_migrations" { - eprintln!("Warning: Unknown table in update hook: {}", table_name); + log::warn!("Unknown table in update hook: {}", table_name); } } } diff --git a/wp_mobile_cache/src/repository/list_metadata.rs b/wp_mobile_cache/src/repository/list_metadata.rs index 57ffacf77..57bcf241d 100644 --- a/wp_mobile_cache/src/repository/list_metadata.rs +++ b/wp_mobile_cache/src/repository/list_metadata.rs @@ -234,6 +234,8 @@ impl ListMetadataRepository { key: &ListKey, items: &[ListMetadataItemInput], ) -> Result<(), SqliteDbError> { + log::debug!("ListMetadataRepository::set_items: key={}, count={}", key, items.len()); + let list_metadata_id = Self::get_or_create(executor, site, key)?; // Delete existing items @@ -259,6 +261,8 @@ impl ListMetadataRepository { key: &ListKey, items: &[ListMetadataItemInput], ) -> Result<(), SqliteDbError> { + log::debug!("ListMetadataRepository::append_items: key={}, count={}", key, items.len()); + let list_metadata_id = Self::get_or_create(executor, site, key)?; Self::insert_items(executor, list_metadata_id, items) } @@ -401,6 +405,8 @@ impl ListMetadataRepository { site: &DbSite, key: &ListKey, ) -> Result<(), SqliteDbError> { + log::debug!("ListMetadataRepository::delete_list: key={}", key); + // Delete header - items and state are cascade deleted via FK let sql = format!( "DELETE FROM {} WHERE db_site_id = ? AND key = ?", @@ -429,6 +435,8 @@ impl ListMetadataRepository { site: &DbSite, key: &ListKey, ) -> Result { + log::debug!("ListMetadataRepository::begin_refresh: key={}", key); + // Ensure header exists and get its ID let list_metadata_id = Self::get_or_create(executor, site, key)?; @@ -468,6 +476,8 @@ impl ListMetadataRepository { site: &DbSite, key: &ListKey, ) -> Result, SqliteDbError> { + log::debug!("ListMetadataRepository::begin_fetch_next_page: key={}", key); + let header = match Self::get_header(executor, site, key)? 
{ Some(h) => h, None => return Ok(None), // List doesn't exist @@ -504,6 +514,7 @@ impl ListMetadataRepository { executor: &impl QueryExecutor, list_metadata_id: RowId, ) -> Result<(), SqliteDbError> { + log::debug!("ListMetadataRepository::complete_sync: list_metadata_id={}", list_metadata_id.0); Self::update_state(executor, list_metadata_id, ListState::Idle, None) } @@ -515,6 +526,11 @@ impl ListMetadataRepository { list_metadata_id: RowId, error_message: &str, ) -> Result<(), SqliteDbError> { + log::debug!( + "ListMetadataRepository::complete_sync_with_error: list_metadata_id={}, error={}", + list_metadata_id.0, + error_message + ); Self::update_state( executor, list_metadata_id, From ba4ec2aea7293e4855071030e55e112b1afd4b35 Mon Sep 17 00:00:00 2001 From: Oguz Kocer Date: Thu, 18 Dec 2025 13:20:41 -0500 Subject: [PATCH 11/25] Add ToSql/FromSql for ListKey and log stale state reset errors Implement ToSql/FromSql traits for ListKey to simplify repository code by using the type directly in SQL params instead of .as_str(). Also add logging for reset_stale_fetching_states failures instead of silently ignoring errors. Changes: - Add ToSql/FromSql implementations for ListKey - Replace key.as_str() with key in all SQL params - Log warning on reset_stale_fetching_states failure --- wp_mobile_cache/src/lib.rs | 9 ++++++--- wp_mobile_cache/src/list_metadata.rs | 12 ++++++++++++ wp_mobile_cache/src/repository/list_metadata.rs | 14 +++++++------- 3 files changed, 25 insertions(+), 10 deletions(-) diff --git a/wp_mobile_cache/src/lib.rs b/wp_mobile_cache/src/lib.rs index 22005e0dd..7456f4adc 100644 --- a/wp_mobile_cache/src/lib.rs +++ b/wp_mobile_cache/src/lib.rs @@ -265,9 +265,12 @@ impl WpApiCache { let version = mgr.perform_migrations().map_err(SqliteDbError::from)?; // Reset stale fetching states after migrations complete. - // Errors are ignored: this is a best-effort cleanup, and failure doesn't - // affect core functionality (worst case: UI shows stale loading state). 
- let _ = Self::reset_stale_fetching_states_internal(connection); + // Errors are logged but not propagated: this is a best-effort cleanup, + // and failure doesn't affect core functionality (worst case: UI shows + // stale loading state). + if let Err(e) = Self::reset_stale_fetching_states_internal(connection) { + log::warn!("Failed to reset stale fetching states: {}", e); + } Ok(version) }) diff --git a/wp_mobile_cache/src/list_metadata.rs b/wp_mobile_cache/src/list_metadata.rs index cb23fd5b6..9c05ed0a1 100644 --- a/wp_mobile_cache/src/list_metadata.rs +++ b/wp_mobile_cache/src/list_metadata.rs @@ -45,6 +45,18 @@ impl From for ListKey { } } +impl ToSql for ListKey { + fn to_sql(&self) -> rusqlite::Result> { + Ok(ToSqlOutput::from(self.0.as_str())) + } +} + +impl FromSql for ListKey { + fn column_result(value: rusqlite::types::ValueRef<'_>) -> FromSqlResult { + String::column_result(value).map(ListKey) + } +} + /// Represents list metadata header in the database. /// /// Stores pagination info and version for a specific list (e.g., "edit:posts:publish"). 
diff --git a/wp_mobile_cache/src/repository/list_metadata.rs b/wp_mobile_cache/src/repository/list_metadata.rs index 57bcf241d..d27adc1b4 100644 --- a/wp_mobile_cache/src/repository/list_metadata.rs +++ b/wp_mobile_cache/src/repository/list_metadata.rs @@ -47,7 +47,7 @@ impl ListMetadataRepository { Self::header_table().table_name() ); let mut stmt = executor.prepare(&sql)?; - let mut rows = stmt.query_map(rusqlite::params![site.row_id, key.as_str()], |row| { + let mut rows = stmt.query_map(rusqlite::params![site.row_id, key], |row| { DbListMetadata::from_row(row) .map_err(|e| rusqlite::Error::ToSqlConversionFailure(Box::new(e))) })?; @@ -77,7 +77,7 @@ impl ListMetadataRepository { "INSERT INTO {} (db_site_id, key, current_page, per_page, version) VALUES (?, ?, 0, 20, 0)", Self::header_table().table_name() ); - executor.execute(&sql, rusqlite::params![site.row_id, key.as_str()])?; + executor.execute(&sql, rusqlite::params![site.row_id, key])?; Ok(executor.last_insert_rowid()) } @@ -163,7 +163,7 @@ impl ListMetadataRepository { Self::state_table().table_name() ); let mut stmt = executor.prepare(&sql)?; - let mut rows = stmt.query_map(rusqlite::params![site.row_id, key.as_str()], |row| { + let mut rows = stmt.query_map(rusqlite::params![site.row_id, key], |row| { DbListHeaderWithState::from_row(row) .map_err(|e| rusqlite::Error::ToSqlConversionFailure(Box::new(e))) })?; @@ -332,7 +332,7 @@ impl ListMetadataRepository { update.current_page, update.per_page, site.row_id, - key.as_str() + key ], )?; @@ -393,7 +393,7 @@ impl ListMetadataRepository { Self::header_table().table_name() ); - executor.execute(&sql, rusqlite::params![site.row_id, key.as_str()])?; + executor.execute(&sql, rusqlite::params![site.row_id, key])?; // Return the new version Self::get_version(executor, site, key) @@ -412,7 +412,7 @@ impl ListMetadataRepository { "DELETE FROM {} WHERE db_site_id = ? 
AND key = ?", Self::header_table().table_name() ); - executor.execute(&sql, rusqlite::params![site.row_id, key.as_str()])?; + executor.execute(&sql, rusqlite::params![site.row_id, key])?; Ok(()) } @@ -597,7 +597,7 @@ impl ListMetadataRepository { ); let mut stmt = executor.prepare(&sql)?; let result = stmt.query_row( - rusqlite::params![item_row_id, site.row_id, key.as_str()], + rusqlite::params![item_row_id, site.row_id, key], |_| Ok(()), ); From a100ad51622f7d9c0d11d4f6ee6baa38d62f6930 Mon Sep 17 00:00:00 2001 From: Oguz Kocer Date: Thu, 18 Dec 2025 13:21:58 -0500 Subject: [PATCH 12/25] Remove unused update hook helper functions --- .../src/repository/list_metadata.rs | 69 ------------------- 1 file changed, 69 deletions(-) diff --git a/wp_mobile_cache/src/repository/list_metadata.rs b/wp_mobile_cache/src/repository/list_metadata.rs index d27adc1b4..0d927ced8 100644 --- a/wp_mobile_cache/src/repository/list_metadata.rs +++ b/wp_mobile_cache/src/repository/list_metadata.rs @@ -538,75 +538,6 @@ impl ListMetadataRepository { Some(error_message), ) } - - // ============================================ - // Relevance checking for update hooks - // ============================================ - - /// Get the list_metadata_id (rowid) for a given key. - /// - /// Returns None if no list exists for this key yet. - /// Used by collections to cache the ID for relevance checking. - pub fn get_list_metadata_id( - executor: &impl QueryExecutor, - site: &DbSite, - key: &ListKey, - ) -> Result, SqliteDbError> { - Self::get_header(executor, site, key).map(|opt| opt.map(|h| h.row_id)) - } - - /// Get the list_metadata_id that a state row belongs to. - /// - /// Given a rowid from the list_metadata_state table, returns the - /// list_metadata_id (FK to list_metadata) that this state belongs to. - /// Returns None if the state row doesn't exist. 
- pub fn get_list_metadata_id_for_state_row( - executor: &impl QueryExecutor, - state_row_id: RowId, - ) -> Result, SqliteDbError> { - let sql = format!( - "SELECT list_metadata_id FROM {} WHERE rowid = ?", - Self::state_table().table_name() - ); - let mut stmt = executor.prepare(&sql)?; - let result = stmt.query_row([state_row_id], |row| row.get::<_, RowId>(0)); - - match result { - Ok(id) => Ok(Some(id)), - Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None), - Err(e) => Err(SqliteDbError::from(e)), - } - } - - /// Check if a list_metadata_items row belongs to a specific key. - /// - /// Given a rowid from the list_metadata_items table, checks if the item - /// belongs to the list identified by (site, key). - pub fn is_item_row_for_key( - executor: &impl QueryExecutor, - site: &DbSite, - key: &ListKey, - item_row_id: RowId, - ) -> Result { - let sql = format!( - "SELECT 1 FROM {} i \ - JOIN {} m ON i.list_metadata_id = m.rowid \ - WHERE i.rowid = ? AND m.db_site_id = ? AND m.key = ?", - Self::items_table().table_name(), - Self::header_table().table_name() - ); - let mut stmt = executor.prepare(&sql)?; - let result = stmt.query_row( - rusqlite::params![item_row_id, site.row_id, key], - |_| Ok(()), - ); - - match result { - Ok(()) => Ok(true), - Err(rusqlite::Error::QueryReturnedNoRows) => Ok(false), - Err(e) => Err(SqliteDbError::from(e)), - } - } } /// Information returned when starting a refresh operation. 
From 8d39ebff48132b13baca343134fa2a094b68bab0 Mon Sep 17 00:00:00 2001 From: Oguz Kocer Date: Thu, 18 Dec 2025 13:27:14 -0500 Subject: [PATCH 13/25] Remove select_modified_gmt_by_ids from this PR --- wp_mobile_cache/src/repository/posts.rs | 58 +------------------------ 1 file changed, 1 insertion(+), 57 deletions(-) diff --git a/wp_mobile_cache/src/repository/posts.rs b/wp_mobile_cache/src/repository/posts.rs index 4d6a76a9f..910162403 100644 --- a/wp_mobile_cache/src/repository/posts.rs +++ b/wp_mobile_cache/src/repository/posts.rs @@ -20,7 +20,7 @@ use crate::{ term_relationships::DbTermRelationship, }; use rusqlite::{OptionalExtension, Row}; -use std::{collections::HashMap, marker::PhantomData, sync::Arc}; +use std::{marker::PhantomData, sync::Arc}; use wp_api::{ posts::{ AnyPostWithEditContext, AnyPostWithEmbedContext, AnyPostWithViewContext, @@ -28,7 +28,6 @@ use wp_api::{ PostGuidWithViewContext, PostId, PostTitleWithEditContext, PostTitleWithEmbedContext, PostTitleWithViewContext, SparsePostExcerpt, }, - prelude::WpGmtDateTime, taxonomies::TaxonomyType, terms::TermId, }; @@ -308,61 +307,6 @@ impl PostRepository { })) } - /// Select `modified_gmt` timestamps for multiple posts by their WordPress post IDs. - /// - /// This is a lightweight query used for staleness detection - it only fetches - /// the `id` and `modified_gmt` columns without loading the full post data. - /// - /// Returns a HashMap mapping post IDs to their cached `modified_gmt` timestamps. - /// Posts not found in the cache are simply omitted from the result. - /// - /// # Arguments - /// * `executor` - Database connection or transaction - /// * `site` - The site to query posts for - /// * `post_ids` - WordPress post IDs to look up - /// - /// # Returns - /// HashMap where keys are post IDs and values are their `modified_gmt` timestamps. 
- pub fn select_modified_gmt_by_ids( - &self, - executor: &impl QueryExecutor, - site: &DbSite, - post_ids: &[i64], - ) -> Result, SqliteDbError> { - if post_ids.is_empty() { - return Ok(HashMap::new()); - } - - let ids_str = post_ids - .iter() - .map(|id| id.to_string()) - .collect::>() - .join(", "); - - let sql = format!( - "SELECT id, modified_gmt FROM {} WHERE db_site_id = ? AND id IN ({})", - Self::table_name(), - ids_str - ); - - let mut stmt = executor.prepare(&sql)?; - let rows = stmt.query_map([site.row_id], |row| { - let id: i64 = row.get(0)?; - let modified_gmt_str: String = row.get(1)?; - Ok((id, modified_gmt_str)) - })?; - - let mut result = HashMap::new(); - for row_result in rows { - let (id, modified_gmt_str) = row_result.map_err(SqliteDbError::from)?; - if let Ok(modified_gmt) = modified_gmt_str.parse::() { - result.insert(id, modified_gmt); - } - } - - Ok(result) - } - /// Delete a post by its EntityId for a given site. /// /// Returns the number of rows deleted (0 or 1). From 52fca2924fc2afe7a15eafe09e333d0f08f10693 Mon Sep 17 00:00:00 2001 From: Oguz Kocer Date: Thu, 18 Dec 2025 13:37:08 -0500 Subject: [PATCH 14/25] Split get_items and get_item_count into by_list_metadata_id and by_list_key variants Allows callers who already have the list_metadata_id to skip an extra header lookup query. --- .../src/repository/list_metadata.rs | 73 ++++++++++++------- 1 file changed, 47 insertions(+), 26 deletions(-) diff --git a/wp_mobile_cache/src/repository/list_metadata.rs b/wp_mobile_cache/src/repository/list_metadata.rs index 0d927ced8..57a62c18b 100644 --- a/wp_mobile_cache/src/repository/list_metadata.rs +++ b/wp_mobile_cache/src/repository/list_metadata.rs @@ -82,17 +82,14 @@ impl ListMetadataRepository { Ok(executor.last_insert_rowid()) } - /// Get all items for a list, ordered by rowid (insertion order = display order). - pub fn get_items( + /// Get all items for a list by ID, ordered by rowid (insertion order = display order). 
+ /// + /// Use this when you already have the `list_metadata_id` from a previous call + /// (e.g., from `get_or_create` or `begin_refresh`) to avoid an extra lookup. + pub fn get_items_by_list_metadata_id( executor: &impl QueryExecutor, - site: &DbSite, - key: &ListKey, + list_metadata_id: RowId, ) -> Result, SqliteDbError> { - let list_metadata_id = match Self::get_header(executor, site, key)? { - Some(header) => header.row_id, - None => return Ok(Vec::new()), - }; - let sql = format!( "SELECT * FROM {} WHERE list_metadata_id = ? ORDER BY rowid", Self::items_table().table_name() @@ -107,6 +104,20 @@ impl ListMetadataRepository { .map_err(SqliteDbError::from) } + /// Get all items for a list by site and key, ordered by rowid (insertion order = display order). + /// + /// If you already have the `list_metadata_id`, use `get_items_by_list_metadata_id` instead. + pub fn get_items_by_list_key( + executor: &impl QueryExecutor, + site: &DbSite, + key: &ListKey, + ) -> Result, SqliteDbError> { + match Self::get_header(executor, site, key)? { + Some(header) => Self::get_items_by_list_metadata_id(executor, header.row_id), + None => Ok(Vec::new()), + } + } + /// Get the current sync state for a list. /// /// Returns None if no state record exists (list not yet synced). @@ -200,17 +211,13 @@ impl ListMetadataRepository { Ok(current_version == expected_version) } - /// Get the item count for a list. - pub fn get_item_count( + /// Get the item count for a list by ID. + /// + /// Use this when you already have the `list_metadata_id` from a previous call. + pub fn get_item_count_by_list_metadata_id( executor: &impl QueryExecutor, - site: &DbSite, - key: &ListKey, + list_metadata_id: RowId, ) -> Result { - let list_metadata_id = match Self::get_header(executor, site, key)? 
{ - Some(header) => header.row_id, - None => return Ok(0), - }; - let sql = format!( "SELECT COUNT(*) FROM {} WHERE list_metadata_id = ?", Self::items_table().table_name() @@ -220,6 +227,20 @@ impl ListMetadataRepository { .map_err(SqliteDbError::from) } + /// Get the item count for a list by site and key. + /// + /// If you already have the `list_metadata_id`, use `get_item_count_by_list_metadata_id` instead. + pub fn get_item_count_by_list_key( + executor: &impl QueryExecutor, + site: &DbSite, + key: &ListKey, + ) -> Result { + match Self::get_header(executor, site, key)? { + Some(header) => Self::get_item_count_by_list_metadata_id(executor, header.row_id), + None => Ok(0), + } + } + // ============================================================ // Write Operations // ============================================================ @@ -645,7 +666,7 @@ mod tests { #[rstest] fn test_get_items_returns_empty_for_non_existent_list(test_ctx: TestContext) { - let items = ListMetadataRepository::get_items( + let items = ListMetadataRepository::get_items_by_list_key( &test_ctx.conn, &test_ctx.site, &ListKey::from("nonexistent:key"), @@ -702,7 +723,7 @@ mod tests { #[rstest] fn test_get_item_count_returns_zero_for_empty_list(test_ctx: TestContext) { - let count = ListMetadataRepository::get_item_count( + let count = ListMetadataRepository::get_item_count_by_list_key( &test_ctx.conn, &test_ctx.site, &ListKey::from("empty:list"), @@ -783,7 +804,7 @@ mod tests { ListMetadataRepository::set_items(&test_ctx.conn, &test_ctx.site, &key, &items).unwrap(); let retrieved = - ListMetadataRepository::get_items(&test_ctx.conn, &test_ctx.site, &key).unwrap(); + ListMetadataRepository::get_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key).unwrap(); assert_eq!(retrieved.len(), 3); assert_eq!(retrieved[0].entity_id, 100); assert_eq!(retrieved[0].parent, Some(50)); @@ -842,7 +863,7 @@ mod tests { .unwrap(); let retrieved = - ListMetadataRepository::get_items(&test_ctx.conn, 
&test_ctx.site, &key).unwrap(); + ListMetadataRepository::get_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key).unwrap(); assert_eq!(retrieved.len(), 3); assert_eq!(retrieved[0].entity_id, 10); assert_eq!(retrieved[1].entity_id, 20); @@ -890,7 +911,7 @@ mod tests { .unwrap(); let retrieved = - ListMetadataRepository::get_items(&test_ctx.conn, &test_ctx.site, &key).unwrap(); + ListMetadataRepository::get_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key).unwrap(); assert_eq!(retrieved.len(), 4); assert_eq!(retrieved[0].entity_id, 1); assert_eq!(retrieved[1].entity_id, 2); @@ -1046,7 +1067,7 @@ mod tests { .is_some() ); assert_eq!( - ListMetadataRepository::get_item_count(&test_ctx.conn, &test_ctx.site, &key).unwrap(), + ListMetadataRepository::get_item_count_by_list_key(&test_ctx.conn, &test_ctx.site, &key).unwrap(), 1 ); @@ -1060,7 +1081,7 @@ mod tests { .is_none() ); assert_eq!( - ListMetadataRepository::get_item_count(&test_ctx.conn, &test_ctx.site, &key).unwrap(), + ListMetadataRepository::get_item_count_by_list_key(&test_ctx.conn, &test_ctx.site, &key).unwrap(), 0 ); } @@ -1082,7 +1103,7 @@ mod tests { ListMetadataRepository::set_items(&test_ctx.conn, &test_ctx.site, &key, &items).unwrap(); let retrieved = - ListMetadataRepository::get_items(&test_ctx.conn, &test_ctx.site, &key).unwrap(); + ListMetadataRepository::get_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key).unwrap(); assert_eq!(retrieved.len(), 10); // Verify order is preserved (rowid ordering) From 565eb4bd46f1d63a303c408e78d7d978c35b2bf6 Mon Sep 17 00:00:00 2001 From: Oguz Kocer Date: Thu, 18 Dec 2025 13:39:51 -0500 Subject: [PATCH 15/25] Remove redundant check_version function --- .../src/repository/list_metadata.rs | 89 +++++++++---------- 1 file changed, 40 insertions(+), 49 deletions(-) diff --git a/wp_mobile_cache/src/repository/list_metadata.rs b/wp_mobile_cache/src/repository/list_metadata.rs index 57a62c18b..6595d556f 100644 --- 
a/wp_mobile_cache/src/repository/list_metadata.rs +++ b/wp_mobile_cache/src/repository/list_metadata.rs @@ -197,20 +197,6 @@ impl ListMetadataRepository { Ok(header.map(|h| h.version).unwrap_or(0)) } - /// Check if the current version matches the expected version. - /// - /// Used for concurrency control to detect if a refresh happened - /// while a load-more operation was in progress. - pub fn check_version( - executor: &impl QueryExecutor, - site: &DbSite, - key: &ListKey, - expected_version: i64, - ) -> Result { - let current_version = Self::get_version(executor, site, key)?; - Ok(current_version == expected_version) - } - /// Get the item count for a list by ID. /// /// Use this when you already have the `list_metadata_id` from a previous call. @@ -255,7 +241,11 @@ impl ListMetadataRepository { key: &ListKey, items: &[ListMetadataItemInput], ) -> Result<(), SqliteDbError> { - log::debug!("ListMetadataRepository::set_items: key={}, count={}", key, items.len()); + log::debug!( + "ListMetadataRepository::set_items: key={}, count={}", + key, + items.len() + ); let list_metadata_id = Self::get_or_create(executor, site, key)?; @@ -282,7 +272,11 @@ impl ListMetadataRepository { key: &ListKey, items: &[ListMetadataItemInput], ) -> Result<(), SqliteDbError> { - log::debug!("ListMetadataRepository::append_items: key={}, count={}", key, items.len()); + log::debug!( + "ListMetadataRepository::append_items: key={}, count={}", + key, + items.len() + ); let list_metadata_id = Self::get_or_create(executor, site, key)?; Self::insert_items(executor, list_metadata_id, items) @@ -535,7 +529,10 @@ impl ListMetadataRepository { executor: &impl QueryExecutor, list_metadata_id: RowId, ) -> Result<(), SqliteDbError> { - log::debug!("ListMetadataRepository::complete_sync: list_metadata_id={}", list_metadata_id.0); + log::debug!( + "ListMetadataRepository::complete_sync: list_metadata_id={}", + list_metadata_id.0 + ); Self::update_state(executor, list_metadata_id, ListState::Idle, None) 
} @@ -703,24 +700,6 @@ mod tests { assert_eq!(version, 0); } - #[rstest] - fn test_check_version_returns_true_for_matching_version(test_ctx: TestContext) { - let key = ListKey::from("edit:posts:publish"); - - // Create header (version = 0) - ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key).unwrap(); - - // Check version matches - let matches = - ListMetadataRepository::check_version(&test_ctx.conn, &test_ctx.site, &key, 0).unwrap(); - assert!(matches); - - // Check version doesn't match - let matches = - ListMetadataRepository::check_version(&test_ctx.conn, &test_ctx.site, &key, 1).unwrap(); - assert!(!matches); - } - #[rstest] fn test_get_item_count_returns_zero_for_empty_list(test_ctx: TestContext) { let count = ListMetadataRepository::get_item_count_by_list_key( @@ -804,7 +783,8 @@ mod tests { ListMetadataRepository::set_items(&test_ctx.conn, &test_ctx.site, &key, &items).unwrap(); let retrieved = - ListMetadataRepository::get_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key).unwrap(); + ListMetadataRepository::get_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key) + .unwrap(); assert_eq!(retrieved.len(), 3); assert_eq!(retrieved[0].entity_id, 100); assert_eq!(retrieved[0].parent, Some(50)); @@ -863,7 +843,8 @@ mod tests { .unwrap(); let retrieved = - ListMetadataRepository::get_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key).unwrap(); + ListMetadataRepository::get_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key) + .unwrap(); assert_eq!(retrieved.len(), 3); assert_eq!(retrieved[0].entity_id, 10); assert_eq!(retrieved[1].entity_id, 20); @@ -911,7 +892,8 @@ mod tests { .unwrap(); let retrieved = - ListMetadataRepository::get_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key).unwrap(); + ListMetadataRepository::get_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key) + .unwrap(); assert_eq!(retrieved.len(), 4); assert_eq!(retrieved[0].entity_id, 1); assert_eq!(retrieved[1].entity_id, 2); @@ -1067,7 
+1049,12 @@ mod tests { .is_some() ); assert_eq!( - ListMetadataRepository::get_item_count_by_list_key(&test_ctx.conn, &test_ctx.site, &key).unwrap(), + ListMetadataRepository::get_item_count_by_list_key( + &test_ctx.conn, + &test_ctx.site, + &key + ) + .unwrap(), 1 ); @@ -1081,7 +1068,12 @@ mod tests { .is_none() ); assert_eq!( - ListMetadataRepository::get_item_count_by_list_key(&test_ctx.conn, &test_ctx.site, &key).unwrap(), + ListMetadataRepository::get_item_count_by_list_key( + &test_ctx.conn, + &test_ctx.site, + &key + ) + .unwrap(), 0 ); } @@ -1103,7 +1095,8 @@ mod tests { ListMetadataRepository::set_items(&test_ctx.conn, &test_ctx.site, &key, &items).unwrap(); let retrieved = - ListMetadataRepository::get_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key).unwrap(); + ListMetadataRepository::get_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key) + .unwrap(); assert_eq!(retrieved.len(), 10); // Verify order is preserved (rowid ordering) @@ -1290,13 +1283,11 @@ mod tests { ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, &key).unwrap(); // Version check should fail (stale) - let is_valid = ListMetadataRepository::check_version( - &test_ctx.conn, - &test_ctx.site, - &key, - captured_version, - ) - .unwrap(); - assert!(!is_valid, "Version should not match after refresh"); + let current_version = + ListMetadataRepository::get_version(&test_ctx.conn, &test_ctx.site, &key).unwrap(); + assert_ne!( + current_version, captured_version, + "Version should not match after refresh" + ); } } From 745be0b1eed616402d37b99c0105038631d9f5f2 Mon Sep 17 00:00:00 2001 From: Oguz Kocer Date: Thu, 18 Dec 2025 13:46:54 -0500 Subject: [PATCH 16/25] Add by_list_metadata_id variants for write operations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Split functions that take site+key to allow callers with an existing list_metadata_id to skip the header lookup query. 
Renamed functions for consistency: - set_items → set_items_by_list_metadata_id / set_items_by_list_key - append_items → append_items_by_list_metadata_id / append_items_by_list_key - update_header → update_header_by_list_metadata_id / update_header_by_list_key - increment_version → increment_version_by_list_metadata_id / increment_version_by_list_key - get_state → get_state_by_list_metadata_id - get_state_by_key → get_state_by_list_key - update_state → update_state_by_list_metadata_id - update_state_by_key → update_state_by_list_key --- .../src/repository/list_metadata.rs | 196 +++++++++++------- 1 file changed, 118 insertions(+), 78 deletions(-) diff --git a/wp_mobile_cache/src/repository/list_metadata.rs b/wp_mobile_cache/src/repository/list_metadata.rs index 6595d556f..679001de8 100644 --- a/wp_mobile_cache/src/repository/list_metadata.rs +++ b/wp_mobile_cache/src/repository/list_metadata.rs @@ -118,10 +118,10 @@ impl ListMetadataRepository { } } - /// Get the current sync state for a list. + /// Get the current sync state for a list by ID. /// /// Returns None if no state record exists (list not yet synced). - pub fn get_state( + pub fn get_state_by_list_metadata_id( executor: &impl QueryExecutor, list_metadata_id: RowId, ) -> Result, SqliteDbError> { @@ -145,7 +145,7 @@ impl ListMetadataRepository { /// /// Uses a JOIN query internally for efficiency. /// Returns ListState::Idle if the list or state doesn't exist. - pub fn get_state_by_key( + pub fn get_state_by_list_key( executor: &impl QueryExecutor, site: &DbSite, key: &ListKey, @@ -231,24 +231,21 @@ impl ListMetadataRepository { // Write Operations // ============================================================ - /// Set items for a list, replacing any existing items. + /// Set items for a list by ID, replacing any existing items. /// /// Used for refresh (page 1) - deletes all existing items and inserts new ones. /// Items are inserted in order, so rowid determines display order. 
- pub fn set_items( + pub fn set_items_by_list_metadata_id( executor: &impl QueryExecutor, - site: &DbSite, - key: &ListKey, + list_metadata_id: RowId, items: &[ListMetadataItemInput], ) -> Result<(), SqliteDbError> { log::debug!( - "ListMetadataRepository::set_items: key={}, count={}", - key, + "ListMetadataRepository::set_items_by_list_metadata_id: list_metadata_id={}, count={}", + list_metadata_id.0, items.len() ); - let list_metadata_id = Self::get_or_create(executor, site, key)?; - // Delete existing items let delete_sql = format!( "DELETE FROM {} WHERE list_metadata_id = ?", @@ -257,31 +254,53 @@ impl ListMetadataRepository { executor.execute(&delete_sql, rusqlite::params![list_metadata_id])?; // Insert new items - Self::insert_items(executor, list_metadata_id, items)?; + Self::insert_items(executor, list_metadata_id, items) + } - Ok(()) + /// Set items for a list by site and key, replacing any existing items. + /// + /// If you already have the `list_metadata_id`, use `set_items_by_list_metadata_id` instead. + pub fn set_items_by_list_key( + executor: &impl QueryExecutor, + site: &DbSite, + key: &ListKey, + items: &[ListMetadataItemInput], + ) -> Result<(), SqliteDbError> { + let list_metadata_id = Self::get_or_create(executor, site, key)?; + Self::set_items_by_list_metadata_id(executor, list_metadata_id, items) } - /// Append items to an existing list. + /// Append items to an existing list by ID. /// /// Used for load-more (page 2+) - appends items without deleting existing ones. /// Items are inserted in order, so they appear after existing items. 
- pub fn append_items( + pub fn append_items_by_list_metadata_id( executor: &impl QueryExecutor, - site: &DbSite, - key: &ListKey, + list_metadata_id: RowId, items: &[ListMetadataItemInput], ) -> Result<(), SqliteDbError> { log::debug!( - "ListMetadataRepository::append_items: key={}, count={}", - key, + "ListMetadataRepository::append_items_by_list_metadata_id: list_metadata_id={}, count={}", + list_metadata_id.0, items.len() ); - let list_metadata_id = Self::get_or_create(executor, site, key)?; Self::insert_items(executor, list_metadata_id, items) } + /// Append items to an existing list by site and key. + /// + /// If you already have the `list_metadata_id`, use `append_items_by_list_metadata_id` instead. + pub fn append_items_by_list_key( + executor: &impl QueryExecutor, + site: &DbSite, + key: &ListKey, + items: &[ListMetadataItemInput], + ) -> Result<(), SqliteDbError> { + let list_metadata_id = Self::get_or_create(executor, site, key)?; + Self::append_items_by_list_metadata_id(executor, list_metadata_id, items) + } + /// Internal helper to insert items using batch insert for better performance. fn insert_items( executor: &impl QueryExecutor, @@ -324,18 +343,14 @@ impl ListMetadataRepository { }) } - /// Update header pagination info. - pub fn update_header( + /// Update header pagination info by ID. + pub fn update_header_by_list_metadata_id( executor: &impl QueryExecutor, - site: &DbSite, - key: &ListKey, + list_metadata_id: RowId, update: &ListMetadataHeaderUpdate, ) -> Result<(), SqliteDbError> { - // Ensure header exists - Self::get_or_create(executor, site, key)?; - let sql = format!( - "UPDATE {} SET total_pages = ?, total_items = ?, current_page = ?, per_page = ?, last_fetched_at = strftime('%Y-%m-%dT%H:%M:%fZ', 'now') WHERE db_site_id = ? 
AND key = ?", + "UPDATE {} SET total_pages = ?, total_items = ?, current_page = ?, per_page = ?, last_fetched_at = strftime('%Y-%m-%dT%H:%M:%fZ', 'now') WHERE rowid = ?", Self::header_table().table_name() ); @@ -346,18 +361,30 @@ impl ListMetadataRepository { update.total_items, update.current_page, update.per_page, - site.row_id, - key + list_metadata_id ], )?; Ok(()) } - /// Update sync state for a list. + /// Update header pagination info by site and key. + /// + /// If you already have the `list_metadata_id`, use `update_header_by_list_metadata_id` instead. + pub fn update_header_by_list_key( + executor: &impl QueryExecutor, + site: &DbSite, + key: &ListKey, + update: &ListMetadataHeaderUpdate, + ) -> Result<(), SqliteDbError> { + let list_metadata_id = Self::get_or_create(executor, site, key)?; + Self::update_header_by_list_metadata_id(executor, list_metadata_id, update) + } + + /// Update sync state for a list by ID. /// /// Creates the state record if it doesn't exist (upsert). - pub fn update_state( + pub fn update_state_by_list_metadata_id( executor: &impl QueryExecutor, list_metadata_id: RowId, state: ListState, @@ -380,8 +407,8 @@ impl ListMetadataRepository { /// Update sync state for a list by site and key. /// - /// Convenience method that looks up or creates the list_metadata_id first. - pub fn update_state_by_key( + /// If you already have the `list_metadata_id`, use `update_state_by_list_metadata_id` instead. + pub fn update_state_by_list_key( executor: &impl QueryExecutor, site: &DbSite, key: &ListKey, @@ -389,29 +416,42 @@ impl ListMetadataRepository { error_message: Option<&str>, ) -> Result<(), SqliteDbError> { let list_metadata_id = Self::get_or_create(executor, site, key)?; - Self::update_state(executor, list_metadata_id, state, error_message) + Self::update_state_by_list_metadata_id(executor, list_metadata_id, state, error_message) } - /// Increment version and return the new value. + /// Increment version by ID and return the new value. 
/// /// Used when starting a refresh to invalidate any in-flight load-more operations. - pub fn increment_version( + pub fn increment_version_by_list_metadata_id( executor: &impl QueryExecutor, - site: &DbSite, - key: &ListKey, + list_metadata_id: RowId, ) -> Result { - // Ensure header exists - Self::get_or_create(executor, site, key)?; - let sql = format!( - "UPDATE {} SET version = version + 1, last_first_page_fetched_at = strftime('%Y-%m-%dT%H:%M:%fZ', 'now') WHERE db_site_id = ? AND key = ?", + "UPDATE {} SET version = version + 1, last_first_page_fetched_at = strftime('%Y-%m-%dT%H:%M:%fZ', 'now') WHERE rowid = ?", Self::header_table().table_name() ); - - executor.execute(&sql, rusqlite::params![site.row_id, key])?; + executor.execute(&sql, rusqlite::params![list_metadata_id])?; // Return the new version - Self::get_version(executor, site, key) + let sql = format!( + "SELECT version FROM {} WHERE rowid = ?", + Self::header_table().table_name() + ); + let mut stmt = executor.prepare(&sql)?; + stmt.query_row(rusqlite::params![list_metadata_id], |row| row.get(0)) + .map_err(SqliteDbError::from) + } + + /// Increment version by site and key and return the new value. + /// + /// If you already have the `list_metadata_id`, use `increment_version_by_list_metadata_id` instead. + pub fn increment_version_by_list_key( + executor: &impl QueryExecutor, + site: &DbSite, + key: &ListKey, + ) -> Result { + let list_metadata_id = Self::get_or_create(executor, site, key)?; + Self::increment_version_by_list_metadata_id(executor, list_metadata_id) } /// Delete all data for a list (header, items, and state). 
@@ -456,10 +496,10 @@ impl ListMetadataRepository { let list_metadata_id = Self::get_or_create(executor, site, key)?; // Increment version (invalidates any in-flight load-more) - let version = Self::increment_version(executor, site, key)?; + let version = Self::increment_version_by_list_metadata_id(executor, list_metadata_id)?; // Update state to fetching - Self::update_state( + Self::update_state_by_list_metadata_id( executor, list_metadata_id, ListState::FetchingFirstPage, @@ -512,7 +552,7 @@ impl ListMetadataRepository { let next_page = header.current_page + 1; // Update state to fetching - Self::update_state(executor, header.row_id, ListState::FetchingNextPage, None)?; + Self::update_state_by_list_metadata_id(executor, header.row_id, ListState::FetchingNextPage, None)?; Ok(Some(FetchNextPageInfo { list_metadata_id: header.row_id, @@ -533,7 +573,7 @@ impl ListMetadataRepository { "ListMetadataRepository::complete_sync: list_metadata_id={}", list_metadata_id.0 ); - Self::update_state(executor, list_metadata_id, ListState::Idle, None) + Self::update_state_by_list_metadata_id(executor, list_metadata_id, ListState::Idle, None) } /// Complete a sync operation with an error. 
@@ -549,7 +589,7 @@ impl ListMetadataRepository { list_metadata_id.0, error_message ); - Self::update_state( + Self::update_state_by_list_metadata_id( executor, list_metadata_id, ListState::Error, @@ -674,13 +714,13 @@ mod tests { #[rstest] fn test_get_state_returns_none_for_non_existent(test_ctx: TestContext) { - let result = ListMetadataRepository::get_state(&test_ctx.conn, RowId(999999)).unwrap(); + let result = ListMetadataRepository::get_state_by_list_metadata_id(&test_ctx.conn, RowId(999999)).unwrap(); assert!(result.is_none()); } #[rstest] fn test_get_state_by_key_returns_idle_for_non_existent_list(test_ctx: TestContext) { - let state = ListMetadataRepository::get_state_by_key( + let state = ListMetadataRepository::get_state_by_list_key( &test_ctx.conn, &test_ctx.site, &ListKey::from("nonexistent:key"), @@ -780,7 +820,7 @@ mod tests { }, ]; - ListMetadataRepository::set_items(&test_ctx.conn, &test_ctx.site, &key, &items).unwrap(); + ListMetadataRepository::set_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key, &items).unwrap(); let retrieved = ListMetadataRepository::get_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key) @@ -815,7 +855,7 @@ mod tests { menu_order: None, }, ]; - ListMetadataRepository::set_items(&test_ctx.conn, &test_ctx.site, &key, &initial_items) + ListMetadataRepository::set_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key, &initial_items) .unwrap(); // Replace with new items @@ -839,7 +879,7 @@ mod tests { menu_order: None, }, ]; - ListMetadataRepository::set_items(&test_ctx.conn, &test_ctx.site, &key, &new_items) + ListMetadataRepository::set_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key, &new_items) .unwrap(); let retrieved = @@ -870,7 +910,7 @@ mod tests { menu_order: None, }, ]; - ListMetadataRepository::set_items(&test_ctx.conn, &test_ctx.site, &key, &initial_items) + ListMetadataRepository::set_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key, &initial_items) .unwrap(); // Append more items @@ -888,7 
+928,7 @@ mod tests { menu_order: None, }, ]; - ListMetadataRepository::append_items(&test_ctx.conn, &test_ctx.site, &key, &more_items) + ListMetadataRepository::append_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key, &more_items) .unwrap(); let retrieved = @@ -912,7 +952,7 @@ mod tests { per_page: 20, }; - ListMetadataRepository::update_header(&test_ctx.conn, &test_ctx.site, &key, &update) + ListMetadataRepository::update_header_by_list_key(&test_ctx.conn, &test_ctx.site, &key, &update) .unwrap(); let header = ListMetadataRepository::get_header(&test_ctx.conn, &test_ctx.site, &key) @@ -931,7 +971,7 @@ mod tests { let list_id = ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key).unwrap(); - ListMetadataRepository::update_state( + ListMetadataRepository::update_state_by_list_metadata_id( &test_ctx.conn, list_id, ListState::FetchingFirstPage, @@ -939,7 +979,7 @@ mod tests { ) .unwrap(); - let state = ListMetadataRepository::get_state(&test_ctx.conn, list_id) + let state = ListMetadataRepository::get_state_by_list_metadata_id(&test_ctx.conn, list_id) .unwrap() .unwrap(); assert_eq!(state.state, ListState::FetchingFirstPage); @@ -954,7 +994,7 @@ mod tests { ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key).unwrap(); // Set initial state - ListMetadataRepository::update_state( + ListMetadataRepository::update_state_by_list_metadata_id( &test_ctx.conn, list_id, ListState::FetchingFirstPage, @@ -963,7 +1003,7 @@ mod tests { .unwrap(); // Update to error state - ListMetadataRepository::update_state( + ListMetadataRepository::update_state_by_list_metadata_id( &test_ctx.conn, list_id, ListState::Error, @@ -971,7 +1011,7 @@ mod tests { ) .unwrap(); - let state = ListMetadataRepository::get_state(&test_ctx.conn, list_id) + let state = ListMetadataRepository::get_state_by_list_metadata_id(&test_ctx.conn, list_id) .unwrap() .unwrap(); assert_eq!(state.state, ListState::Error); @@ -982,7 +1022,7 @@ mod tests { fn 
test_update_state_by_key(test_ctx: TestContext) { let key = ListKey::from("edit:posts:pending"); - ListMetadataRepository::update_state_by_key( + ListMetadataRepository::update_state_by_list_key( &test_ctx.conn, &test_ctx.site, &key, @@ -992,7 +1032,7 @@ mod tests { .unwrap(); let state = - ListMetadataRepository::get_state_by_key(&test_ctx.conn, &test_ctx.site, &key).unwrap(); + ListMetadataRepository::get_state_by_list_key(&test_ctx.conn, &test_ctx.site, &key).unwrap(); assert_eq!(state, ListState::FetchingNextPage); } @@ -1008,13 +1048,13 @@ mod tests { // Increment version let new_version = - ListMetadataRepository::increment_version(&test_ctx.conn, &test_ctx.site, &key) + ListMetadataRepository::increment_version_by_list_key(&test_ctx.conn, &test_ctx.site, &key) .unwrap(); assert_eq!(new_version, 1); // Increment again let newer_version = - ListMetadataRepository::increment_version(&test_ctx.conn, &test_ctx.site, &key) + ListMetadataRepository::increment_version_by_list_key(&test_ctx.conn, &test_ctx.site, &key) .unwrap(); assert_eq!(newer_version, 2); @@ -1038,8 +1078,8 @@ mod tests { parent: None, menu_order: None, }]; - ListMetadataRepository::set_items(&test_ctx.conn, &test_ctx.site, &key, &items).unwrap(); - ListMetadataRepository::update_state(&test_ctx.conn, list_id, ListState::Idle, None) + ListMetadataRepository::set_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key, &items).unwrap(); + ListMetadataRepository::update_state_by_list_metadata_id(&test_ctx.conn, list_id, ListState::Idle, None) .unwrap(); // Verify data exists @@ -1092,7 +1132,7 @@ mod tests { }) .collect(); - ListMetadataRepository::set_items(&test_ctx.conn, &test_ctx.site, &key, &items).unwrap(); + ListMetadataRepository::set_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key, &items).unwrap(); let retrieved = ListMetadataRepository::get_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key) @@ -1122,7 +1162,7 @@ mod tests { // Verify state is FetchingFirstPage let state = - 
ListMetadataRepository::get_state_by_key(&test_ctx.conn, &test_ctx.site, &key).unwrap(); + ListMetadataRepository::get_state_by_list_key(&test_ctx.conn, &test_ctx.site, &key).unwrap(); assert_eq!(state, ListState::FetchingFirstPage); } @@ -1177,7 +1217,7 @@ mod tests { current_page: 3, per_page: 20, }; - ListMetadataRepository::update_header(&test_ctx.conn, &test_ctx.site, &key, &update) + ListMetadataRepository::update_header_by_list_key(&test_ctx.conn, &test_ctx.site, &key, &update) .unwrap(); let result = @@ -1197,7 +1237,7 @@ mod tests { current_page: 2, per_page: 20, }; - ListMetadataRepository::update_header(&test_ctx.conn, &test_ctx.site, &key, &update) + ListMetadataRepository::update_header_by_list_key(&test_ctx.conn, &test_ctx.site, &key, &update) .unwrap(); let result = @@ -1211,7 +1251,7 @@ mod tests { // Verify state changed to FetchingNextPage let state = - ListMetadataRepository::get_state_by_key(&test_ctx.conn, &test_ctx.site, &key).unwrap(); + ListMetadataRepository::get_state_by_list_key(&test_ctx.conn, &test_ctx.site, &key).unwrap(); assert_eq!(state, ListState::FetchingNextPage); } @@ -1224,7 +1264,7 @@ mod tests { ListMetadataRepository::complete_sync(&test_ctx.conn, info.list_metadata_id).unwrap(); let state = - ListMetadataRepository::get_state_by_key(&test_ctx.conn, &test_ctx.site, &key).unwrap(); + ListMetadataRepository::get_state_by_list_key(&test_ctx.conn, &test_ctx.site, &key).unwrap(); assert_eq!(state, ListState::Idle); } @@ -1241,7 +1281,7 @@ mod tests { ) .unwrap(); - let state_record = ListMetadataRepository::get_state(&test_ctx.conn, info.list_metadata_id) + let state_record = ListMetadataRepository::get_state_by_list_metadata_id(&test_ctx.conn, info.list_metadata_id) .unwrap() .unwrap(); assert_eq!(state_record.state, ListState::Error); @@ -1267,7 +1307,7 @@ mod tests { current_page: 1, per_page: 20, }; - ListMetadataRepository::update_header(&test_ctx.conn, &test_ctx.site, &key, &update) + 
ListMetadataRepository::update_header_by_list_key(&test_ctx.conn, &test_ctx.site, &key, &update) .unwrap(); ListMetadataRepository::complete_sync(&test_ctx.conn, refresh_info.list_metadata_id) .unwrap(); From fcf34301d67aef83284ca1a6676a37548d5a06d8 Mon Sep 17 00:00:00 2001 From: Oguz Kocer Date: Thu, 18 Dec 2025 13:49:43 -0500 Subject: [PATCH 17/25] Replace unwrap with expect for better panic messages --- .../src/repository/list_metadata.rs | 284 +++++++++++------- 1 file changed, 182 insertions(+), 102 deletions(-) diff --git a/wp_mobile_cache/src/repository/list_metadata.rs b/wp_mobile_cache/src/repository/list_metadata.rs index 679001de8..52d7cc59d 100644 --- a/wp_mobile_cache/src/repository/list_metadata.rs +++ b/wp_mobile_cache/src/repository/list_metadata.rs @@ -507,7 +507,8 @@ impl ListMetadataRepository { )?; // Get header for pagination info - let header = Self::get_header(executor, site, key)?.unwrap(); + let header = + Self::get_header(executor, site, key)?.expect("header must exist after get_or_create"); Ok(RefreshInfo { list_metadata_id, @@ -552,7 +553,12 @@ impl ListMetadataRepository { let next_page = header.current_page + 1; // Update state to fetching - Self::update_state_by_list_metadata_id(executor, header.row_id, ListState::FetchingNextPage, None)?; + Self::update_state_by_list_metadata_id( + executor, + header.row_id, + ListState::FetchingNextPage, + None, + )?; Ok(Some(FetchNextPageInfo { list_metadata_id: header.row_id, @@ -661,7 +667,7 @@ mod tests { &test_ctx.site, &ListKey::from("nonexistent:key"), ) - .unwrap(); + .expect("should succeed"); assert!(result.is_none()); } @@ -670,13 +676,13 @@ mod tests { let key = ListKey::from("edit:posts:publish"); // Create new header - let row_id = - ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key).unwrap(); + let row_id = ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key) + .expect("should succeed"); // Verify it was created with defaults let header 
= ListMetadataRepository::get_header(&test_ctx.conn, &test_ctx.site, &key) - .unwrap() - .unwrap(); + .expect("query should succeed") + .expect("should succeed"); assert_eq!(header.row_id, row_id); assert_eq!(header.key, key.as_str()); assert_eq!(header.current_page, 0); @@ -692,11 +698,13 @@ mod tests { // Create initial header let first_row_id = - ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key).unwrap(); + ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key) + .expect("should succeed"); // Get or create again should return same rowid let second_row_id = - ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key).unwrap(); + ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key) + .expect("should succeed"); assert_eq!(first_row_id, second_row_id); } @@ -708,13 +716,15 @@ mod tests { &test_ctx.site, &ListKey::from("nonexistent:key"), ) - .unwrap(); + .expect("should succeed"); assert!(items.is_empty()); } #[rstest] fn test_get_state_returns_none_for_non_existent(test_ctx: TestContext) { - let result = ListMetadataRepository::get_state_by_list_metadata_id(&test_ctx.conn, RowId(999999)).unwrap(); + let result = + ListMetadataRepository::get_state_by_list_metadata_id(&test_ctx.conn, RowId(999999)) + .expect("should succeed"); assert!(result.is_none()); } @@ -725,7 +735,7 @@ mod tests { &test_ctx.site, &ListKey::from("nonexistent:key"), ) - .unwrap(); + .expect("should succeed"); assert_eq!(state, ListState::Idle); } @@ -736,7 +746,7 @@ mod tests { &test_ctx.site, &ListKey::from("nonexistent:key"), ) - .unwrap(); + .expect("should succeed"); assert_eq!(version, 0); } @@ -747,7 +757,7 @@ mod tests { &test_ctx.site, &ListKey::from("empty:list"), ) - .unwrap(); + .expect("should succeed"); assert_eq!(count, 0); } @@ -820,11 +830,12 @@ mod tests { }, ]; - ListMetadataRepository::set_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key, &items).unwrap(); + 
ListMetadataRepository::set_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key, &items) + .expect("should succeed"); let retrieved = ListMetadataRepository::get_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key) - .unwrap(); + .expect("should succeed"); assert_eq!(retrieved.len(), 3); assert_eq!(retrieved[0].entity_id, 100); assert_eq!(retrieved[0].parent, Some(50)); @@ -855,8 +866,13 @@ mod tests { menu_order: None, }, ]; - ListMetadataRepository::set_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key, &initial_items) - .unwrap(); + ListMetadataRepository::set_items_by_list_key( + &test_ctx.conn, + &test_ctx.site, + &key, + &initial_items, + ) + .expect("should succeed"); // Replace with new items let new_items = vec![ @@ -879,12 +895,17 @@ mod tests { menu_order: None, }, ]; - ListMetadataRepository::set_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key, &new_items) - .unwrap(); + ListMetadataRepository::set_items_by_list_key( + &test_ctx.conn, + &test_ctx.site, + &key, + &new_items, + ) + .expect("should succeed"); let retrieved = ListMetadataRepository::get_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key) - .unwrap(); + .expect("should succeed"); assert_eq!(retrieved.len(), 3); assert_eq!(retrieved[0].entity_id, 10); assert_eq!(retrieved[1].entity_id, 20); @@ -910,8 +931,13 @@ mod tests { menu_order: None, }, ]; - ListMetadataRepository::set_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key, &initial_items) - .unwrap(); + ListMetadataRepository::set_items_by_list_key( + &test_ctx.conn, + &test_ctx.site, + &key, + &initial_items, + ) + .expect("should succeed"); // Append more items let more_items = vec![ @@ -928,12 +954,17 @@ mod tests { menu_order: None, }, ]; - ListMetadataRepository::append_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key, &more_items) - .unwrap(); + ListMetadataRepository::append_items_by_list_key( + &test_ctx.conn, + &test_ctx.site, + &key, + &more_items, + ) + .expect("should succeed"); let retrieved = 
ListMetadataRepository::get_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key) - .unwrap(); + .expect("should succeed"); assert_eq!(retrieved.len(), 4); assert_eq!(retrieved[0].entity_id, 1); assert_eq!(retrieved[1].entity_id, 2); @@ -952,12 +983,17 @@ mod tests { per_page: 20, }; - ListMetadataRepository::update_header_by_list_key(&test_ctx.conn, &test_ctx.site, &key, &update) - .unwrap(); + ListMetadataRepository::update_header_by_list_key( + &test_ctx.conn, + &test_ctx.site, + &key, + &update, + ) + .expect("should succeed"); let header = ListMetadataRepository::get_header(&test_ctx.conn, &test_ctx.site, &key) - .unwrap() - .unwrap(); + .expect("query should succeed") + .expect("should succeed"); assert_eq!(header.total_pages, Some(5)); assert_eq!(header.total_items, Some(100)); assert_eq!(header.current_page, 1); @@ -969,19 +1005,19 @@ mod tests { fn test_update_state_creates_new_state(test_ctx: TestContext) { let key = ListKey::from("edit:posts:publish"); - let list_id = - ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key).unwrap(); + let list_id = ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key) + .expect("should succeed"); ListMetadataRepository::update_state_by_list_metadata_id( &test_ctx.conn, list_id, ListState::FetchingFirstPage, None, ) - .unwrap(); + .expect("should succeed"); let state = ListMetadataRepository::get_state_by_list_metadata_id(&test_ctx.conn, list_id) - .unwrap() - .unwrap(); + .expect("query should succeed") + .expect("should succeed"); assert_eq!(state.state, ListState::FetchingFirstPage); assert!(state.error_message.is_none()); } @@ -990,8 +1026,8 @@ mod tests { fn test_update_state_updates_existing_state(test_ctx: TestContext) { let key = ListKey::from("edit:posts:draft"); - let list_id = - ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key).unwrap(); + let list_id = ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key) + 
.expect("should succeed"); // Set initial state ListMetadataRepository::update_state_by_list_metadata_id( @@ -1000,7 +1036,7 @@ mod tests { ListState::FetchingFirstPage, None, ) - .unwrap(); + .expect("should succeed"); // Update to error state ListMetadataRepository::update_state_by_list_metadata_id( @@ -1009,11 +1045,11 @@ mod tests { ListState::Error, Some("Network error"), ) - .unwrap(); + .expect("should succeed"); let state = ListMetadataRepository::get_state_by_list_metadata_id(&test_ctx.conn, list_id) - .unwrap() - .unwrap(); + .expect("query should succeed") + .expect("should succeed"); assert_eq!(state.state, ListState::Error); assert_eq!(state.error_message.as_deref(), Some("Network error")); } @@ -1029,10 +1065,11 @@ mod tests { ListState::FetchingNextPage, None, ) - .unwrap(); + .expect("should succeed"); let state = - ListMetadataRepository::get_state_by_list_key(&test_ctx.conn, &test_ctx.site, &key).unwrap(); + ListMetadataRepository::get_state_by_list_key(&test_ctx.conn, &test_ctx.site, &key) + .expect("should succeed"); assert_eq!(state, ListState::FetchingNextPage); } @@ -1041,27 +1078,35 @@ mod tests { let key = ListKey::from("edit:posts:publish"); // Create header (version starts at 0) - ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key).unwrap(); + ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key) + .expect("should succeed"); let initial_version = - ListMetadataRepository::get_version(&test_ctx.conn, &test_ctx.site, &key).unwrap(); + ListMetadataRepository::get_version(&test_ctx.conn, &test_ctx.site, &key) + .expect("should succeed"); assert_eq!(initial_version, 0); // Increment version - let new_version = - ListMetadataRepository::increment_version_by_list_key(&test_ctx.conn, &test_ctx.site, &key) - .unwrap(); + let new_version = ListMetadataRepository::increment_version_by_list_key( + &test_ctx.conn, + &test_ctx.site, + &key, + ) + .expect("should succeed"); assert_eq!(new_version, 1); // 
Increment again - let newer_version = - ListMetadataRepository::increment_version_by_list_key(&test_ctx.conn, &test_ctx.site, &key) - .unwrap(); + let newer_version = ListMetadataRepository::increment_version_by_list_key( + &test_ctx.conn, + &test_ctx.site, + &key, + ) + .expect("should succeed"); assert_eq!(newer_version, 2); // Verify last_first_page_fetched_at is set let header = ListMetadataRepository::get_header(&test_ctx.conn, &test_ctx.site, &key) - .unwrap() - .unwrap(); + .expect("query should succeed") + .expect("should succeed"); assert!(header.last_first_page_fetched_at.is_some()); } @@ -1070,22 +1115,28 @@ mod tests { let key = ListKey::from("edit:posts:publish"); // Create header and add items and state - let list_id = - ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key).unwrap(); + let list_id = ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key) + .expect("should succeed"); let items = vec![ListMetadataItemInput { entity_id: 1, modified_gmt: None, parent: None, menu_order: None, }]; - ListMetadataRepository::set_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key, &items).unwrap(); - ListMetadataRepository::update_state_by_list_metadata_id(&test_ctx.conn, list_id, ListState::Idle, None) - .unwrap(); + ListMetadataRepository::set_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key, &items) + .expect("should succeed"); + ListMetadataRepository::update_state_by_list_metadata_id( + &test_ctx.conn, + list_id, + ListState::Idle, + None, + ) + .expect("should succeed"); // Verify data exists assert!( ListMetadataRepository::get_header(&test_ctx.conn, &test_ctx.site, &key) - .unwrap() + .expect("query should succeed") .is_some() ); assert_eq!( @@ -1094,17 +1145,18 @@ mod tests { &test_ctx.site, &key ) - .unwrap(), + .expect("query should succeed"), 1 ); // Delete the list - ListMetadataRepository::delete_list(&test_ctx.conn, &test_ctx.site, &key).unwrap(); + 
ListMetadataRepository::delete_list(&test_ctx.conn, &test_ctx.site, &key) + .expect("should succeed"); // Verify everything is deleted assert!( ListMetadataRepository::get_header(&test_ctx.conn, &test_ctx.site, &key) - .unwrap() + .expect("query should succeed") .is_none() ); assert_eq!( @@ -1113,7 +1165,7 @@ mod tests { &test_ctx.site, &key ) - .unwrap(), + .expect("query should succeed"), 0 ); } @@ -1132,11 +1184,12 @@ mod tests { }) .collect(); - ListMetadataRepository::set_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key, &items).unwrap(); + ListMetadataRepository::set_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key, &items) + .expect("should succeed"); let retrieved = ListMetadataRepository::get_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key) - .unwrap(); + .expect("should succeed"); assert_eq!(retrieved.len(), 10); // Verify order is preserved (rowid ordering) @@ -1153,8 +1206,8 @@ mod tests { fn test_begin_refresh_creates_header_and_sets_state(test_ctx: TestContext) { let key = ListKey::from("edit:posts:publish"); - let info = - ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, &key).unwrap(); + let info = ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, &key) + .expect("should succeed"); // Verify version was incremented (from 0 to 1) assert_eq!(info.version, 1); @@ -1162,7 +1215,8 @@ mod tests { // Verify state is FetchingFirstPage let state = - ListMetadataRepository::get_state_by_list_key(&test_ctx.conn, &test_ctx.site, &key).unwrap(); + ListMetadataRepository::get_state_by_list_key(&test_ctx.conn, &test_ctx.site, &key) + .expect("should succeed"); assert_eq!(state, ListState::FetchingFirstPage); } @@ -1170,15 +1224,16 @@ mod tests { fn test_begin_refresh_increments_version_each_time(test_ctx: TestContext) { let key = ListKey::from("edit:posts:draft"); - let info1 = - ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, &key).unwrap(); + let info1 = 
ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, &key) + .expect("should succeed"); assert_eq!(info1.version, 1); // Complete the first refresh - ListMetadataRepository::complete_sync(&test_ctx.conn, info1.list_metadata_id).unwrap(); + ListMetadataRepository::complete_sync(&test_ctx.conn, info1.list_metadata_id) + .expect("should succeed"); - let info2 = - ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, &key).unwrap(); + let info2 = ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, &key) + .expect("should succeed"); assert_eq!(info2.version, 2); } @@ -1189,7 +1244,7 @@ mod tests { &test_ctx.site, &ListKey::from("nonexistent"), ) - .unwrap(); + .expect("should succeed"); assert!(result.is_none()); } @@ -1198,11 +1253,12 @@ mod tests { let key = ListKey::from("edit:posts:publish"); // Create header but don't set current_page - ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key).unwrap(); + ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key) + .expect("should succeed"); let result = ListMetadataRepository::begin_fetch_next_page(&test_ctx.conn, &test_ctx.site, &key) - .unwrap(); + .expect("should succeed"); assert!(result.is_none()); } @@ -1217,12 +1273,17 @@ mod tests { current_page: 3, per_page: 20, }; - ListMetadataRepository::update_header_by_list_key(&test_ctx.conn, &test_ctx.site, &key, &update) - .unwrap(); + ListMetadataRepository::update_header_by_list_key( + &test_ctx.conn, + &test_ctx.site, + &key, + &update, + ) + .expect("should succeed"); let result = ListMetadataRepository::begin_fetch_next_page(&test_ctx.conn, &test_ctx.site, &key) - .unwrap(); + .expect("should succeed"); assert!(result.is_none()); } @@ -1237,21 +1298,27 @@ mod tests { current_page: 2, per_page: 20, }; - ListMetadataRepository::update_header_by_list_key(&test_ctx.conn, &test_ctx.site, &key, &update) - .unwrap(); + ListMetadataRepository::update_header_by_list_key( + 
&test_ctx.conn, + &test_ctx.site, + &key, + &update, + ) + .expect("should succeed"); let result = ListMetadataRepository::begin_fetch_next_page(&test_ctx.conn, &test_ctx.site, &key) - .unwrap(); + .expect("should succeed"); assert!(result.is_some()); - let info = result.unwrap(); + let info = result.expect("should succeed"); assert_eq!(info.page, 3); // next page assert_eq!(info.per_page, 20); // Verify state changed to FetchingNextPage let state = - ListMetadataRepository::get_state_by_list_key(&test_ctx.conn, &test_ctx.site, &key).unwrap(); + ListMetadataRepository::get_state_by_list_key(&test_ctx.conn, &test_ctx.site, &key) + .expect("should succeed"); assert_eq!(state, ListState::FetchingNextPage); } @@ -1259,12 +1326,14 @@ mod tests { fn test_complete_sync_sets_state_to_idle(test_ctx: TestContext) { let key = ListKey::from("edit:posts:publish"); - let info = - ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, &key).unwrap(); - ListMetadataRepository::complete_sync(&test_ctx.conn, info.list_metadata_id).unwrap(); + let info = ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, &key) + .expect("should succeed"); + ListMetadataRepository::complete_sync(&test_ctx.conn, info.list_metadata_id) + .expect("should succeed"); let state = - ListMetadataRepository::get_state_by_list_key(&test_ctx.conn, &test_ctx.site, &key).unwrap(); + ListMetadataRepository::get_state_by_list_key(&test_ctx.conn, &test_ctx.site, &key) + .expect("should succeed"); assert_eq!(state, ListState::Idle); } @@ -1272,18 +1341,21 @@ mod tests { fn test_complete_sync_with_error_sets_state_and_message(test_ctx: TestContext) { let key = ListKey::from("edit:posts:publish"); - let info = - ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, &key).unwrap(); + let info = ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, &key) + .expect("should succeed"); ListMetadataRepository::complete_sync_with_error( &test_ctx.conn, 
info.list_metadata_id, "Network timeout", ) - .unwrap(); + .expect("should succeed"); - let state_record = ListMetadataRepository::get_state_by_list_metadata_id(&test_ctx.conn, info.list_metadata_id) - .unwrap() - .unwrap(); + let state_record = ListMetadataRepository::get_state_by_list_metadata_id( + &test_ctx.conn, + info.list_metadata_id, + ) + .expect("query should succeed") + .expect("should succeed"); assert_eq!(state_record.state, ListState::Error); assert_eq!( state_record.error_message.as_deref(), @@ -1297,7 +1369,8 @@ mod tests { // Start a refresh (version becomes 1) let refresh_info = - ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, &key).unwrap(); + ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, &key) + .expect("should succeed"); assert_eq!(refresh_info.version, 1); // Update header to simulate page 1 loaded @@ -1307,24 +1380,31 @@ mod tests { current_page: 1, per_page: 20, }; - ListMetadataRepository::update_header_by_list_key(&test_ctx.conn, &test_ctx.site, &key, &update) - .unwrap(); + ListMetadataRepository::update_header_by_list_key( + &test_ctx.conn, + &test_ctx.site, + &key, + &update, + ) + .expect("should succeed"); ListMetadataRepository::complete_sync(&test_ctx.conn, refresh_info.list_metadata_id) - .unwrap(); + .expect("should succeed"); // Start load-next-page (captures version = 1) let next_page_info = ListMetadataRepository::begin_fetch_next_page(&test_ctx.conn, &test_ctx.site, &key) - .unwrap() - .unwrap(); + .expect("query should succeed") + .expect("should succeed"); let captured_version = next_page_info.version; // Another refresh happens (version becomes 2) - ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, &key).unwrap(); + ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, &key) + .expect("should succeed"); // Version check should fail (stale) let current_version = - ListMetadataRepository::get_version(&test_ctx.conn, &test_ctx.site, 
&key).unwrap(); + ListMetadataRepository::get_version(&test_ctx.conn, &test_ctx.site, &key) + .expect("should succeed"); assert_ne!( current_version, captured_version, "Version should not match after refresh" From 893245bb6de1625e45b1c8f1781efc827992508a Mon Sep 17 00:00:00 2001 From: Oguz Kocer Date: Thu, 18 Dec 2025 14:15:00 -0500 Subject: [PATCH 18/25] Update migration count in Kotlin and Swift tests --- .../kotlin/src/integrationTest/kotlin/WordPressApiCacheTest.kt | 2 +- .../Tests/wordpress-api-cache/WordPressApiCacheTests.swift | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/native/kotlin/api/kotlin/src/integrationTest/kotlin/WordPressApiCacheTest.kt b/native/kotlin/api/kotlin/src/integrationTest/kotlin/WordPressApiCacheTest.kt index 8bd3a07ea..7e5f98b4a 100644 --- a/native/kotlin/api/kotlin/src/integrationTest/kotlin/WordPressApiCacheTest.kt +++ b/native/kotlin/api/kotlin/src/integrationTest/kotlin/WordPressApiCacheTest.kt @@ -10,6 +10,6 @@ class WordPressApiCacheTest { @Test fun testThatMigrationsWork() = runTest { - assertEquals(6, WordPressApiCache().performMigrations()) + assertEquals(7, WordPressApiCache().performMigrations()) } } diff --git a/native/swift/Tests/wordpress-api-cache/WordPressApiCacheTests.swift b/native/swift/Tests/wordpress-api-cache/WordPressApiCacheTests.swift index 8ccf97a39..853c45ade 100644 --- a/native/swift/Tests/wordpress-api-cache/WordPressApiCacheTests.swift +++ b/native/swift/Tests/wordpress-api-cache/WordPressApiCacheTests.swift @@ -16,7 +16,7 @@ actor Test { @Test func testMigrationsWork() async throws { let migrationsPerformed = try await self.cache.performMigrations() - #expect(migrationsPerformed == 6) + #expect(migrationsPerformed == 7) } #if !os(Linux) From 53405b4e9afff01a16567ea01616c1bac869d3a8 Mon Sep 17 00:00:00 2001 From: Oguz Kocer Date: Thu, 18 Dec 2025 14:26:09 -0500 Subject: [PATCH 19/25] Handle race condition in get_or_create Add `ConstraintViolation` variant to `SqliteDbError` to 
distinguish UNIQUE constraint violations from other SQLite errors. Use this in `ListMetadataRepository::get_or_create` to handle the rare case where another thread creates the same header between our SELECT and INSERT. Changes: - Add `SqliteDbError::ConstraintViolation` variant - Update `From` to detect constraint violations - Update `get_or_create` to catch constraint violations and re-fetch --- wp_mobile_cache/src/lib.rs | 9 +++++++++ .../src/repository/list_metadata.rs | 20 +++++++++++++++++-- 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/wp_mobile_cache/src/lib.rs b/wp_mobile_cache/src/lib.rs index 7456f4adc..d5c75347b 100644 --- a/wp_mobile_cache/src/lib.rs +++ b/wp_mobile_cache/src/lib.rs @@ -16,6 +16,7 @@ pub mod test_fixtures; #[derive(Debug, Clone, PartialEq, Eq, thiserror::Error, uniffi::Error)] pub enum SqliteDbError { SqliteError(String), + ConstraintViolation(String), TableNameMismatch { expected: DbTable, actual: DbTable }, } @@ -23,6 +24,9 @@ impl std::fmt::Display for SqliteDbError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { SqliteDbError::SqliteError(message) => write!(f, "SqliteDbError: message={}", message), + SqliteDbError::ConstraintViolation(message) => { + write!(f, "Constraint violation: {}", message) + } SqliteDbError::TableNameMismatch { expected, actual } => { write!( f, @@ -37,6 +41,11 @@ impl std::fmt::Display for SqliteDbError { impl From for SqliteDbError { fn from(err: rusqlite::Error) -> Self { + if let rusqlite::Error::SqliteFailure(sqlite_err, _) = &err + && sqlite_err.code == rusqlite::ErrorCode::ConstraintViolation + { + return SqliteDbError::ConstraintViolation(err.to_string()); + } SqliteDbError::SqliteError(err.to_string()) } } diff --git a/wp_mobile_cache/src/repository/list_metadata.rs b/wp_mobile_cache/src/repository/list_metadata.rs index 52d7cc59d..ab416bac2 100644 --- a/wp_mobile_cache/src/repository/list_metadata.rs +++ 
b/wp_mobile_cache/src/repository/list_metadata.rs @@ -62,6 +62,9 @@ impl ListMetadataRepository { /// /// If the header doesn't exist, creates it with default values and returns its rowid. /// If it exists, returns the existing rowid. + /// + /// This function is safe against race conditions: if another thread creates the header + /// between our SELECT and INSERT, we catch the constraint violation and re-fetch. pub fn get_or_create( executor: &impl QueryExecutor, site: &DbSite, @@ -77,9 +80,22 @@ impl ListMetadataRepository { "INSERT INTO {} (db_site_id, key, current_page, per_page, version) VALUES (?, ?, 0, 20, 0)", Self::header_table().table_name() ); - executor.execute(&sql, rusqlite::params![site.row_id, key])?; - Ok(executor.last_insert_rowid()) + match executor.execute(&sql, rusqlite::params![site.row_id, key]) { + Ok(_) => Ok(executor.last_insert_rowid()), + Err(SqliteDbError::ConstraintViolation(_)) => { + // Race condition: another thread created it between our SELECT and INSERT. + // Re-fetch to get the row created by the other thread. + Self::get_header(executor, site, key)? + .map(|h| h.row_id) + .ok_or_else(|| { + SqliteDbError::SqliteError( + "Header disappeared after constraint violation".to_string(), + ) + }) + } + Err(e) => Err(e), + } } /// Get all items for a list by ID, ordered by rowid (insertion order = display order). From cb2f1872195ad58a9c3a5c13304373e5e34a413d Mon Sep 17 00:00:00 2001 From: Oguz Kocer Date: Thu, 18 Dec 2025 14:33:23 -0500 Subject: [PATCH 20/25] Add optimized get_or_create_and_increment_version method Add `get_or_create_and_increment_version` that uses a single `INSERT ... ON CONFLICT DO UPDATE ... RETURNING` query to atomically create or update a header while incrementing its version. 
This reduces `begin_refresh` from 5-6 queries to 2 queries: - Before: get_or_create (1-2) + increment_version (2) + update_state (1) + get_header (1) - After: get_or_create_and_increment_version (1) + update_state (1) Changes: - Add `HeaderVersionInfo` struct for the return type - Add `get_or_create_and_increment_version` method using RETURNING clause - Update `begin_refresh` to use the new optimized method - Add tests for the new method --- .../src/repository/list_metadata.rs | 121 +++++++++++++++--- 1 file changed, 104 insertions(+), 17 deletions(-) diff --git a/wp_mobile_cache/src/repository/list_metadata.rs b/wp_mobile_cache/src/repository/list_metadata.rs index ab416bac2..3c85c4b96 100644 --- a/wp_mobile_cache/src/repository/list_metadata.rs +++ b/wp_mobile_cache/src/repository/list_metadata.rs @@ -488,6 +488,40 @@ impl ListMetadataRepository { Ok(()) } + /// Get or create a list header and increment its version in a single query. + /// + /// Uses `INSERT ... ON CONFLICT DO UPDATE ... RETURNING` to atomically: + /// - Create the header with version=1 if it doesn't exist + /// - Increment the version and update `last_first_page_fetched_at` if it exists + /// - Return the rowid, new version, and per_page + /// + /// This is more efficient than calling `get_or_create` + `increment_version` separately. 
+ pub fn get_or_create_and_increment_version( + executor: &impl QueryExecutor, + site: &DbSite, + key: &ListKey, + ) -> Result { + let sql = format!( + "INSERT INTO {} (db_site_id, key, current_page, per_page, version, last_first_page_fetched_at) \ + VALUES (?1, ?2, 0, 20, 1, strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) \ + ON CONFLICT(db_site_id, key) DO UPDATE SET \ + version = version + 1, \ + last_first_page_fetched_at = strftime('%Y-%m-%dT%H:%M:%fZ', 'now') \ + RETURNING rowid, version, per_page", + Self::header_table().table_name() + ); + + let mut stmt = executor.prepare(&sql)?; + stmt.query_row(rusqlite::params![site.row_id, key], |row| { + Ok(HeaderVersionInfo { + list_metadata_id: row.get(0)?, + version: row.get(1)?, + per_page: row.get(2)?, + }) + }) + .map_err(SqliteDbError::from) + } + // ============================================================ // Concurrency Helpers // ============================================================ @@ -495,10 +529,9 @@ impl ListMetadataRepository { /// Begin a refresh operation (fetch first page). /// /// Atomically: - /// 1. Creates header if needed - /// 2. Increments version (invalidates any in-flight load-more) - /// 3. Updates state to FetchingFirstPage - /// 4. Returns info needed for the fetch + /// 1. Creates header if needed and increments version + /// 2. Updates state to FetchingFirstPage + /// 3. Returns info needed for the fetch /// /// Call this before starting an API fetch for page 1. 
pub fn begin_refresh( @@ -508,28 +541,21 @@ impl ListMetadataRepository { ) -> Result { log::debug!("ListMetadataRepository::begin_refresh: key={}", key); - // Ensure header exists and get its ID - let list_metadata_id = Self::get_or_create(executor, site, key)?; - - // Increment version (invalidates any in-flight load-more) - let version = Self::increment_version_by_list_metadata_id(executor, list_metadata_id)?; + // Get or create header and increment version in a single query + let header_info = Self::get_or_create_and_increment_version(executor, site, key)?; // Update state to fetching Self::update_state_by_list_metadata_id( executor, - list_metadata_id, + header_info.list_metadata_id, ListState::FetchingFirstPage, None, )?; - // Get header for pagination info - let header = - Self::get_header(executor, site, key)?.expect("header must exist after get_or_create"); - Ok(RefreshInfo { - list_metadata_id, - version, - per_page: header.per_page, + list_metadata_id: header_info.list_metadata_id, + version: header_info.version, + per_page: header_info.per_page, }) } @@ -620,6 +646,17 @@ impl ListMetadataRepository { } } +/// Header info returned from `get_or_create_and_increment_version`. +#[derive(Debug, Clone)] +pub struct HeaderVersionInfo { + /// Row ID of the list_metadata record + pub list_metadata_id: RowId, + /// Current version number + pub version: i64, + /// Items per page setting + pub per_page: i64, +} + /// Information returned when starting a refresh operation. 
#[derive(Debug, Clone)] pub struct RefreshInfo { @@ -1126,6 +1163,56 @@ mod tests { assert!(header.last_first_page_fetched_at.is_some()); } + #[rstest] + fn test_get_or_create_and_increment_version_creates_new(test_ctx: TestContext) { + let key = ListKey::from("edit:posts:new"); + + // First call creates header with version 1 + let info = ListMetadataRepository::get_or_create_and_increment_version( + &test_ctx.conn, + &test_ctx.site, + &key, + ) + .expect("should succeed"); + assert_eq!(info.version, 1); + assert_eq!(info.per_page, 20); + + // Verify header was created + let header = ListMetadataRepository::get_header(&test_ctx.conn, &test_ctx.site, &key) + .expect("query should succeed") + .expect("should exist"); + assert_eq!(header.row_id, info.list_metadata_id); + assert_eq!(header.version, 1); + } + + #[rstest] + fn test_get_or_create_and_increment_version_increments_existing(test_ctx: TestContext) { + let key = ListKey::from("edit:posts:existing"); + + // Create header first + ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key) + .expect("should succeed"); + + // Now call get_or_create_and_increment_version - should increment from 0 to 1 + let info1 = ListMetadataRepository::get_or_create_and_increment_version( + &test_ctx.conn, + &test_ctx.site, + &key, + ) + .expect("should succeed"); + assert_eq!(info1.version, 1); + + // Call again - should increment to 2 + let info2 = ListMetadataRepository::get_or_create_and_increment_version( + &test_ctx.conn, + &test_ctx.site, + &key, + ) + .expect("should succeed"); + assert_eq!(info2.version, 2); + assert_eq!(info2.list_metadata_id, info1.list_metadata_id); + } + #[rstest] fn test_delete_list_removes_all_data(test_ctx: TestContext) { let key = ListKey::from("edit:posts:publish"); From 48e9dd09d309ecd78e7ba1bb41bc94cdc0aef160 Mon Sep 17 00:00:00 2001 From: Oguz Kocer Date: Thu, 18 Dec 2025 14:41:57 -0500 Subject: [PATCH 21/25] Remove standalone increment_version methods Remove 
`increment_version_by_list_metadata_id` and `increment_version_by_list_key` since version increment should only happen through `get_or_create_and_increment_version` as part of a proper refresh flow, not called arbitrarily. --- .../src/repository/list_metadata.rs | 72 ------------------- 1 file changed, 72 deletions(-) diff --git a/wp_mobile_cache/src/repository/list_metadata.rs b/wp_mobile_cache/src/repository/list_metadata.rs index 3c85c4b96..7eb34a655 100644 --- a/wp_mobile_cache/src/repository/list_metadata.rs +++ b/wp_mobile_cache/src/repository/list_metadata.rs @@ -435,41 +435,6 @@ impl ListMetadataRepository { Self::update_state_by_list_metadata_id(executor, list_metadata_id, state, error_message) } - /// Increment version by ID and return the new value. - /// - /// Used when starting a refresh to invalidate any in-flight load-more operations. - pub fn increment_version_by_list_metadata_id( - executor: &impl QueryExecutor, - list_metadata_id: RowId, - ) -> Result { - let sql = format!( - "UPDATE {} SET version = version + 1, last_first_page_fetched_at = strftime('%Y-%m-%dT%H:%M:%fZ', 'now') WHERE rowid = ?", - Self::header_table().table_name() - ); - executor.execute(&sql, rusqlite::params![list_metadata_id])?; - - // Return the new version - let sql = format!( - "SELECT version FROM {} WHERE rowid = ?", - Self::header_table().table_name() - ); - let mut stmt = executor.prepare(&sql)?; - stmt.query_row(rusqlite::params![list_metadata_id], |row| row.get(0)) - .map_err(SqliteDbError::from) - } - - /// Increment version by site and key and return the new value. - /// - /// If you already have the `list_metadata_id`, use `increment_version_by_list_metadata_id` instead. 
- pub fn increment_version_by_list_key( - executor: &impl QueryExecutor, - site: &DbSite, - key: &ListKey, - ) -> Result { - let list_metadata_id = Self::get_or_create(executor, site, key)?; - Self::increment_version_by_list_metadata_id(executor, list_metadata_id) - } - /// Delete all data for a list (header, items, and state). pub fn delete_list( executor: &impl QueryExecutor, @@ -1126,43 +1091,6 @@ mod tests { assert_eq!(state, ListState::FetchingNextPage); } - #[rstest] - fn test_increment_version(test_ctx: TestContext) { - let key = ListKey::from("edit:posts:publish"); - - // Create header (version starts at 0) - ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key) - .expect("should succeed"); - let initial_version = - ListMetadataRepository::get_version(&test_ctx.conn, &test_ctx.site, &key) - .expect("should succeed"); - assert_eq!(initial_version, 0); - - // Increment version - let new_version = ListMetadataRepository::increment_version_by_list_key( - &test_ctx.conn, - &test_ctx.site, - &key, - ) - .expect("should succeed"); - assert_eq!(new_version, 1); - - // Increment again - let newer_version = ListMetadataRepository::increment_version_by_list_key( - &test_ctx.conn, - &test_ctx.site, - &key, - ) - .expect("should succeed"); - assert_eq!(newer_version, 2); - - // Verify last_first_page_fetched_at is set - let header = ListMetadataRepository::get_header(&test_ctx.conn, &test_ctx.site, &key) - .expect("query should succeed") - .expect("should succeed"); - assert!(header.last_first_page_fetched_at.is_some()); - } - #[rstest] fn test_get_or_create_and_increment_version_creates_new(test_ctx: TestContext) { let key = ListKey::from("edit:posts:new"); From 41f9ba5ad62007cf1df3f87a42a741961e638205 Mon Sep 17 00:00:00 2001 From: Oguz Kocer Date: Thu, 18 Dec 2025 14:44:05 -0500 Subject: [PATCH 22/25] Remove concurrency helpers from repository layer Move concurrency orchestration logic out of the repository layer. 
These helpers (`begin_refresh`, `begin_fetch_next_page`, `complete_sync`, `complete_sync_with_error`) belong in a service layer since they: - Orchestrate multiple repository operations - Apply business rules (checking page counts, deciding state transitions) - Return domain-specific result types The repository now provides clean primitives that a service layer can compose: - `get_or_create_and_increment_version` for atomic create/increment - `update_state_by_list_metadata_id` for state changes - `get_header`, `get_items`, etc. for reads --- .../src/repository/list_metadata.rs | 360 ------------------ 1 file changed, 360 deletions(-) diff --git a/wp_mobile_cache/src/repository/list_metadata.rs b/wp_mobile_cache/src/repository/list_metadata.rs index 7eb34a655..3fba33d08 100644 --- a/wp_mobile_cache/src/repository/list_metadata.rs +++ b/wp_mobile_cache/src/repository/list_metadata.rs @@ -486,129 +486,6 @@ impl ListMetadataRepository { }) .map_err(SqliteDbError::from) } - - // ============================================================ - // Concurrency Helpers - // ============================================================ - - /// Begin a refresh operation (fetch first page). - /// - /// Atomically: - /// 1. Creates header if needed and increments version - /// 2. Updates state to FetchingFirstPage - /// 3. Returns info needed for the fetch - /// - /// Call this before starting an API fetch for page 1. 
- pub fn begin_refresh( - executor: &impl QueryExecutor, - site: &DbSite, - key: &ListKey, - ) -> Result { - log::debug!("ListMetadataRepository::begin_refresh: key={}", key); - - // Get or create header and increment version in a single query - let header_info = Self::get_or_create_and_increment_version(executor, site, key)?; - - // Update state to fetching - Self::update_state_by_list_metadata_id( - executor, - header_info.list_metadata_id, - ListState::FetchingFirstPage, - None, - )?; - - Ok(RefreshInfo { - list_metadata_id: header_info.list_metadata_id, - version: header_info.version, - per_page: header_info.per_page, - }) - } - - /// Begin a load-next-page operation. - /// - /// Atomically: - /// 1. Gets current pagination state - /// 2. Checks if there are more pages to load - /// 3. Updates state to FetchingNextPage - /// 4. Returns info needed for the fetch (including version for later check) - /// - /// Returns None if already at the last page or no pages loaded yet. - /// Call this before starting an API fetch for page N+1. - pub fn begin_fetch_next_page( - executor: &impl QueryExecutor, - site: &DbSite, - key: &ListKey, - ) -> Result, SqliteDbError> { - log::debug!("ListMetadataRepository::begin_fetch_next_page: key={}", key); - - let header = match Self::get_header(executor, site, key)? 
{ - Some(h) => h, - None => return Ok(None), // List doesn't exist - }; - - // Check if we have pages loaded and more to fetch - if header.current_page == 0 { - return Ok(None); // No pages loaded yet, need refresh first - } - - if let Some(total_pages) = header.total_pages - && header.current_page >= total_pages - { - return Ok(None); // Already at last page - } - - let next_page = header.current_page + 1; - - // Update state to fetching - Self::update_state_by_list_metadata_id( - executor, - header.row_id, - ListState::FetchingNextPage, - None, - )?; - - Ok(Some(FetchNextPageInfo { - list_metadata_id: header.row_id, - page: next_page, - version: header.version, - per_page: header.per_page, - })) - } - - /// Complete a sync operation successfully. - /// - /// Updates state to Idle and clears any error message. - pub fn complete_sync( - executor: &impl QueryExecutor, - list_metadata_id: RowId, - ) -> Result<(), SqliteDbError> { - log::debug!( - "ListMetadataRepository::complete_sync: list_metadata_id={}", - list_metadata_id.0 - ); - Self::update_state_by_list_metadata_id(executor, list_metadata_id, ListState::Idle, None) - } - - /// Complete a sync operation with an error. - /// - /// Updates state to Error with the provided message. - pub fn complete_sync_with_error( - executor: &impl QueryExecutor, - list_metadata_id: RowId, - error_message: &str, - ) -> Result<(), SqliteDbError> { - log::debug!( - "ListMetadataRepository::complete_sync_with_error: list_metadata_id={}, error={}", - list_metadata_id.0, - error_message - ); - Self::update_state_by_list_metadata_id( - executor, - list_metadata_id, - ListState::Error, - Some(error_message), - ) - } } /// Header info returned from `get_or_create_and_increment_version`. @@ -622,30 +499,6 @@ pub struct HeaderVersionInfo { pub per_page: i64, } -/// Information returned when starting a refresh operation. 
-#[derive(Debug, Clone)] -pub struct RefreshInfo { - /// Row ID of the list_metadata record - pub list_metadata_id: RowId, - /// New version number (for concurrency checking) - pub version: i64, - /// Items per page setting - pub per_page: i64, -} - -/// Information returned when starting a load-next-page operation. -#[derive(Debug, Clone)] -pub struct FetchNextPageInfo { - /// Row ID of the list_metadata record - pub list_metadata_id: RowId, - /// Page number to fetch - pub page: i64, - /// Version at start (check before storing results) - pub version: i64, - /// Items per page setting - pub per_page: i64, -} - /// Input for creating a list metadata item. #[derive(Debug, Clone)] pub struct ListMetadataItemInput { @@ -1228,217 +1081,4 @@ mod tests { assert_eq!(item.entity_id, ((i + 1) * 100) as i64); } } - - // ============================================================ - // Concurrency Helper Tests - // ============================================================ - - #[rstest] - fn test_begin_refresh_creates_header_and_sets_state(test_ctx: TestContext) { - let key = ListKey::from("edit:posts:publish"); - - let info = ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, &key) - .expect("should succeed"); - - // Verify version was incremented (from 0 to 1) - assert_eq!(info.version, 1); - assert_eq!(info.per_page, 20); // default - - // Verify state is FetchingFirstPage - let state = - ListMetadataRepository::get_state_by_list_key(&test_ctx.conn, &test_ctx.site, &key) - .expect("should succeed"); - assert_eq!(state, ListState::FetchingFirstPage); - } - - #[rstest] - fn test_begin_refresh_increments_version_each_time(test_ctx: TestContext) { - let key = ListKey::from("edit:posts:draft"); - - let info1 = ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, &key) - .expect("should succeed"); - assert_eq!(info1.version, 1); - - // Complete the first refresh - ListMetadataRepository::complete_sync(&test_ctx.conn, info1.list_metadata_id) 
- .expect("should succeed"); - - let info2 = ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, &key) - .expect("should succeed"); - assert_eq!(info2.version, 2); - } - - #[rstest] - fn test_begin_fetch_next_page_returns_none_for_non_existent_list(test_ctx: TestContext) { - let result = ListMetadataRepository::begin_fetch_next_page( - &test_ctx.conn, - &test_ctx.site, - &ListKey::from("nonexistent"), - ) - .expect("should succeed"); - assert!(result.is_none()); - } - - #[rstest] - fn test_begin_fetch_next_page_returns_none_when_no_pages_loaded(test_ctx: TestContext) { - let key = ListKey::from("edit:posts:publish"); - - // Create header but don't set current_page - ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key) - .expect("should succeed"); - - let result = - ListMetadataRepository::begin_fetch_next_page(&test_ctx.conn, &test_ctx.site, &key) - .expect("should succeed"); - assert!(result.is_none()); - } - - #[rstest] - fn test_begin_fetch_next_page_returns_none_at_last_page(test_ctx: TestContext) { - let key = ListKey::from("edit:posts:publish"); - - // Set up header with current_page = total_pages - let update = ListMetadataHeaderUpdate { - total_pages: Some(3), - total_items: Some(60), - current_page: 3, - per_page: 20, - }; - ListMetadataRepository::update_header_by_list_key( - &test_ctx.conn, - &test_ctx.site, - &key, - &update, - ) - .expect("should succeed"); - - let result = - ListMetadataRepository::begin_fetch_next_page(&test_ctx.conn, &test_ctx.site, &key) - .expect("should succeed"); - assert!(result.is_none()); - } - - #[rstest] - fn test_begin_fetch_next_page_returns_info_when_more_pages(test_ctx: TestContext) { - let key = ListKey::from("edit:posts:publish"); - - // Set up header with more pages available - let update = ListMetadataHeaderUpdate { - total_pages: Some(5), - total_items: Some(100), - current_page: 2, - per_page: 20, - }; - ListMetadataRepository::update_header_by_list_key( - &test_ctx.conn, - 
&test_ctx.site, - &key, - &update, - ) - .expect("should succeed"); - - let result = - ListMetadataRepository::begin_fetch_next_page(&test_ctx.conn, &test_ctx.site, &key) - .expect("should succeed"); - assert!(result.is_some()); - - let info = result.expect("should succeed"); - assert_eq!(info.page, 3); // next page - assert_eq!(info.per_page, 20); - - // Verify state changed to FetchingNextPage - let state = - ListMetadataRepository::get_state_by_list_key(&test_ctx.conn, &test_ctx.site, &key) - .expect("should succeed"); - assert_eq!(state, ListState::FetchingNextPage); - } - - #[rstest] - fn test_complete_sync_sets_state_to_idle(test_ctx: TestContext) { - let key = ListKey::from("edit:posts:publish"); - - let info = ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, &key) - .expect("should succeed"); - ListMetadataRepository::complete_sync(&test_ctx.conn, info.list_metadata_id) - .expect("should succeed"); - - let state = - ListMetadataRepository::get_state_by_list_key(&test_ctx.conn, &test_ctx.site, &key) - .expect("should succeed"); - assert_eq!(state, ListState::Idle); - } - - #[rstest] - fn test_complete_sync_with_error_sets_state_and_message(test_ctx: TestContext) { - let key = ListKey::from("edit:posts:publish"); - - let info = ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, &key) - .expect("should succeed"); - ListMetadataRepository::complete_sync_with_error( - &test_ctx.conn, - info.list_metadata_id, - "Network timeout", - ) - .expect("should succeed"); - - let state_record = ListMetadataRepository::get_state_by_list_metadata_id( - &test_ctx.conn, - info.list_metadata_id, - ) - .expect("query should succeed") - .expect("should succeed"); - assert_eq!(state_record.state, ListState::Error); - assert_eq!( - state_record.error_message.as_deref(), - Some("Network timeout") - ); - } - - #[rstest] - fn test_version_check_detects_stale_operation(test_ctx: TestContext) { - let key = ListKey::from("edit:posts:publish"); - - 
// Start a refresh (version becomes 1) - let refresh_info = - ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, &key) - .expect("should succeed"); - assert_eq!(refresh_info.version, 1); - - // Update header to simulate page 1 loaded - let update = ListMetadataHeaderUpdate { - total_pages: Some(5), - total_items: Some(100), - current_page: 1, - per_page: 20, - }; - ListMetadataRepository::update_header_by_list_key( - &test_ctx.conn, - &test_ctx.site, - &key, - &update, - ) - .expect("should succeed"); - ListMetadataRepository::complete_sync(&test_ctx.conn, refresh_info.list_metadata_id) - .expect("should succeed"); - - // Start load-next-page (captures version = 1) - let next_page_info = - ListMetadataRepository::begin_fetch_next_page(&test_ctx.conn, &test_ctx.site, &key) - .expect("query should succeed") - .expect("should succeed"); - let captured_version = next_page_info.version; - - // Another refresh happens (version becomes 2) - ListMetadataRepository::begin_refresh(&test_ctx.conn, &test_ctx.site, &key) - .expect("should succeed"); - - // Version check should fail (stale) - let current_version = - ListMetadataRepository::get_version(&test_ctx.conn, &test_ctx.site, &key) - .expect("should succeed"); - assert_ne!( - current_version, captured_version, - "Version should not match after refresh" - ); - } } From 811eb46b0d7dacec4593376e905e7b3848b0d612 Mon Sep 17 00:00:00 2001 From: Oguz Kocer Date: Thu, 18 Dec 2025 14:45:56 -0500 Subject: [PATCH 23/25] Move reset_stale_fetching_states to ListMetadataRepository Move `reset_stale_fetching_states_internal` from `WpApiCache` to `ListMetadataRepository` where it belongs. The repository owns the state table and should provide all operations on it. `WpApiCache::perform_migrations` still calls this method after migrations complete, but now delegates to the repository. 
--- wp_mobile_cache/src/lib.rs | 29 ++----------------- .../src/repository/list_metadata.rs | 27 +++++++++++++++++ 2 files changed, 30 insertions(+), 26 deletions(-) diff --git a/wp_mobile_cache/src/lib.rs b/wp_mobile_cache/src/lib.rs index d5c75347b..7009158b2 100644 --- a/wp_mobile_cache/src/lib.rs +++ b/wp_mobile_cache/src/lib.rs @@ -3,6 +3,8 @@ use rusqlite::types::{FromSql, FromSqlResult, ToSql, ToSqlOutput}; use rusqlite::{Connection, Result as SqliteResult, params}; use std::sync::Mutex; +use crate::repository::list_metadata::ListMetadataRepository; + pub mod context; pub mod db_types; pub mod entity; @@ -277,7 +279,7 @@ impl WpApiCache { // Errors are logged but not propagated: this is a best-effort cleanup, // and failure doesn't affect core functionality (worst case: UI shows // stale loading state). - if let Err(e) = Self::reset_stale_fetching_states_internal(connection) { + if let Err(e) = ListMetadataRepository::reset_stale_fetching_states(connection) { log::warn!("Failed to reset stale fetching states: {}", e); } @@ -320,31 +322,6 @@ impl WpApiCache { } impl WpApiCache { - /// Resets stale fetching states (`FetchingFirstPage`, `FetchingNextPage`) to `Idle`. - /// - /// If the app terminates while a fetch is in progress, these transient states persist - /// in the database. On next launch, this could cause perpetual loading indicators or - /// blocked fetches. We reset them during `WpApiCache` initialization since it's - /// typically created once at app startup. - /// - /// Note: `Error` states are intentionally preserved for UI feedback and debugging. 
- fn reset_stale_fetching_states_internal( - connection: &mut Connection, - ) -> Result { - use crate::list_metadata::ListState; - - connection - .execute( - "UPDATE list_metadata_state SET state = ?1 WHERE state IN (?2, ?3)", - params![ - ListState::Idle as i32, - ListState::FetchingFirstPage as i32, - ListState::FetchingNextPage as i32, - ], - ) - .map_err(SqliteDbError::from) - } - /// Execute a database operation with scoped access to the connection. /// /// This is the **only** way to access the database. The provided closure diff --git a/wp_mobile_cache/src/repository/list_metadata.rs b/wp_mobile_cache/src/repository/list_metadata.rs index 3fba33d08..af21e4ccb 100644 --- a/wp_mobile_cache/src/repository/list_metadata.rs +++ b/wp_mobile_cache/src/repository/list_metadata.rs @@ -486,6 +486,33 @@ impl ListMetadataRepository { }) .map_err(SqliteDbError::from) } + + /// Reset stale fetching states to Idle. + /// + /// If the app terminates while a fetch is in progress, `FetchingFirstPage` and + /// `FetchingNextPage` states persist in the database. On next launch, this could + /// cause perpetual loading indicators or blocked fetches. + /// + /// This resets those transient states to `Idle`. Error states are intentionally + /// preserved for UI feedback and debugging. + /// + /// Returns the number of rows updated. + pub fn reset_stale_fetching_states( + executor: &impl QueryExecutor, + ) -> Result { + let sql = format!( + "UPDATE {} SET state = ?1 WHERE state IN (?2, ?3)", + Self::state_table().table_name() + ); + executor.execute( + &sql, + rusqlite::params![ + ListState::Idle as i32, + ListState::FetchingFirstPage as i32, + ListState::FetchingNextPage as i32, + ], + ) + } } /// Header info returned from `get_or_create_and_increment_version`. 
From 23add507d4c32494f2621731dfffeb2fd02536b9 Mon Sep 17 00:00:00 2001 From: Oguz Kocer Date: Thu, 18 Dec 2025 16:40:07 -0500 Subject: [PATCH 24/25] Make per_page a required parameter with validation The repository layer should not dictate per_page values - this must be set by the service layer to match networking configuration. Changes: - Add PerPageMismatch error variant to SqliteDbError - Make per_page a required parameter in get_or_create - Make per_page required in get_or_create_and_increment_version - Update set_items_by_list_key to require per_page - Update append_items_by_list_key to require per_page - Update update_state_by_list_key to require per_page - Return error when existing header has different per_page --- wp_mobile_cache/src/lib.rs | 8 + .../src/repository/list_metadata.rs | 210 +++++++++++++----- 2 files changed, 166 insertions(+), 52 deletions(-) diff --git a/wp_mobile_cache/src/lib.rs b/wp_mobile_cache/src/lib.rs index 7009158b2..0e2ff5643 100644 --- a/wp_mobile_cache/src/lib.rs +++ b/wp_mobile_cache/src/lib.rs @@ -20,6 +20,7 @@ pub enum SqliteDbError { SqliteError(String), ConstraintViolation(String), TableNameMismatch { expected: DbTable, actual: DbTable }, + PerPageMismatch { expected: i64, actual: i64 }, } impl std::fmt::Display for SqliteDbError { @@ -37,6 +38,13 @@ impl std::fmt::Display for SqliteDbError { actual.table_name() ) } + SqliteDbError::PerPageMismatch { expected, actual } => { + write!( + f, + "per_page mismatch: expected {}, but list has {}", + expected, actual + ) + } } } } diff --git a/wp_mobile_cache/src/repository/list_metadata.rs b/wp_mobile_cache/src/repository/list_metadata.rs index af21e4ccb..884093447 100644 --- a/wp_mobile_cache/src/repository/list_metadata.rs +++ b/wp_mobile_cache/src/repository/list_metadata.rs @@ -60,8 +60,9 @@ impl ListMetadataRepository { /// Get or create list metadata header. /// - /// If the header doesn't exist, creates it with default values and returns its rowid. 
- /// If it exists, returns the existing rowid. + /// If the header doesn't exist, creates it with the given `per_page` and returns its rowid. + /// If it exists with matching `per_page`, returns the existing rowid. + /// If it exists with different `per_page`, returns `PerPageMismatch` error. /// /// This function is safe against race conditions: if another thread creates the header /// between our SELECT and INSERT, we catch the constraint violation and re-fetch. @@ -69,30 +70,42 @@ impl ListMetadataRepository { executor: &impl QueryExecutor, site: &DbSite, key: &ListKey, + per_page: i64, ) -> Result { // Try to get existing if let Some(header) = Self::get_header(executor, site, key)? { + if header.per_page != per_page { + return Err(SqliteDbError::PerPageMismatch { + expected: per_page, + actual: header.per_page, + }); + } return Ok(header.row_id); } - // Create new header with defaults + // Create new header let sql = format!( - "INSERT INTO {} (db_site_id, key, current_page, per_page, version) VALUES (?, ?, 0, 20, 0)", + "INSERT INTO {} (db_site_id, key, current_page, per_page, version) VALUES (?, ?, 0, ?, 0)", Self::header_table().table_name() ); - match executor.execute(&sql, rusqlite::params![site.row_id, key]) { + match executor.execute(&sql, rusqlite::params![site.row_id, key, per_page]) { Ok(_) => Ok(executor.last_insert_rowid()), Err(SqliteDbError::ConstraintViolation(_)) => { // Race condition: another thread created it between our SELECT and INSERT. - // Re-fetch to get the row created by the other thread. - Self::get_header(executor, site, key)? - .map(|h| h.row_id) - .ok_or_else(|| { - SqliteDbError::SqliteError( - "Header disappeared after constraint violation".to_string(), - ) - }) + // Re-fetch and validate per_page matches. 
+ let header = Self::get_header(executor, site, key)?.ok_or_else(|| { + SqliteDbError::SqliteError( + "Header disappeared after constraint violation".to_string(), + ) + })?; + if header.per_page != per_page { + return Err(SqliteDbError::PerPageMismatch { + expected: per_page, + actual: header.per_page, + }); + } + Ok(header.row_id) } Err(e) => Err(e), } @@ -280,9 +293,10 @@ impl ListMetadataRepository { executor: &impl QueryExecutor, site: &DbSite, key: &ListKey, + per_page: i64, items: &[ListMetadataItemInput], ) -> Result<(), SqliteDbError> { - let list_metadata_id = Self::get_or_create(executor, site, key)?; + let list_metadata_id = Self::get_or_create(executor, site, key, per_page)?; Self::set_items_by_list_metadata_id(executor, list_metadata_id, items) } @@ -311,9 +325,10 @@ impl ListMetadataRepository { executor: &impl QueryExecutor, site: &DbSite, key: &ListKey, + per_page: i64, items: &[ListMetadataItemInput], ) -> Result<(), SqliteDbError> { - let list_metadata_id = Self::get_or_create(executor, site, key)?; + let list_metadata_id = Self::get_or_create(executor, site, key, per_page)?; Self::append_items_by_list_metadata_id(executor, list_metadata_id, items) } @@ -393,7 +408,7 @@ impl ListMetadataRepository { key: &ListKey, update: &ListMetadataHeaderUpdate, ) -> Result<(), SqliteDbError> { - let list_metadata_id = Self::get_or_create(executor, site, key)?; + let list_metadata_id = Self::get_or_create(executor, site, key, update.per_page)?; Self::update_header_by_list_metadata_id(executor, list_metadata_id, update) } @@ -428,10 +443,11 @@ impl ListMetadataRepository { executor: &impl QueryExecutor, site: &DbSite, key: &ListKey, + per_page: i64, state: ListState, error_message: Option<&str>, ) -> Result<(), SqliteDbError> { - let list_metadata_id = Self::get_or_create(executor, site, key)?; + let list_metadata_id = Self::get_or_create(executor, site, key, per_page)?; Self::update_state_by_list_metadata_id(executor, list_metadata_id, state, error_message) } 
@@ -456,19 +472,22 @@ impl ListMetadataRepository { /// Get or create a list header and increment its version in a single query. /// /// Uses `INSERT ... ON CONFLICT DO UPDATE ... RETURNING` to atomically: - /// - Create the header with version=1 if it doesn't exist + /// - Create the header with the given `per_page` and version=1 if it doesn't exist /// - Increment the version and update `last_first_page_fetched_at` if it exists /// - Return the rowid, new version, and per_page /// + /// Returns `PerPageMismatch` error if the existing header has a different `per_page`. + /// /// This is more efficient than calling `get_or_create` + `increment_version` separately. pub fn get_or_create_and_increment_version( executor: &impl QueryExecutor, site: &DbSite, key: &ListKey, + per_page: i64, ) -> Result { let sql = format!( "INSERT INTO {} (db_site_id, key, current_page, per_page, version, last_first_page_fetched_at) \ - VALUES (?1, ?2, 0, 20, 1, strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) \ + VALUES (?1, ?2, 0, ?3, 1, strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) \ ON CONFLICT(db_site_id, key) DO UPDATE SET \ version = version + 1, \ last_first_page_fetched_at = strftime('%Y-%m-%dT%H:%M:%fZ', 'now') \ @@ -477,14 +496,25 @@ impl ListMetadataRepository { ); let mut stmt = executor.prepare(&sql)?; - stmt.query_row(rusqlite::params![site.row_id, key], |row| { - Ok(HeaderVersionInfo { - list_metadata_id: row.get(0)?, - version: row.get(1)?, - per_page: row.get(2)?, + let info = stmt + .query_row(rusqlite::params![site.row_id, key, per_page], |row| { + Ok(HeaderVersionInfo { + list_metadata_id: row.get(0)?, + version: row.get(1)?, + per_page: row.get(2)?, + }) }) - }) - .map_err(SqliteDbError::from) + .map_err(SqliteDbError::from)?; + + // Validate per_page matches (could differ if row already existed) + if info.per_page != per_page { + return Err(SqliteDbError::PerPageMismatch { + expected: per_page, + actual: info.per_page, + }); + } + + Ok(info) } /// Reset stale fetching states to 
Idle. @@ -558,6 +588,8 @@ mod tests { use crate::test_fixtures::{TestContext, test_ctx}; use rstest::*; + const TEST_PER_PAGE: i64 = 25; + #[rstest] fn test_get_header_returns_none_for_non_existent(test_ctx: TestContext) { let result = ListMetadataRepository::get_header( @@ -574,17 +606,22 @@ mod tests { let key = ListKey::from("edit:posts:publish"); // Create new header - let row_id = ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key) - .expect("should succeed"); + let row_id = ListMetadataRepository::get_or_create( + &test_ctx.conn, + &test_ctx.site, + &key, + TEST_PER_PAGE, + ) + .expect("should succeed"); - // Verify it was created with defaults + // Verify it was created with provided per_page let header = ListMetadataRepository::get_header(&test_ctx.conn, &test_ctx.site, &key) .expect("query should succeed") .expect("should succeed"); assert_eq!(header.row_id, row_id); assert_eq!(header.key, key.as_str()); assert_eq!(header.current_page, 0); - assert_eq!(header.per_page, 20); + assert_eq!(header.per_page, TEST_PER_PAGE); assert_eq!(header.version, 0); assert!(header.total_pages.is_none()); assert!(header.total_items.is_none()); @@ -595,18 +632,46 @@ mod tests { let key = ListKey::from("edit:posts:draft"); // Create initial header - let first_row_id = - ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key) - .expect("should succeed"); + let first_row_id = ListMetadataRepository::get_or_create( + &test_ctx.conn, + &test_ctx.site, + &key, + TEST_PER_PAGE, + ) + .expect("should succeed"); // Get or create again should return same rowid - let second_row_id = - ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key) - .expect("should succeed"); + let second_row_id = ListMetadataRepository::get_or_create( + &test_ctx.conn, + &test_ctx.site, + &key, + TEST_PER_PAGE, + ) + .expect("should succeed"); assert_eq!(first_row_id, second_row_id); } + #[rstest] + fn 
test_get_or_create_fails_on_per_page_mismatch(test_ctx: TestContext) { + let key = ListKey::from("edit:posts:mismatch"); + + // Create header with per_page = 25 + ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key, 25) + .expect("should succeed"); + + // Try to get_or_create with different per_page + let result = + ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key, 10); + assert!(matches!( + result, + Err(SqliteDbError::PerPageMismatch { + expected: 10, + actual: 25 + }) + )); + } + #[rstest] fn test_get_items_returns_empty_for_non_existent_list(test_ctx: TestContext) { let items = ListMetadataRepository::get_items_by_list_key( @@ -728,8 +793,14 @@ mod tests { }, ]; - ListMetadataRepository::set_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key, &items) - .expect("should succeed"); + ListMetadataRepository::set_items_by_list_key( + &test_ctx.conn, + &test_ctx.site, + &key, + TEST_PER_PAGE, + &items, + ) + .expect("should succeed"); let retrieved = ListMetadataRepository::get_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key) @@ -768,6 +839,7 @@ mod tests { &test_ctx.conn, &test_ctx.site, &key, + TEST_PER_PAGE, &initial_items, ) .expect("should succeed"); @@ -797,6 +869,7 @@ mod tests { &test_ctx.conn, &test_ctx.site, &key, + TEST_PER_PAGE, &new_items, ) .expect("should succeed"); @@ -833,6 +906,7 @@ mod tests { &test_ctx.conn, &test_ctx.site, &key, + TEST_PER_PAGE, &initial_items, ) .expect("should succeed"); @@ -856,6 +930,7 @@ mod tests { &test_ctx.conn, &test_ctx.site, &key, + TEST_PER_PAGE, &more_items, ) .expect("should succeed"); @@ -878,7 +953,7 @@ mod tests { total_pages: Some(5), total_items: Some(100), current_page: 1, - per_page: 20, + per_page: TEST_PER_PAGE, }; ListMetadataRepository::update_header_by_list_key( @@ -895,7 +970,7 @@ mod tests { assert_eq!(header.total_pages, Some(5)); assert_eq!(header.total_items, Some(100)); assert_eq!(header.current_page, 1); - assert_eq!(header.per_page, 20); 
+ assert_eq!(header.per_page, TEST_PER_PAGE); assert!(header.last_fetched_at.is_some()); } @@ -903,8 +978,13 @@ mod tests { fn test_update_state_creates_new_state(test_ctx: TestContext) { let key = ListKey::from("edit:posts:publish"); - let list_id = ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key) - .expect("should succeed"); + let list_id = ListMetadataRepository::get_or_create( + &test_ctx.conn, + &test_ctx.site, + &key, + TEST_PER_PAGE, + ) + .expect("should succeed"); ListMetadataRepository::update_state_by_list_metadata_id( &test_ctx.conn, list_id, @@ -924,8 +1004,13 @@ mod tests { fn test_update_state_updates_existing_state(test_ctx: TestContext) { let key = ListKey::from("edit:posts:draft"); - let list_id = ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key) - .expect("should succeed"); + let list_id = ListMetadataRepository::get_or_create( + &test_ctx.conn, + &test_ctx.site, + &key, + TEST_PER_PAGE, + ) + .expect("should succeed"); // Set initial state ListMetadataRepository::update_state_by_list_metadata_id( @@ -960,6 +1045,7 @@ mod tests { &test_ctx.conn, &test_ctx.site, &key, + TEST_PER_PAGE, ListState::FetchingNextPage, None, ) @@ -980,10 +1066,11 @@ mod tests { &test_ctx.conn, &test_ctx.site, &key, + TEST_PER_PAGE, ) .expect("should succeed"); assert_eq!(info.version, 1); - assert_eq!(info.per_page, 20); + assert_eq!(info.per_page, TEST_PER_PAGE); // Verify header was created let header = ListMetadataRepository::get_header(&test_ctx.conn, &test_ctx.site, &key) @@ -998,7 +1085,7 @@ mod tests { let key = ListKey::from("edit:posts:existing"); // Create header first - ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key) + ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key, TEST_PER_PAGE) .expect("should succeed"); // Now call get_or_create_and_increment_version - should increment from 0 to 1 @@ -1006,6 +1093,7 @@ mod tests { &test_ctx.conn, &test_ctx.site, 
&key, + TEST_PER_PAGE, ) .expect("should succeed"); assert_eq!(info1.version, 1); @@ -1015,6 +1103,7 @@ mod tests { &test_ctx.conn, &test_ctx.site, &key, + TEST_PER_PAGE, ) .expect("should succeed"); assert_eq!(info2.version, 2); @@ -1026,16 +1115,27 @@ mod tests { let key = ListKey::from("edit:posts:publish"); // Create header and add items and state - let list_id = ListMetadataRepository::get_or_create(&test_ctx.conn, &test_ctx.site, &key) - .expect("should succeed"); + let list_id = ListMetadataRepository::get_or_create( + &test_ctx.conn, + &test_ctx.site, + &key, + TEST_PER_PAGE, + ) + .expect("should succeed"); let items = vec![ListMetadataItemInput { entity_id: 1, modified_gmt: None, parent: None, menu_order: None, }]; - ListMetadataRepository::set_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key, &items) - .expect("should succeed"); + ListMetadataRepository::set_items_by_list_key( + &test_ctx.conn, + &test_ctx.site, + &key, + TEST_PER_PAGE, + &items, + ) + .expect("should succeed"); ListMetadataRepository::update_state_by_list_metadata_id( &test_ctx.conn, list_id, @@ -1095,8 +1195,14 @@ mod tests { }) .collect(); - ListMetadataRepository::set_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key, &items) - .expect("should succeed"); + ListMetadataRepository::set_items_by_list_key( + &test_ctx.conn, + &test_ctx.site, + &key, + TEST_PER_PAGE, + &items, + ) + .expect("should succeed"); let retrieved = ListMetadataRepository::get_items_by_list_key(&test_ctx.conn, &test_ctx.site, &key) From e43fe4acc2ccfcafd9310fbac01e812a2f8e350e Mon Sep 17 00:00:00 2001 From: Oguz Kocer Date: Thu, 18 Dec 2025 16:43:02 -0500 Subject: [PATCH 25/25] Use PRAGMA-based column enum tests for list_metadata Update schema validation tests to use `get_table_column_names` helper, which verifies that column enum indices match actual database schema positions via PRAGMA table_info. 
This matches the pattern used by posts repository tests and provides stronger guarantees that the column enums won't break if migrations reorder columns. --- .../src/repository/list_metadata.rs | 82 +++++++++++++------ 1 file changed, 57 insertions(+), 25 deletions(-) diff --git a/wp_mobile_cache/src/repository/list_metadata.rs b/wp_mobile_cache/src/repository/list_metadata.rs index 884093447..3dae69304 100644 --- a/wp_mobile_cache/src/repository/list_metadata.rs +++ b/wp_mobile_cache/src/repository/list_metadata.rs @@ -585,7 +585,11 @@ pub struct ListMetadataHeaderUpdate { #[cfg(test)] mod tests { use super::*; - use crate::test_fixtures::{TestContext, test_ctx}; + use crate::db_types::db_list_metadata::{ + ListMetadataColumn, ListMetadataItemColumn, ListMetadataStateColumn, + }; + use crate::db_types::row_ext::ColumnIndex; + use crate::test_fixtures::{TestContext, get_table_column_names, test_ctx}; use rstest::*; const TEST_PER_PAGE: i64 = 25; @@ -724,44 +728,72 @@ mod tests { assert_eq!(count, 0); } + /// Verify that ListMetadataColumn enum values match the actual database schema. + /// This test protects against column reordering in migrations breaking the positional index mapping. 
#[rstest] fn test_list_metadata_column_enum_matches_schema(test_ctx: TestContext) { - // Verify column order by selecting specific columns and checking positions - let sql = format!( - "SELECT rowid, db_site_id, key, total_pages, total_items, current_page, per_page, last_first_page_fetched_at, last_fetched_at, version FROM {}", - ListMetadataRepository::header_table().table_name() + use ListMetadataColumn::*; + + let columns = get_table_column_names( + &test_ctx.conn, + ListMetadataRepository::header_table().table_name(), ); - let stmt = test_ctx.conn.prepare(&sql); - assert!( - stmt.is_ok(), - "Column order mismatch - SELECT with explicit columns failed" + + // Verify each enum value maps to the correct column name + assert_eq!(columns[Rowid.as_index()], "rowid"); + assert_eq!(columns[DbSiteId.as_index()], "db_site_id"); + assert_eq!(columns[Key.as_index()], "key"); + assert_eq!(columns[TotalPages.as_index()], "total_pages"); + assert_eq!(columns[TotalItems.as_index()], "total_items"); + assert_eq!(columns[CurrentPage.as_index()], "current_page"); + assert_eq!(columns[PerPage.as_index()], "per_page"); + assert_eq!( + columns[LastFirstPageFetchedAt.as_index()], + "last_first_page_fetched_at" ); + assert_eq!(columns[LastFetchedAt.as_index()], "last_fetched_at"); + assert_eq!(columns[Version.as_index()], "version"); + + assert_eq!(columns.len(), Version.as_index() + 1); } + /// Verify that ListMetadataItemColumn enum values match the actual database schema. 
#[rstest] fn test_list_metadata_items_column_enum_matches_schema(test_ctx: TestContext) { - let sql = format!( - "SELECT rowid, list_metadata_id, entity_id, modified_gmt, parent, menu_order FROM {}", - ListMetadataRepository::items_table().table_name() - ); - let stmt = test_ctx.conn.prepare(&sql); - assert!( - stmt.is_ok(), - "Column order mismatch - SELECT with explicit columns failed" + use ListMetadataItemColumn::*; + + let columns = get_table_column_names( + &test_ctx.conn, + ListMetadataRepository::items_table().table_name(), ); + + assert_eq!(columns[Rowid.as_index()], "rowid"); + assert_eq!(columns[ListMetadataId.as_index()], "list_metadata_id"); + assert_eq!(columns[EntityId.as_index()], "entity_id"); + assert_eq!(columns[ModifiedGmt.as_index()], "modified_gmt"); + assert_eq!(columns[Parent.as_index()], "parent"); + assert_eq!(columns[MenuOrder.as_index()], "menu_order"); + + assert_eq!(columns.len(), MenuOrder.as_index() + 1); } + /// Verify that ListMetadataStateColumn enum values match the actual database schema. #[rstest] fn test_list_metadata_state_column_enum_matches_schema(test_ctx: TestContext) { - let sql = format!( - "SELECT rowid, list_metadata_id, state, error_message, updated_at FROM {}", - ListMetadataRepository::state_table().table_name() - ); - let stmt = test_ctx.conn.prepare(&sql); - assert!( - stmt.is_ok(), - "Column order mismatch - SELECT with explicit columns failed" + use ListMetadataStateColumn::*; + + let columns = get_table_column_names( + &test_ctx.conn, + ListMetadataRepository::state_table().table_name(), ); + + assert_eq!(columns[Rowid.as_index()], "rowid"); + assert_eq!(columns[ListMetadataId.as_index()], "list_metadata_id"); + assert_eq!(columns[State.as_index()], "state"); + assert_eq!(columns[ErrorMessage.as_index()], "error_message"); + assert_eq!(columns[UpdatedAt.as_index()], "updated_at"); + + assert_eq!(columns.len(), UpdatedAt.as_index() + 1); } // ============================================================