diff --git a/pkgs/cli/__tests__/commands/install/create-flows-directory.test.ts b/pkgs/cli/__tests__/commands/install/create-flows-directory.test.ts
index fa4b40540..e7dd42444 100644
--- a/pkgs/cli/__tests__/commands/install/create-flows-directory.test.ts
+++ b/pkgs/cli/__tests__/commands/install/create-flows-directory.test.ts
@@ -35,7 +35,7 @@ describe('createFlowsDirectory', () => {
// Verify all files exist
const indexPath = path.join(flowsDir, 'index.ts');
- const exampleFlowPath = path.join(flowsDir, 'example_flow.ts');
+ const exampleFlowPath = path.join(flowsDir, 'example-flow.ts');
expect(fs.existsSync(indexPath)).toBe(true);
expect(fs.existsSync(exampleFlowPath)).toBe(true);
@@ -51,18 +51,18 @@ describe('createFlowsDirectory', () => {
const indexContent = fs.readFileSync(indexPath, 'utf8');
// Should have export for ExampleFlow
- expect(indexContent).toContain("export { ExampleFlow } from './example_flow.ts'");
+ expect(indexContent).toContain("export { ExampleFlow } from './example-flow.ts';");
// Should have documenting comment
expect(indexContent).toContain('Re-export all flows');
});
- it('should create example_flow.ts with named export', async () => {
+ it('should create example-flow.ts with named export', async () => {
await createFlowsDirectory({
supabasePath,
autoConfirm: true,
});
- const exampleFlowPath = path.join(flowsDir, 'example_flow.ts');
+ const exampleFlowPath = path.join(flowsDir, 'example-flow.ts');
const exampleFlowContent = fs.readFileSync(exampleFlowPath, 'utf8');
// Should use named export (not default)
@@ -70,7 +70,7 @@ describe('createFlowsDirectory', () => {
// Should import Flow from @pgflow/dsl
expect(exampleFlowContent).toContain("import { Flow } from '@pgflow/dsl'");
// Should have correct slug
- expect(exampleFlowContent).toContain("slug: 'example_flow'");
+ expect(exampleFlowContent).toContain("slug: 'exampleFlow'");
// Should have input type
expect(exampleFlowContent).toContain('type Input');
// Should have at least one step
@@ -82,7 +82,7 @@ describe('createFlowsDirectory', () => {
fs.mkdirSync(flowsDir, { recursive: true });
const indexPath = path.join(flowsDir, 'index.ts');
- const exampleFlowPath = path.join(flowsDir, 'example_flow.ts');
+ const exampleFlowPath = path.join(flowsDir, 'example-flow.ts');
fs.writeFileSync(indexPath, '// existing content');
fs.writeFileSync(exampleFlowPath, '// existing content');
@@ -105,7 +105,7 @@ describe('createFlowsDirectory', () => {
fs.mkdirSync(flowsDir, { recursive: true });
const indexPath = path.join(flowsDir, 'index.ts');
- const exampleFlowPath = path.join(flowsDir, 'example_flow.ts');
+ const exampleFlowPath = path.join(flowsDir, 'example-flow.ts');
// Only create index.ts
fs.writeFileSync(indexPath, '// existing content');
@@ -115,13 +115,13 @@ describe('createFlowsDirectory', () => {
autoConfirm: true,
});
- // Should return true because example_flow.ts was created
+ // Should return true because example-flow.ts was created
expect(result).toBe(true);
// Verify index.ts was not modified
expect(fs.readFileSync(indexPath, 'utf8')).toBe('// existing content');
- // Verify example_flow.ts was created
+ // Verify example-flow.ts was created
expect(fs.existsSync(exampleFlowPath)).toBe(true);
const exampleContent = fs.readFileSync(exampleFlowPath, 'utf8');
@@ -145,6 +145,6 @@ describe('createFlowsDirectory', () => {
// Verify files exist
expect(fs.existsSync(path.join(flowsDir, 'index.ts'))).toBe(true);
- expect(fs.existsSync(path.join(flowsDir, 'example_flow.ts'))).toBe(true);
+ expect(fs.existsSync(path.join(flowsDir, 'example-flow.ts'))).toBe(true);
});
});
diff --git a/pkgs/cli/src/commands/install/create-flows-directory.ts b/pkgs/cli/src/commands/install/create-flows-directory.ts
index 483aa26ed..32f6b80bd 100644
--- a/pkgs/cli/src/commands/install/create-flows-directory.ts
+++ b/pkgs/cli/src/commands/install/create-flows-directory.ts
@@ -4,16 +4,16 @@ import { log, confirm } from '@clack/prompts';
import chalk from 'chalk';
const INDEX_TS_TEMPLATE = `// Re-export all flows from this directory
-// Example: export { MyFlow } from './my_flow.ts';
+// Example: export { MyFlow } from './my-flow.ts';
-export { ExampleFlow } from './example_flow.ts';
+export { ExampleFlow } from './example-flow.ts';
`;
const EXAMPLE_FLOW_TEMPLATE = `import { Flow } from '@pgflow/dsl';
type Input = { name: string };
-export const ExampleFlow = new Flow({ slug: 'example_flow' })
+export const ExampleFlow = new Flow({ slug: 'exampleFlow' })
.step({ slug: 'greet' }, (input) => \`Hello, \${input.run.name}!\`);
`;
@@ -27,12 +27,12 @@ export async function createFlowsDirectory({
const flowsDir = path.join(supabasePath, 'flows');
const indexPath = path.join(flowsDir, 'index.ts');
- const exampleFlowPath = path.join(flowsDir, 'example_flow.ts');
+ const exampleFlowPath = path.join(flowsDir, 'example-flow.ts');
// Relative paths for display
const relativeFlowsDir = 'supabase/flows';
const relativeIndexPath = `${relativeFlowsDir}/index.ts`;
- const relativeExampleFlowPath = `${relativeFlowsDir}/example_flow.ts`;
+ const relativeExampleFlowPath = `${relativeFlowsDir}/example-flow.ts`;
// Check what needs to be created
const filesToCreate: Array<{ path: string; relativePath: string }> = [];
diff --git a/pkgs/client/README.md b/pkgs/client/README.md
index 45e06360e..89b6c1f94 100644
--- a/pkgs/client/README.md
+++ b/pkgs/client/README.md
@@ -230,7 +230,7 @@ When using with `@pgflow/dsl`, you get full type safety:
import { Flow } from '@pgflow/dsl';
// Define your flow
-const AnalyzeWebsite = new Flow<{ url: string }>({ slug: 'analyze_website' })
+const AnalyzeWebsite = new Flow<{ url: string }>({ slug: 'analyzeWebsite' })
.step({ slug: 'scrape' }, async (input) => ({ content: 'html...' }))
.step({ slug: 'analyze' }, async (input) => ({ sentiment: 0.8 }));
diff --git a/pkgs/client/SECURITY.md b/pkgs/client/SECURITY.md
index 6b8d6081e..42b62f3c4 100644
--- a/pkgs/client/SECURITY.md
+++ b/pkgs/client/SECURITY.md
@@ -22,11 +22,13 @@ pgflow ships with NO permissions. The SQL below is a **convenience snippet** tha
> [!CAUTION]
> This SQL grants BROAD permissions! After running this, ANY authenticated user can:
+>
> - Start ANY flow
> - View ANY run (if they know the run_id)
> - See ALL flow definitions
->
+>
> It is YOUR responsibility to:
+>
> - Tailor these permissions to your specific needs
> - Implement Row Level Security policies
> - Add proper access controls
@@ -51,40 +53,42 @@ This is suitable for development and trusted environments only.
Since pgflow doesn't handle security yet, you might want to:
1. **Add Row Level Security**
-
+
The key to implementing RLS with pgflow is to include a `user_id` field in your flow's input object. This allows you to create policies that check if the current user matches the user who started the flow.
-
+
First, include user_id in your flow input type:
+
```typescript
import { Flow } from '@pgflow/dsl';
-
+
// Define input type with user_id
type MyFlowInput = {
- user_id: string; // <<<<< Add this field
+ user_id: string; // <<<<< Add this field
data: string;
// ... other fields
};
-
+
export const MyFlow = new Flow({
- slug: 'my_secure_flow',
- })
+ slug: 'mySecureFlow',
+  })
// ... rest of flow definition
```
-
+
Then create RLS policies and an index for performance:
+
```sql
-- Enable RLS on tables you want to protect
ALTER TABLE pgflow.runs ENABLE ROW LEVEL SECURITY;
-
+
-- Create index for better RLS performance
CREATE INDEX idx_runs_user_id ON pgflow.runs ((input->>'user_id'));
-
+
-- Create your own policies based on your needs
-- Example: Users can only see their own runs
CREATE POLICY "Users see own runs" ON pgflow.runs
FOR SELECT USING ((SELECT auth.uid())::text = input->>'user_id');
```
-
+
For more details about the pgflow schema and the `runs` table structure, see the [Schema Design section](../core/README.md#schema-design) in the core documentation.
2. **Track User Attribution**
@@ -93,4 +97,4 @@ Since pgflow doesn't handle security yet, you might want to:
## Questions?
-If you have security concerns or suggestions, please share them in the [GitHub discussions](https://github.com/pgflow-dev/pgflow/discussions).
\ No newline at end of file
+If you have security concerns or suggestions, please share them in the [GitHub discussions](https://github.com/pgflow-dev/pgflow/discussions).
diff --git a/pkgs/dsl/README.md b/pkgs/dsl/README.md
index 2759411ae..6b9dfd648 100644
--- a/pkgs/dsl/README.md
+++ b/pkgs/dsl/README.md
@@ -31,7 +31,7 @@ type Input = {
// Define a flow with steps and dependencies
export const AnalyzeWebsite = new Flow({
- slug: 'analyze_website',
+ slug: 'analyzeWebsite',
maxAttempts: 3,
baseDelay: 5,
timeout: 10,
@@ -98,13 +98,13 @@ A semantic wrapper around `.step()` that provides type enforcement for steps tha
```typescript
// Fetch an array of items to be processed
.array(
- { slug: 'fetch_items' },
+ { slug: 'fetchItems' },
async () => [1, 2, 3, 4, 5]
)
// With dependencies - combining data from multiple sources
.array(
- { slug: 'combine_results', dependsOn: ['source1', 'source2'] },
+ { slug: 'combineResults', dependsOn: ['source1', 'source2'] },
async (input) => [...input.source1, ...input.source2]
)
```
@@ -131,7 +131,7 @@ Processes arrays element-by-element, similar to JavaScript's `Array.map()`. The
```typescript
// ROOT MAP - No array: property means use flow input
// Flow input MUST be an array (e.g., ["hello", "world"])
-new Flow({ slug: 'process_strings' })
+new Flow({ slug: 'processStrings' })
.map(
{ slug: 'uppercase' }, // No array: property!
(item) => item.toUpperCase()
@@ -139,7 +139,7 @@ new Flow({ slug: 'process_strings' })
// Each string in the input array gets uppercased in parallel
// DEPENDENT MAP - array: property specifies the source step
-new Flow<{}>({ slug: 'data_pipeline' })
+new Flow<{}>({ slug: 'dataPipeline' })
.array({ slug: 'numbers' }, () => [1, 2, 3])
.map(
{ slug: 'double', array: 'numbers' }, // Processes 'numbers' output
@@ -166,7 +166,7 @@ The `.map()` method provides full TypeScript type inference for array elements:
```typescript
type User = { id: number; name: string };
-new Flow<{}>({ slug: 'user_flow' })
+new Flow<{}>({ slug: 'userFlow' })
.array({ slug: 'users' }, (): User[] => [
{ id: 1, name: 'Alice' },
{ id: 2, name: 'Bob' }
@@ -181,7 +181,7 @@ new Flow<{}>({ slug: 'user_flow' })
```typescript
// Batch processing - process multiple items in parallel
-new Flow({ slug: 'batch_processor' })
+new Flow({ slug: 'batchProcessor' })
.map({ slug: 'validate' }, (item) => {
if (item < 0) throw new Error('Invalid item');
return item;
@@ -192,9 +192,9 @@ new Flow({ slug: 'batch_processor' })
});
// Data transformation pipeline
-new Flow<{}>({ slug: 'etl_pipeline' })
- .step({ slug: 'fetch_urls' }, () => ['url1', 'url2', 'url3'])
- .map({ slug: 'scrape', array: 'fetch_urls' }, async (url) => {
+new Flow<{}>({ slug: 'etlPipeline' })
+ .step({ slug: 'fetchUrls' }, () => ['url1', 'url2', 'url3'])
+ .map({ slug: 'scrape', array: 'fetchUrls' }, async (url) => {
return await fetchContent(url);
})
.map({ slug: 'extract', array: 'scrape' }, (html) => {
@@ -271,7 +271,7 @@ To use Supabase resources, import the `Flow` class from the Supabase preset:
import { Flow } from '@pgflow/dsl/supabase';
const MyFlow = new Flow<{ userId: string }>({
- slug: 'my_flow',
+ slug: 'myFlow',
}).step({ slug: 'process' }, async (input, context) => {
// TypeScript knows context includes Supabase resources
const { data } = await context.supabase
@@ -300,7 +300,7 @@ Configure flows and steps with runtime options:
```typescript
new Flow({
- slug: 'my_flow', // Required: Unique flow identifier
+ slug: 'myFlow', // Required: Unique flow identifier
maxAttempts: 3, // Optional: Maximum retry attempts (default: 1)
baseDelay: 5, // Optional: Base delay in seconds for retries (default: 1)
timeout: 10, // Optional: Task timeout in seconds (default: 30)
diff --git a/pkgs/edge-worker/README.md b/pkgs/edge-worker/README.md
index 561f9bdb1..dd2a8cedc 100644
--- a/pkgs/edge-worker/README.md
+++ b/pkgs/edge-worker/README.md
@@ -63,7 +63,7 @@ import { Flow } from 'jsr:@pgflow/dsl/supabase';
// Define a flow using Supabase preset for Supabase resources
const AnalyzeWebsite = new Flow<{ url: string }>({
- slug: 'analyze_website',
+ slug: 'analyzeWebsite',
})
.step({ slug: 'fetch' }, async (input, context) => {
// Access Supabase resources through context
@@ -172,7 +172,7 @@ When defining flows that use Supabase resources, import `Flow` from the Supabase
```typescript
import { Flow } from 'jsr:@pgflow/dsl/supabase';
-const MyFlow = new Flow({ slug: 'my_flow' }).step(
+const MyFlow = new Flow({ slug: 'myFlow' }).step(
{ slug: 'process' },
async (input, context) => {
// TypeScript knows context includes all Supabase resources
diff --git a/pkgs/website/astro.config.mjs b/pkgs/website/astro.config.mjs
index f6c7bb756..034b0101d 100644
--- a/pkgs/website/astro.config.mjs
+++ b/pkgs/website/astro.config.mjs
@@ -339,7 +339,7 @@ export default defineConfig({
label: 'Context object',
link: '/concepts/context-object/',
},
- { label: 'Naming steps', link: '/concepts/naming-steps/' },
+ { label: 'Naming conventions', link: '/concepts/naming-conventions/' },
],
},
],
diff --git a/pkgs/website/redirects.config.mjs b/pkgs/website/redirects.config.mjs
index cc32e90ec..b30f31912 100644
--- a/pkgs/website/redirects.config.mjs
+++ b/pkgs/website/redirects.config.mjs
@@ -89,7 +89,8 @@ export const redirects = {
'/how-to/manual-installation/': '/reference/manual-installation/',
'/how-to/manually-compile-flow/': '/reference/compile-api/',
'/how-to/monitor-flow-execution/': '/deploy/monitor-execution/',
- '/how-to/naming-steps/': '/concepts/naming-steps/',
+ '/how-to/naming-steps/': '/concepts/naming-conventions/',
+ '/concepts/naming-steps/': '/concepts/naming-conventions/',
'/how-to/organize-flows-code/': '/build/organize-flow-code/',
'/how-to/prepare-db-string/': '/deploy/connection-string/',
'/how-to/prune-old-records/': '/deploy/prune-records/',
diff --git a/pkgs/website/src/content/docs/build/create-reusable-tasks.mdx b/pkgs/website/src/content/docs/build/create-reusable-tasks.mdx
index 1e986fd77..e22a070e3 100644
--- a/pkgs/website/src/content/docs/build/create-reusable-tasks.mdx
+++ b/pkgs/website/src/content/docs/build/create-reusable-tasks.mdx
@@ -61,7 +61,7 @@ To create task functions that can be used across multiple flows without tight co
}
// Flow uses handler to adapt context to task parameters
- new Flow<{ userId: string }>({ slug: 'user_flow' })
+ new Flow<{ userId: string }>({ slug: 'userFlow' })
.step({ slug: 'profile' }, async (input) =>
await fetchUserProfile(input.run.userId)
)
diff --git a/pkgs/website/src/content/docs/build/delaying-steps.mdx b/pkgs/website/src/content/docs/build/delaying-steps.mdx
index 34703177f..27ee47f0d 100644
--- a/pkgs/website/src/content/docs/build/delaying-steps.mdx
+++ b/pkgs/website/src/content/docs/build/delaying-steps.mdx
@@ -35,20 +35,20 @@ Onboarding emails with different start delays (all are root steps with no depend
```typescript
new Flow({
- slug: 'user_onboarding',
+ slug: 'userOnboarding',
maxAttempts: 3,
baseDelay: 1,
})
.step({
- slug: 'send_welcome_email',
+ slug: 'sendWelcomeEmail',
// Executes immediately when flow starts
}, sendWelcomeHandler)
.step({
- slug: 'send_day_3_tips',
+ slug: 'sendDay3Tips',
startDelay: 259200, // 3 days after flow starts
}, sendTipsHandler)
.step({
- slug: 'send_week_review',
+ slug: 'sendWeekReview',
startDelay: 604800, // 7 days after flow starts
}, sendReviewHandler)
```
@@ -59,16 +59,16 @@ Wait period after a specific step completes:
```typescript
new Flow({
- slug: 'trial_conversion',
+ slug: 'trialConversion',
maxAttempts: 3,
baseDelay: 1,
})
.step({
- slug: 'provision_trial',
+ slug: 'provisionTrial',
}, provisionHandler)
.step({
- slug: 'send_upgrade_reminder',
- dependsOn: ['provision_trial'],
+ slug: 'sendUpgradeReminder',
+ dependsOn: ['provisionTrial'],
startDelay: 1209600, // 14 days after trial provisioning completes
}, reminderHandler)
```
diff --git a/pkgs/website/src/content/docs/build/delete-flows.mdx b/pkgs/website/src/content/docs/build/delete-flows.mdx
index 8cbf0ebe2..f64dd1fbd 100644
--- a/pkgs/website/src/content/docs/build/delete-flows.mdx
+++ b/pkgs/website/src/content/docs/build/delete-flows.mdx
@@ -34,7 +34,7 @@ pgflow.delete_flow_and_data(flow_slug TEXT)
Example usage in local development:
```sql
-- Delete a specific flow and all its data
-SELECT pgflow.delete_flow_and_data('analyze_website');
+SELECT pgflow.delete_flow_and_data('analyzeWebsite');
```
This deletes the flow definition, all runs, queued messages, and task outputs for the specified flow.
diff --git a/pkgs/website/src/content/docs/build/organize-flow-code.mdx b/pkgs/website/src/content/docs/build/organize-flow-code.mdx
index b8d0c1544..9b1852714 100644
--- a/pkgs/website/src/content/docs/build/organize-flow-code.mdx
+++ b/pkgs/website/src/content/docs/build/organize-flow-code.mdx
@@ -25,7 +25,7 @@ mkdir -p supabase/tasks
- supabase
- flows
- - analyze_website.ts (flow definition)
+ - analyze-website.ts (flow definition)
- index.ts (re-exports all flows)
- tasks
- scrapeWebsite.ts
@@ -37,7 +37,7 @@ mkdir -p supabase/tasks
- pgflow
- index.ts (Control Plane)
- deno.json
- - analyze_website_worker
+ - analyze-website-worker
- index.ts (worker for this flow)
- deno.json
- migrations
@@ -57,8 +57,8 @@ Each directory uses an `index.ts` barrel file that re-exports all modules:
```typescript title="supabase/flows/index.ts"
// Re-export all flows from this directory
-export { AnalyzeWebsite } from './analyze_website.ts';
-export { ProcessOrder } from './process_order.ts';
+export { AnalyzeWebsite } from './analyze-website.ts';
+export { ProcessOrder } from './process-order.ts';
```
```typescript title="supabase/tasks/index.ts" (optional)
@@ -83,9 +83,9 @@ This pattern provides:
description="How to design task functions that work well across multiple flows"
/>
diff --git a/pkgs/website/src/content/docs/build/process-arrays-in-parallel.mdx b/pkgs/website/src/content/docs/build/process-arrays-in-parallel.mdx
index 351b9b8be..9b5a062e0 100644
--- a/pkgs/website/src/content/docs/build/process-arrays-in-parallel.mdx
+++ b/pkgs/website/src/content/docs/build/process-arrays-in-parallel.mdx
@@ -21,7 +21,7 @@ A common use case is fetching and processing multiple web pages in parallel:
import { Flow } from '@pgflow/dsl/supabase';
const ScrapeMultipleUrls = new Flow({ // Flow input must be array for root map
- slug: 'scrape_multiple_urls',
+ slug: 'scrapeMultipleUrls',
maxAttempts: 3,
})
.map(
@@ -35,7 +35,7 @@ const ScrapeMultipleUrls = new Flow({ // Flow input must be array for
// Usage (SQL):
SELECT pgflow.start_flow(
- flow_slug => 'scrape_multiple_urls',
+ flow_slug => 'scrapeMultipleUrls',
input => '["https://example.com", "https://example.org", "https://example.net"]'::jsonb
);
```
@@ -49,7 +49,7 @@ Transform and validate data collections efficiently.
```typescript
const CsvProcessor = new Flow<{ csvUrl: string }>({
- slug: 'csv_processor',
+ slug: 'csvProcessor',
})
.array(
{ slug: 'csvRows' },
@@ -78,7 +78,7 @@ Map handlers only receive individual array elements. If a handler needs access t
```typescript "apiKey: string" del="id, ???" del="(id)"
// This won't work - map handler can't access input.run
const ProblemFlow = new Flow<{ apiKey: string, ids: string[] }>({
- slug: 'problem_flow',
+ slug: 'problemFlow',
})
.map({ slug: 'fetch' }, async (id) => {
// Can't access input.run.apiKey here!
@@ -90,7 +90,7 @@ const ProblemFlow = new Flow<{ apiKey: string, ids: string[] }>({
```typescript ins={4-13} ins="item.id, item.apiKey" ins="(item)"
const SolutionFlow = new Flow<{ apiKey: string, ids: string[] }>({
- slug: 'solution_flow',
+ slug: 'solutionFlow',
})
.array(
{ slug: 'prepareItems' },
@@ -125,7 +125,7 @@ When a map step receives an empty array, pgflow optimizes by completing the enti
```typescript
const EmptyHandling = new Flow<{}>({
- slug: 'empty_handling',
+ slug: 'emptyHandling',
})
.array({ slug: 'items' }, async () => {
const results = await fetchData();
diff --git a/pkgs/website/src/content/docs/build/retrying-steps.mdx b/pkgs/website/src/content/docs/build/retrying-steps.mdx
index fcd6d7e28..83d67e9ba 100644
--- a/pkgs/website/src/content/docs/build/retrying-steps.mdx
+++ b/pkgs/website/src/content/docs/build/retrying-steps.mdx
@@ -77,21 +77,21 @@ When different steps have different reliability requirements:
```typescript
new Flow({
- slug: 'data_pipeline',
+ slug: 'dataPipeline',
maxAttempts: 3, // Sensible defaults
baseDelay: 1,
})
.step({
- slug: 'validate_input',
+ slug: 'validateInput',
maxAttempts: 1, // No retries - validation should not fail
}, validateHandler)
.step({
- slug: 'fetch_external',
+ slug: 'fetchExternal',
maxAttempts: 5, // External API might be flaky
baseDelay: 10, // Longer delays for external service
}, fetchHandler)
.step({
- slug: 'save_results',
+ slug: 'saveResults',
// Use flow defaults
}, saveHandler)
```
diff --git a/pkgs/website/src/content/docs/build/starting-flows/index.mdx b/pkgs/website/src/content/docs/build/starting-flows/index.mdx
index 497bc0d29..cc8c38d4a 100644
--- a/pkgs/website/src/content/docs/build/starting-flows/index.mdx
+++ b/pkgs/website/src/content/docs/build/starting-flows/index.mdx
@@ -51,7 +51,7 @@ Use [RPC calls](https://supabase.com/docs/reference/javascript/rpc) when you jus
```typescript
await supabase.schema('pgflow').rpc('start_flow', {
- flow_slug: 'send_email',
+ flow_slug: 'sendEmail',
input: { to: 'user@example.com' }
});
```
diff --git a/pkgs/website/src/content/docs/build/starting-flows/supabase-rpc.mdx b/pkgs/website/src/content/docs/build/starting-flows/supabase-rpc.mdx
index b396b6920..b71cbf62c 100644
--- a/pkgs/website/src/content/docs/build/starting-flows/supabase-rpc.mdx
+++ b/pkgs/website/src/content/docs/build/starting-flows/supabase-rpc.mdx
@@ -34,7 +34,7 @@ const supabase = createClient(
const { data, error } = await supabase
.schema('pgflow')
.rpc('start_flow', {
- flow_slug: 'send_welcome_email',
+ flow_slug: 'sendWelcomeEmail',
input: {
email: 'user@example.com',
name: 'John Doe'
@@ -64,9 +64,9 @@ const run_id = randomUUID();
const { data, error } = await supabase
.schema('pgflow')
.rpc('start_flow', {
- flow_slug: 'process_upload',
+ flow_slug: 'processUpload',
input: {
- file_url: 'https://example.com/file.pdf'
+ fileUrl: 'https://example.com/file.pdf'
},
run_id: run_id
});
diff --git a/pkgs/website/src/content/docs/build/starting-flows/typescript-client.mdx b/pkgs/website/src/content/docs/build/starting-flows/typescript-client.mdx
index a93e08c77..3d84687ff 100644
--- a/pkgs/website/src/content/docs/build/starting-flows/typescript-client.mdx
+++ b/pkgs/website/src/content/docs/build/starting-flows/typescript-client.mdx
@@ -76,7 +76,7 @@ The simplest pattern is to start a workflow and wait for it to complete:
```typescript
// Start the workflow
-const run = await pgflow.startFlow('analyze_website', {
+const run = await pgflow.startFlow('analyzeWebsite', {
url: 'https://example.com',
});
@@ -331,7 +331,7 @@ When using with `@pgflow/dsl`, get full type inference:
import { Flow } from '@pgflow/dsl';
// Define your flow
-const AnalyzeWebsite = new Flow<{ url: string }>({ slug: 'analyze_website' })
+const AnalyzeWebsite = new Flow<{ url: string }>({ slug: 'analyzeWebsite' })
.step({ slug: 'scrape' }, async (input) => ({
content: 'html...',
title: 'Page Title'
diff --git a/pkgs/website/src/content/docs/build/version-flows.mdx b/pkgs/website/src/content/docs/build/version-flows.mdx
index 768bf5899..378fc1511 100644
--- a/pkgs/website/src/content/docs/build/version-flows.mdx
+++ b/pkgs/website/src/content/docs/build/version-flows.mdx
@@ -41,8 +41,8 @@ Put the new flow in its own file with a versioned slug.
- supabase/
- flows/
- - greet_user.ts
- - **greet_user_v2.ts** // new version
+ - greet-user.ts
+ - **greet-user-v2.ts** // new version
- index.ts
- tasks/
- fetchUserData.ts
@@ -53,9 +53,9 @@ Put the new flow in its own file with a versioned slug.
1. **Create new flow file**
- ```typescript title="supabase/flows/greet_user_v2.ts"
+ ```typescript title="supabase/flows/greet-user-v2.ts"
export const GreetUserV2 = new Flow({
- slug: 'greet_user_v2',
+ slug: 'greetUserV2',
// ...new configuration and step definitions
})
```
@@ -63,7 +63,7 @@ Put the new flow in its own file with a versioned slug.
Then add it to `supabase/flows/index.ts`:
```typescript
- export { GreetUserV2 } from './greet_user_v2.ts';
+ export { GreetUserV2 } from './greet-user-v2.ts';
```
2. **Compile it**
@@ -71,7 +71,7 @@ Put the new flow in its own file with a versioned slug.
[Compile the new flow to SQL](/get-started/flows/compile-flow/) which generates a migration file:
```bash frame="none"
- npx pgflow@latest compile greet_user_v2
+ npx pgflow@latest compile greetUserV2
```
3. **Run migration**
diff --git a/pkgs/website/src/content/docs/comparisons/dbos.mdx b/pkgs/website/src/content/docs/comparisons/dbos.mdx
index b738476f8..380f55ce5 100644
--- a/pkgs/website/src/content/docs/comparisons/dbos.mdx
+++ b/pkgs/website/src/content/docs/comparisons/dbos.mdx
@@ -49,7 +49,7 @@ pgflow puts **PostgreSQL at the center** of your workflow orchestration. Flows a
```typescript
// In pgflow, the database orchestrates the workflow
new Flow<{ url: string }>({
- slug: 'analyze_website',
+ slug: 'analyzeWebsite',
})
.step(
{ slug: 'extract' },
diff --git a/pkgs/website/src/content/docs/comparisons/inngest.mdx b/pkgs/website/src/content/docs/comparisons/inngest.mdx
index 4b37619d8..e311c9da3 100644
--- a/pkgs/website/src/content/docs/comparisons/inngest.mdx
+++ b/pkgs/website/src/content/docs/comparisons/inngest.mdx
@@ -54,7 +54,7 @@ pgflow puts **PostgreSQL at the center** of your workflow orchestration. Flows a
```typescript
// In pgflow, the database orchestrates the workflow
new Flow<{ url: string }>({
- slug: 'analyze_website',
+ slug: 'analyzeWebsite',
})
.step(
{ slug: 'extract' },
diff --git a/pkgs/website/src/content/docs/comparisons/trigger.mdx b/pkgs/website/src/content/docs/comparisons/trigger.mdx
index 6fb5a2a7e..ce12c913f 100644
--- a/pkgs/website/src/content/docs/comparisons/trigger.mdx
+++ b/pkgs/website/src/content/docs/comparisons/trigger.mdx
@@ -54,7 +54,7 @@ pgflow puts **PostgreSQL at the center** of your workflow orchestration. Flows a
```typescript
// In pgflow, the database orchestrates the workflow
new Flow<{ url: string }>({
- slug: 'analyze_website',
+ slug: 'analyzeWebsite',
})
.step(
{ slug: 'extract' },
diff --git a/pkgs/website/src/content/docs/concepts/compilation.mdx b/pkgs/website/src/content/docs/concepts/compilation.mdx
index 4434c2769..63caaaf20 100644
--- a/pkgs/website/src/content/docs/concepts/compilation.mdx
+++ b/pkgs/website/src/content/docs/concepts/compilation.mdx
@@ -11,7 +11,7 @@ pgflow compiles TypeScript flow definitions to SQL migrations via an HTTP-based
## How It Works
-When you run `pgflow compile greet_user`, the following happens:
+When you run `pgflow compile greetUser`, the following happens:
```
┌─────────────┐ HTTP GET ┌─────────────────────┐
diff --git a/pkgs/website/src/content/docs/concepts/context-object.mdx b/pkgs/website/src/content/docs/concepts/context-object.mdx
index dbcdba33b..9183e1609 100644
--- a/pkgs/website/src/content/docs/concepts/context-object.mdx
+++ b/pkgs/website/src/content/docs/concepts/context-object.mdx
@@ -53,7 +53,7 @@ import { sql } from '../db.js';
import { supabase } from '../supabase-client.js';
const ProcessUserFlow = new Flow<{ userId: string }>({
- slug: 'process_user'
+ slug: 'processUser'
})
.step({ slug: 'validate' }, async (input) => {
const [user] = await sql`SELECT * FROM users WHERE id = ${input.userId}`;
@@ -88,7 +88,7 @@ async function processOrder(input: { orderId: string }) {
import { Flow } from '@pgflow/dsl/supabase';
const ProcessUserFlow = new Flow<{ userId: string }>({
- slug: 'process_user'
+ slug: 'processUser'
})
.step({ slug: 'validate' }, async (input, ctx) => {
const [user] = await ctx.sql`SELECT * FROM users WHERE id = ${input.userId}`;
@@ -142,7 +142,7 @@ While examples in this documentation use `ctx.` for clarity, destructuring often
```typescript ins="{ sql, supabase, stepTask }"
// Destructure only what you need
-.step({ slug: 'sync_data' }, async (input, { sql, supabase, stepTask }) => {
+.step({ slug: 'syncData' }, async (input, { sql, supabase, stepTask }) => {
console.log(`Syncing for run ${stepTask.run_id}`);
const data = await sql`SELECT * FROM pending_sync WHERE user_id = ${input.userId}`;
diff --git a/pkgs/website/src/content/docs/concepts/data-model.mdx b/pkgs/website/src/content/docs/concepts/data-model.mdx
index cc257c4ce..733a38a74 100644
--- a/pkgs/website/src/content/docs/concepts/data-model.mdx
+++ b/pkgs/website/src/content/docs/concepts/data-model.mdx
@@ -13,7 +13,7 @@ pgflow's data model separates flow definitions from runtime execution state. Flo
### 🏷️ Slugs as Identifiers
-Flows and steps are identified by slugs - simple text identifiers like `'analyze_website'` or `'fetch_data'`. Slugs must be valid identifiers (alphanumeric plus underscores, max 128 characters) and serve as natural, readable keys throughout the system.
+Flows and steps are identified by slugs - simple text identifiers like `'analyzeWebsite'` or `'fetchData'`. Slugs use camelCase and must be valid identifiers (alphanumeric, max 128 characters), serving as natural, readable keys throughout the system.
### 🔑 Composite Keys with Denormalization
diff --git a/pkgs/website/src/content/docs/concepts/index.mdx b/pkgs/website/src/content/docs/concepts/index.mdx
index d20ef175c..f30e1f0bc 100644
--- a/pkgs/website/src/content/docs/concepts/index.mdx
+++ b/pkgs/website/src/content/docs/concepts/index.mdx
@@ -55,8 +55,8 @@ Steps execute through **tasks** (the actual units of work) - regular steps have
description="Understanding how pgflow provides platform resources to handlers through context"
/>
diff --git a/pkgs/website/src/content/docs/concepts/map-steps.mdx b/pkgs/website/src/content/docs/concepts/map-steps.mdx
index 2243dc531..8ced85918 100644
--- a/pkgs/website/src/content/docs/concepts/map-steps.mdx
+++ b/pkgs/website/src/content/docs/concepts/map-steps.mdx
@@ -146,7 +146,7 @@ Process the flow's input array directly by omitting the `array` property:
```typescript
// Flow input MUST be an array
-new Flow({ slug: 'batch_processor' })
+new Flow({ slug: 'batchProcessor' })
.map(
{ slug: 'processEach' }, // No 'array' property
(item) => processItem(item)
@@ -156,7 +156,7 @@ new Flow({ slug: 'batch_processor' })
Starting this flow:
```sql
SELECT pgflow.start_flow(
- flow_slug => 'batch_processor',
+ flow_slug => 'batchProcessor',
input => '["item1", "item2", "item3"]'::jsonb
);
```
@@ -166,7 +166,7 @@ SELECT pgflow.start_flow(
Process another step's array output by specifying the `array` property:
```typescript
-new Flow<{ searchQuery: string }>({ slug: 'search_pipeline' })
+new Flow<{ searchQuery: string }>({ slug: 'searchPipeline' })
.array(
{ slug: 'searchResults' },
async (input) => await searchAPI(input.run.searchQuery)
diff --git a/pkgs/website/src/content/docs/concepts/naming-conventions.mdx b/pkgs/website/src/content/docs/concepts/naming-conventions.mdx
new file mode 100644
index 000000000..8d588f516
--- /dev/null
+++ b/pkgs/website/src/content/docs/concepts/naming-conventions.mdx
@@ -0,0 +1,201 @@
+---
+title: Naming conventions
+description: Naming conventions for flows, steps, files, and workers in pgflow
+sidebar:
+ order: 70
+---
+
+This guide covers all naming conventions in pgflow - from flow slugs to file names.
+
+## Quick reference
+
+| Component | Convention | Example |
+|-----------|------------|---------|
+| Flow slug | camelCase | `greetUser` |
+| Step slug | camelCase | `fetchUser` |
+| Flow file | kebab-case | `greet-user.ts` |
+| Flow export | PascalCase | `GreetUser` |
+| Worker directory | kebab-case + `-worker` | `greet-user-worker/` |
+
+**Conversion rule**: the kebab-case file name is derived from the camelCase slug (`greetUser` -> `greet-user.ts`).
+
+## Flow slug naming
+
+Flow slugs use **camelCase**:
+
+```typescript
+export const GreetUser = new Flow({ slug: 'greetUser' })
+export const ProcessOrder = new Flow({ slug: 'processOrder' })
+export const AnalyzeWebsite = new Flow({ slug: 'analyzeWebsite' })
+```
+
+Flow slugs are stored in the database and used to identify flows when starting runs. Using camelCase keeps them consistent with step slugs and JavaScript conventions.
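+
+For example, the slug is what you pass when starting a run. A minimal sketch using the Supabase RPC pattern from the starting-flows docs (the `supabase` client setup is assumed):
+
+```typescript
+const { data, error } = await supabase
+  .schema('pgflow')
+  .rpc('start_flow', {
+    flow_slug: 'greetUser',
+    input: { name: 'Ada' },
+  });
+```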
+
+## File naming
+
+Flow files use **kebab-case**, the prevailing convention in the TypeScript ecosystem:
+
+| Flow Slug | File Name |
+|-----------|-----------|
+| `greetUser` | `greet-user.ts` |
+| `processOrder` | `process-order.ts` |
+| `analyzeWebsite` | `analyze-website.ts` |
+
+This follows TypeScript/JavaScript ecosystem conventions and ensures compatibility across all filesystems (including case-insensitive systems like macOS and Windows).
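+
+If you need to derive file names mechanically, here is a tiny helper sketch (hypothetical, not part of pgflow):
+
+```typescript
+// Hypothetical helper: camelCase slug -> kebab-case file name
+function slugToFileName(slug: string): string {
+  return slug.replace(/([a-z0-9])([A-Z])/g, '$1-$2').toLowerCase() + '.ts';
+}
+
+slugToFileName('greetUser');      // => 'greet-user.ts'
+slugToFileName('analyzeWebsite'); // => 'analyze-website.ts'
+```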
+
+### Directory structure
+
+```
+supabase/
+ flows/
+ index.ts # Barrel file re-exporting all flows
+ greet-user.ts # kebab-case filename
+ process-order.ts
+ analyze-website.ts
+ functions/
+ pgflow/ # Control plane (serves all flows)
+ index.ts
+```
+
+### Barrel file pattern
+
+The `flows/index.ts` file re-exports all flows:
+
+```typescript
+// supabase/flows/index.ts
+export { GreetUser } from './greet-user.ts';
+export { ProcessOrder } from './process-order.ts';
+export { AnalyzeWebsite } from './analyze-website.ts';
+```
+
+## Worker directory naming
+
+Worker edge functions use **kebab-case** with a `-worker` suffix:
+
+| Flow Slug | Worker Directory |
+|-----------|------------------|
+| `greetUser` | `greet-user-worker/` |
+| `processOrder` | `process-order-worker/` |
+| `analyzeWebsite` | `analyze-website-worker/` |
+
+This follows Supabase Edge Functions conventions and makes it clear which directories contain workers and which contain other kinds of edge functions.
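+
+For example, a worker for the `greetUser` flow sits alongside the control plane (layout as in the organize-flow-code guide):
+
+```
+supabase/
+  functions/
+    pgflow/                  # control plane (serves all flows)
+      index.ts
+      deno.json
+    greet-user-worker/       # worker for the greetUser flow
+      index.ts
+      deno.json
+```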
+
+## Step slug naming
+
+Step slugs use **camelCase** and should reflect the step's primary purpose.
+
+### Recommended approach: Hybrid naming
+
+Ask yourself: **"What matters more - the returned data or that the action happened?"**
+
+- Use **nouns** when downstream steps process the returned data meaningfully
+- Use **verb-noun** when the side effect is the primary purpose, even if data is returned
+
+### Result-focused naming (nouns)
+
+```ts
+// These steps produce data that other steps consume
+.step({ slug: "website" }, ...) // Returns website content
+.step({ slug: "summary", dependsOn: ["website"] }, ...) // Returns summary text
+.map({ slug: "userProfiles", array: "userIds" }, ...) // Returns user profile objects
+```
+
+### Action-focused naming (verb-noun)
+
+```ts
+// These steps are primarily about their side effects
+.step({ slug: "saveToDb", dependsOn: ["summary"] }, ...) // Side effect: database write
+.map({ slug: "sendEmails", array: "users" }, ...) // Side effect: emails sent
+.map({ slug: "resizeImages", array: "images" }, ...) // Action: transformation
+```
+
+## Why this works well
+
+1. **Result-focused steps** with noun names create intuitive property access:
+ ```ts
+ // Natural to access data
+ ({ website, userProfiles }) => {
+ return analyzeData(website.content, userProfiles);
+ }
+ ```
+
+2. **Action-focused steps** with verb names clearly communicate their purpose:
+ ```ts
+ // Clear that this is about the action, not the return value
+ .step({ slug: "notifyAdmins" }, async () => {
+ await sendSlackMessage("Process complete");
+ return { notified: true }; // Simple confirmation
+ })
+ ```
+
+3. The distinction helps readers quickly understand whether a step exists to:
+ - **Produce data** for the workflow (noun)
+ - **Perform an action** with side effects (verb-noun)
+
+## The nuance: Many steps do both
+
+Most real-world steps both perform actions AND return data. The naming should reflect the **primary purpose**:
+
+```ts
+// Primary purpose: Send notifications (side effect)
+// The success/failure data is just metadata about the action
+.map({ slug: "sendNotifications" }, async (user) => {
+ const sent = await sendEmail(user);
+ return { userId: user.id, sent, timestamp: Date.now() };
+})
+.step({ slug: "updateDatabase", dependsOn: ["sendNotifications"] }, (input) => {
+ // Even though we use the results, the primary goal was sending
+ const successful = input.sendNotifications.filter(r => r.sent);
+ await markUsersNotified(successful);
+});
+
+// Primary purpose: Get user profiles (data)
+// The API call is just how we get the data
+.map({ slug: "userProfiles" }, async (userId) => {
+ return await fetchUserProfile(userId); // Side effect: API call
+})
+.step({ slug: "analyze", dependsOn: ["userProfiles"] }, (input) => {
+ // We care about the profile data, not that an API was called
+ return analyzeProfiles(input.userProfiles);
+});
+```
+
+## Consistency matters
+
+While this guide recommends the hybrid pattern for step naming, the most important thing is consistency within your project. Document the chosen convention and apply it throughout the codebase.
diff --git a/pkgs/website/src/content/docs/concepts/naming-steps.mdx b/pkgs/website/src/content/docs/concepts/naming-steps.mdx
deleted file mode 100644
index 612216554..000000000
--- a/pkgs/website/src/content/docs/concepts/naming-steps.mdx
+++ /dev/null
@@ -1,95 +0,0 @@
----
-title: Naming workflow steps effectively
-description: Best practices for naming steps in your pgflow flows
-sidebar:
- order: 70
----
-
-Step naming is an important design decision that affects workflow readability and maintainability. After analyzing multiple pgflow projects, these patterns have proven effective.
-
-## Recommended approach: Hybrid naming
-
-Ask yourself: **"What matters more - the returned data or that the action happened?"**
-
-- Use **nouns** when downstream steps process the returned data meaningfully
-- Use **verb-noun** when the side effect is the primary purpose, even if data is returned
-
-### Result-focused naming (nouns)
-
-```ts
-// These steps produce data that other steps consume
-.step({ slug: "website" }, ...) // Returns website content
-.step({ slug: "summary", dependsOn: ["website"] }, ...) // Returns summary text
-.map({ slug: "userProfiles", array: "userIds" }, ...) // Returns user profile objects
-```
-
-### Action-focused naming (verb-noun)
-
-```ts
-// These steps are primarily about their side effects
-.step({ slug: "saveToDb", dependsOn: ["summary"] }, ...) // Side effect: database write
-.map({ slug: "sendEmails", array: "users" }, ...) // Side effect: emails sent
-.map({ slug: "resizeImages", array: "images" }, ...) // Action: transformation
-```
-
-## Why this works well
-
-1. **Result-focused steps** with noun names create intuitive property access:
- ```ts
- // Natural to access data
- ({ website, userProfiles }) => {
- return analyzeData(website.content, userProfiles);
- }
- ```
-
-2. **Action-focused steps** with verb names clearly communicate their purpose:
- ```ts
- // Clear that this is about the action, not the return value
- .step({ slug: "notifyAdmins" }, async () => {
- await sendSlackMessage("Process complete");
- return { notified: true }; // Simple confirmation
- })
- ```
-
-3. The distinction helps readers quickly understand whether a step exists to:
- - **Produce data** for the workflow (noun)
- - **Perform an action** with side effects (verb-noun)
-
-## The nuance: Many steps do both
-
-Most real-world steps both perform actions AND return data. The naming should reflect the **primary purpose**:
-
-```ts
-// Primary purpose: Send notifications (side effect)
-// The success/failure data is just metadata about the action
-.map({ slug: "sendNotifications" }, async (user) => {
- const sent = await sendEmail(user);
- return { userId: user.id, sent, timestamp: Date.now() };
-})
-.step({ slug: "updateDatabase", dependsOn: ["sendNotifications"] }, (input) => {
- // Even though we use the results, the primary goal was sending
- const successful = input.sendNotifications.filter(r => r.sent);
- await markUsersNotified(successful);
-});
-
-// Primary purpose: Get user profiles (data)
-// The API call is just how we get the data
-.map({ slug: "userProfiles" }, async (userId) => {
- return await fetchUserProfile(userId); // Side effect: API call
-})
-.step({ slug: "analyze", dependsOn: ["userProfiles"] }, (input) => {
- // We care about the profile data, not that an API was called
- return analyzeProfiles(input.userProfiles);
-});
-```
-
-## Use camelCase for step slugs
-
-```ts
-.step({ slug: "websiteContent" }, ...) // Correct
-.step({ slug: "website_content" }, ...) // Avoid
-```
-
-Step slugs are used as identifiers in TypeScript and must match exactly when referenced in dependency arrays. Following JavaScript conventions with camelCase helps maintain consistency.
-
-While this guide recommends the hybrid pattern, the most important thing is consistency within your project. Document the chosen convention and apply it throughout the codebase.
diff --git a/pkgs/website/src/content/docs/concepts/understanding-flows.mdx b/pkgs/website/src/content/docs/concepts/understanding-flows.mdx
index f349c2261..77a1bdf82 100644
--- a/pkgs/website/src/content/docs/concepts/understanding-flows.mdx
+++ b/pkgs/website/src/content/docs/concepts/understanding-flows.mdx
@@ -23,7 +23,7 @@ The Flow class uses a fluent interface to build this declarative manifest:
```typescript
// Method chaining for flow definition
-new Flow({ slug: 'my_flow' })
+new Flow({ slug: 'myFlow' })
.step({ slug: 'step1' }, async (input) => { /* ... */ })
.step({ slug: 'step2' }, async (input) => { /* ... */ });
```
@@ -48,7 +48,7 @@ Consider this example:
```typescript "input.run.url" "input.run.userId" "input.scrape.content"
new Flow<{ url: string, userId: string }>({
- slug: 'analyze_website',
+ slug: 'analyzeWebsite',
})
.step(
{ slug: 'scrape' },
@@ -82,7 +82,7 @@ type WebsiteInput = { url: string, userId: string };
// Create a flow with that input type
new Flow({
- slug: 'analyze_website',
+ slug: 'analyzeWebsite',
})
.step(
{ slug: 'scrape' },
@@ -135,7 +135,7 @@ A semantic wrapper around `.step()` that enforces array return types. Useful for
```typescript
// Fetch an array of items
.array(
- { slug: 'fetch_items' },
+ { slug: 'fetchItems' },
async () => ['item1', 'item2', 'item3']
)
@@ -152,21 +152,21 @@ Processes arrays element-by-element in parallel. Unlike regular steps, map handl
```typescript
// Root map: processes flow input array directly
-new Flow({ slug: 'batch_processor' })
+new Flow({ slug: 'batchProcessor' })
.map(
{ slug: 'uppercase' }, // No 'array' property
(item) => item.toUpperCase()
);
// Dependent map: processes another step's array output
-new Flow<{}>({ slug: 'data_pipeline' })
- .array({ slug: 'fetch_ids' }, () => ['id1', 'id2', 'id3'])
+new Flow<{}>({ slug: 'dataPipeline' })
+ .array({ slug: 'fetchIds' }, () => ['id1', 'id2', 'id3'])
.map(
- { slug: 'fetch_details', array: 'fetch_ids' },
+ { slug: 'fetchDetails', array: 'fetchIds' },
async (id) => await fetchUserDetails(id)
)
.map(
- { slug: 'enrich', array: 'fetch_details' },
+ { slug: 'enrich', array: 'fetchDetails' },
async (user) => ({ ...user, enriched: true })
);
```
@@ -279,7 +279,7 @@ type Input = {
};
new Flow({
- slug: 'user_report',
+ slug: 'userReport',
})
// Step 1: Fetch user data
.step(
diff --git a/pkgs/website/src/content/docs/deploy/tune-flow-config.mdx b/pkgs/website/src/content/docs/deploy/tune-flow-config.mdx
index 3a9758cfe..b4f590db5 100644
--- a/pkgs/website/src/content/docs/deploy/tune-flow-config.mdx
+++ b/pkgs/website/src/content/docs/deploy/tune-flow-config.mdx
@@ -55,12 +55,12 @@ A common scenario is adjusting retry behavior after observing production issues:
-- Check current settings
SELECT step_slug, opt_max_attempts, opt_timeout
FROM pgflow.steps
-WHERE flow_slug = 'analyze_website';
+WHERE flow_slug = 'analyzeWebsite';
-- Update API call step to retry more
UPDATE pgflow.steps
SET opt_max_attempts = 5, opt_timeout = 120
-WHERE flow_slug = 'analyze_website' AND step_slug = 'website';
+WHERE flow_slug = 'analyzeWebsite' AND step_slug = 'website';
```