From 8a7c59cacbfc1ad30d54c1aace18f79f9f2f5959 Mon Sep 17 00:00:00 2001 From: christopherkindl <53372002+christopherkindl@users.noreply.github.com> Date: Sun, 12 Apr 2026 02:18:59 +0200 Subject: [PATCH 1/6] fix(docs): remove scroll-smooth from html to fix navigation scroll position (#1694) The global `scroll-smooth` CSS class on caused Next.js navigation scroll-to-top to animate instead of jumping instantly, making cross-page links appear stuck at the previous scroll position. Signed-off-by: christopherkindl <53372002+christopherkindl@users.noreply.github.com> Co-authored-by: Claude Opus 4.6 (1M context) --- docs/app/[lang]/layout.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/app/[lang]/layout.tsx b/docs/app/[lang]/layout.tsx index 98c8e61210..23b49d8cb2 100644 --- a/docs/app/[lang]/layout.tsx +++ b/docs/app/[lang]/layout.tsx @@ -28,7 +28,7 @@ const Layout = async ({ children, params }: LayoutProps<'/[lang]'>) => { return ( From 851a53016473c937e93b2f4897e7f4683e4058c9 Mon Sep 17 00:00:00 2001 From: John Lindquist Date: Sat, 11 Apr 2026 18:40:07 -0600 Subject: [PATCH 2/6] Add cookbook with 50 workflow pattern recipes (#1564) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add cookbook section with 50 workflow pattern recipes Migrate the "Workflow API Explorer" decision tree concept from workflow-campaign-demos into useworkflow.dev docs as a Cookbook. Infrastructure: - docs/lib/cookbook-tree.ts: decision tree data, 50 recipe metadata entries, slug-to-category mapping - docs/components/geistdocs/cookbook-explorer.tsx: interactive "I want to..." 
decision tree UI with breadcrumb navigation - docs/content/docs/cookbook/index.mdx: landing page rendering CookbookExplorer component - docs/content/docs/cookbook/meta.json + 8 category meta.json files for sidebar nav - docs/content/docs/meta.json: added cookbook to docs nav between foundations and how-it-works - docs/app/[lang]/docs/[[...slug]]/page.tsx: registered CookbookExplorer component 50 recipe MDX files across 8 categories (payments, approvals, resilience, notifications, webhooks, data-processing, routing, observability), each with: - Frontmatter (title, description, type: guide, summary with use-case scenario) - Simplified code snippet (core pattern only, stripped of demo UI concerns) - Full implementation code snippet (exact source from campaign demos) - Key APIs section with links to API reference docs * ploop: iteration 1 checkpoint Automated checkpoint commit. Ploop-Iter: 1 * fix: polish cookbook public routing Keep the cookbook surface canonical at /cookbooks so docs navigation, sitemap output, and AI/chat entry points stop leaking the legacy /docs/cookbook paths. Correct the approval-chain example so the docs teach the intended sequential approval semantics instead of implying the workflow approves after the first successful level. This keeps the cookbook aligned with the docs quality bar and avoids misleading readers with inconsistent behavior. Ploop-Iter: 2 * docs: canonicalize cookbook doc routes Align cookbook-facing docs outputs with the new public route so redirects, sitemap entries, and LLM-facing exports stay consistent. This keeps the polished cookbook section discoverable at its canonical location while trimming the last demo-heavy recipe examples toward the same concise style as the rest of the docs. Ploop-Iter: 3 * ploop: iteration 4 checkpoint Automated checkpoint commit. 
Ploop-Iter: 4 * fix(docs): finalize cookbook route split Keep cookbook content discoverable after moving it to a first-class /cookbooks surface so navigation, canonical metadata, and markdown consumers resolve the new public URLs consistently. Avoid serving the legacy /docs/cookbook tree as if it were still part of the docs section, which reduces duplicate navigation paths and prevents stale static output from competing with the new route structure. Ploop-Iter: 5 * docs: improve cookbook discovery The cookbook landing page needs to work for both exploratory users and users who already know the pattern they want. This keeps the guided decision tree while adding shared category metadata and a searchable browse mode so recipe discovery feels faster and more consistent with the rest of the docs experience. Ploop-Iter: 6 * docs: refine cookbook pattern examples Tighten the simplified cookbook recipes so the examples teach the intended workflow semantics clearly and consistently. The changes keep the documentation focused on the core control-flow patterns reviewers called out, while removing ambiguity around partial arrivals, deadlines, and first-success behavior. 
Ploop-Iter: 7 * docs: decouple cookbook sidebar tree Separate cookbook navigation from the docs page tree so the standalone /cookbooks experience stays stable after the route move and the main docs sidebar no longer leaks cookbook entries.\n\nThis keeps cookbook navigation driven by explicit recipe metadata, which avoids duplicated section titles and makes the docs and cookbook surfaces easier to evolve independently.\n\nPloop-Iter: 8 * fix(docs): align cookbook public nav Keep cookbook pages on their public /cookbooks surface so metadata and copied markdown do not leak legacy /docs/cookbook paths.\n\nSimplify sidebar rendering to trust the injected page tree, which avoids route-specific filtering and keeps cookbook navigation consistent with the active layout tree.\n\nPloop-Iter: 9 * refactor(docs): decouple cookbook routing Move cookbook rendering off the shared docs route so cookbook pages can behave like a first-class docs surface without leaking cookbook-specific UI into the main docs experience. Centralizing cookbook tree filtering keeps sidebar behavior consistent in one place and avoids duplicate cookbook navigation state across layouts. Ploop-Iter: 10 * docs: improve cookbook explorer accessibility Improve the cookbooks entrypoint so loading and keyboard navigation are usable without visual cues, and keep guided and browse modes resilient while the route hydrates. Ploop-Iter: 11 * ploop: iteration 12 checkpoint Automated checkpoint commit. 
Ploop-Iter: 12 * docs: rename cookbooks route to cookbook (singular) * docs: add 5 cookbook overview design variations Add getRecipeHref, getRecipesByCategory, and collectSlugs helpers to cookbook-tree.ts, then create 5 distinct overview page variations at /cookbook/v1 through /cookbook/v5 for side-by-side comparison: - v1: Category Grid (zero-JS scannable overview) - v2: Search-First (real-time filtering with category pills) - v3: Accordion Catalog (expandable category sections) - v4: Decision Wizard (step-by-step guided questions) - v5: Problem-Solution Table (master-detail by scenario) Also adds .impeccable.md with project design context. * docs: remove unrelated package.json bumps and generated artifacts * docs: restructure cookbook from 50 recipes into 27 consolidated pages Restructure the cookbook based on team meeting feedback, toolbar comments, mux-ai pattern analysis, and Vercel org code search. Consolidates duplicates, adds missing patterns, ensures all examples have proper directives and type-check against the real workflow SDK. New structure: - common-patterns/ (9): saga, batching, rate-limiting, fan-out, scheduling, idempotency, webhooks, content-router, child-workflows - agent-patterns/ (5): durable-agent, tool-streaming, human-in-the-loop, tool-orchestration, stop-workflow - integrations/ (3): ai-sdk, sandbox, chat-sdk - advanced/ (6): serializable-steps, durable-objects, isomorphic-packages, secure-credentials, custom-serialization, publishing-libraries All 92 code snippets pass docs-typecheck against real workflow SDK types. Deleted 8 old category folders. Updated cookbook-tree.ts, explorer, nav. * docs: add workflow migration guides Help teams evaluating the GA launch translate existing durable workflow systems into Vercel Workflow without reverse-engineering concept parity from product docs alone. 
The guides focus on real migration decisions and concrete TypeScript examples so adoption can be driven by implementation clarity rather than platform marketing. Ploop-Iter: 1 * ploop: iteration 2 checkpoint Automated checkpoint commit. Ploop-Iter: 2 * docs: refine workflow migration guides Clarify the migration narrative for GA so teams evaluating a move from Temporal, Inngest, or Step Functions get examples that are realistic enough to trust and pricing guidance that matches the current platform model. The Inngest guide needed a complete order-saga flow so the durable orchestration, compensation, and streaming patterns read as a credible migration target instead of a partial sketch. The pricing language across all three guides also needed to be aligned with current Workflow and Vercel compute billing semantics to avoid creating the wrong cost expectations during evaluation. Ploop-Iter: 3 * ploop: iteration 1 checkpoint Automated checkpoint commit. Ploop-Iter: 1 * docs(skill): refine workflow migration guidance Clarify migration rules so agents choose the correct resume primitive, keep streaming guidance aligned with runtime behavior, and avoid implying Vercel-managed execution for self-hosted targets. This reduces avoidable migration mistakes in generated guidance and keeps the skill consistent with the acceptance criteria used to evaluate it. Ploop-Iter: 2 * ploop: iteration 3 checkpoint Automated checkpoint commit. 
Ploop-Iter: 3 * docs(skills): refine workflow migration guidance Reduce the migration skill entry point to the decision surface agents need\nso they can select the correct resume pattern without carrying duplicate\nexamples in the initial context.\n\nClarify framework precedence so prompts that explicitly ask for\nframework-agnostic boundaries do not get Hono- or Next-specific route\nshapes, while preserving framework-specific examples when requested.\n\nCentralize canonical resume examples in the shared patterns reference to\nkeep the guidance consistent across migration paths and reduce drift.\n\nPloop-Iter: 4 * docs: add workflow deep-dive references Add a cohesive set of deep-dive reference articles so the GA launch has architecture-level documentation grounded in the current SDK implementation. This gives readers verified explanations of runtime, replay, streaming, compiler, and cost-model behavior while linking the series together for easier navigation. Ploop-Iter: 1 * ploop: iteration 5 checkpoint Automated checkpoint commit. Ploop-Iter: 5 * docs: normalize deep-dive related links Keep the new deep-dive reference pages cross-linked so readers can move between adjacent runtime concepts without depending on older how-it-works pages alone. This preserves navigational consistency across the GA launch docs set and reduces the chance that architectural explanations drift into isolated pages that are harder to discover and maintain. Ploop-Iter: 2 * docs(skill): refine workflow migration routing Tighten the migration guidance so agents choose the correct resume surface and runtime boundary earlier, reducing incorrect mixed patterns in generated migrations. Add explicit fast paths for self-hosted targets and Step Functions task-token callbacks so the skill stays consistent on callback URL vs deterministic resume decisions. 
Ploop-Iter: 6 * docs: refine workflow migration skill guidance Clarify route selection so the migration skill composes resume, runtime, and app-boundary concerns deterministically. Add a canonical Step Functions self-hosted Hono callback recipe so migrations produce the correct callback-url pattern without mixing incompatible hook surfaces.\n\nPloop-Iter: 7 * docs: checkpoint deep-dive drafts Preserve the verified GA launch deep-dive drafts in git so the campaign work can continue from a stable checkpoint. Capture the reviewed documentation progress now to reduce risk of drift between source-backed research and the publishable drafts. Ploop-Iter: 3 * docs(skills): tighten workflow migration routing Clarify the migration skill's route-selection rules so generated guidance stays consistent across resume surfaces, runtime targets, and named framework boundaries. This reduces ambiguous outputs where agents might mix framework syntaxes or invent callback routes for webhook-based flows, which leads to migration guidance that does not match the user's runtime model. Ploop-Iter: 8 * docs: finalize workflow deep dives Clarify the runtime mechanics behind the GA deep-dive series so launch content stays aligned with the implementation and existing docs. Tightening these explanations reduces the risk of readers internalizing inaccurate mental models about replay, compilation, and workflow execution.\n\nPloop-Iter: 4 * docs(skills): refine workflow migration routing Clarify route selection so migrations choose the correct resume surface and app boundary patterns for the target runtime and framework. Strengthen verification guidance to reject invented callback routes in URL-based flows and keep examples aligned with the documented migration rules. Ploop-Iter: 9 * docs: sync compiler deep-dive runtime details Align the compiler deep-dive trio with the actual Workflow runtime so launch materials describe the same execution model users rely on. 
This keeps the GA narrative accurate around deterministic replay, step queue triggers, and runtime bundle responsibilities, reducing the risk of docs teaching an architecture the SDK does not implement. Ploop-Iter: 5 * docs(skill): tighten workflow migration guidance Reduce hot-path skill context so migration routing stays easier to select and verify during activation. Trimmed examples and converted long invalid samples into concise failure rules so the skill points agents to on-demand references instead of loading bulky worked code by default. Ploop-Iter: 10 * docs: tighten workflow migration guidance Clarify route-key planning and resume-surface defaults so migration outputs stay deterministic when prompts underspecify callback behavior. Strengthen the deep-dive docs to trace runtime handoffs more directly, which reduces ambiguity about how the compiler split maps to durable execution behavior. Ploop-Iter: 11 * docs: refine streaming and cost deep dives Clarify the operational model behind durable streaming and zero-cost suspension so launch materials stay source-accurate for readers comparing workflow runtimes. The updates make the workflow-step boundary, persistence path, and queue-driven cost story more explicit, reducing ambiguity around where stream I/O is allowed and why long waits do not consume compute. Ploop-Iter: 6 * ploop: iteration 12 checkpoint Automated checkpoint commit. Ploop-Iter: 12 * docs: tighten deep-dive streaming accuracy Align the launch deep dives with the current runtime so campaign content does not misstate suspension behavior or streaming backend capabilities. These edits clarify the distinct resume paths for step suspension versus timed waits and document the backend-specific streaming guarantees now available across local, Vercel, and Postgres worlds, reducing the risk that readers build incorrect mental models from launch materials. 
Ploop-Iter: 7 * docs(skills): refine callback resume taxonomy Clarify when migrations should use deterministic internal resume versus generated callback URLs so skill outputs stay consistent across frameworks and hosting targets. Distinguish default webhook responses from manual-response flows to prevent ambiguous guidance and keep the shared callback references directly inspectable.\n\nPloop-Iter: 13 * docs: refine deep-dive runtime wording Clarify the runtime semantics behind suspension and durable streaming so the GA launch materials stay aligned with the source of truth. These edits tighten descriptions around wake-up paths, backend behavior, and stream lifecycle details to reduce ambiguity for readers comparing the docs to the implementation. Ploop-Iter: 8 * docs: clarify webhook response mode defaults Clarify when migrations should use the default webhook behavior versus manual responses so agents make the same callback choice across the skill entrypoint, shared patterns, and API reference. This reduces avoidable ambiguity for callback-url prompts and makes the default 202 behavior explicit unless a prompt requires custom response semantics. Ploop-Iter: 14 * docs: align cost model wake-up semantics Prevent the GA launch materials from teaching an incorrect mental model about how suspended runs wake back up. The updated wording keeps the blog, social, and reference variants anchored to the real runtime paths so readers understand which transitions are queue-delayed, which are step-driven re-enqueues, and why that distinction matters for the cost story. Ploop-Iter: 9 * ploop: iteration 10 checkpoint Automated checkpoint commit. Ploop-Iter: 10 * docs: clarify webhook resume choices Explain the resume-surface decision points so migration and API guidance steer authors toward the correct webhook or hook pattern for the prompt. Reduce common callback-routing mistakes early in the docs and skill so agents make fewer wrong assumptions during workflow migrations. 
Ploop-Iter: 15 * docs: tighten cost model deep dives Clarify the cost-model narrative so launch materials make source-verifiable claims about suspension, wake-up paths, and polling behavior. This keeps the GA messaging aligned with the runtime's actual control flow and avoids overclaiming where the implementation has narrower semantics than the original copy suggested. Ploop-Iter: 11 * docs(skills): tighten resume routing guidance Keep the migration skill entrypoint small so agents load the routing contract only when the source actually pauses for external resume. Clarify the public webhook docs around the default callback flow to reduce accidental use of lower-level runtime APIs.\n\nPloop-Iter: 16 * docs: tighten deep-dive runtime claims Align the launch materials with the current runtime semantics so the cost-model and execution-model narrative stays defensible against the actual implementation. This keeps the GA campaign focused on claims we can support directly from source, especially around suspension, re-enqueue behavior, and the difference between orchestration compute and client-side polling helpers. Ploop-Iter: 12 * docs: correct cost model deep dive claims Align the cost-model launch content with the runtime's actual suspension and re-entry mechanics so GA messaging does not overstate identical-cost waits or imply residency that the queue-based engine does not have. This keeps the public explanation consistent with source-backed behavior around timed wake-ups, explicit workflow re-queue after step completion, and the distinction between idle worker residency and boundary I/O. Ploop-Iter: 13 * chore: remove non-cookbook files from cookbook branch Remove deep-dive articles, migration guides/skill, vercel-toolbar skill, workflow-skills test fixtures, and misc artifacts that belong in separate branches (deep-dives, migration-guides). Revert create-webhook.mdx, getting-started/meta.json, and code-transform.mdx to main versions. 
* docs: address toolbar feedback on cookbook pages Address Vercel toolbar comments from Nathan Rajlich, Peter Wielander, Pranay Prakash, and Karthik Kalyanaraman on the cookbook branch. In saga.mdx, remove the unnecessary `if (!(error instanceof FatalError)) throw error;` guard in the catch block — compensations should always unwind regardless of error type. Replace the `while/pop()!()` loop with a cleaner `for...of reverse()` to avoid the non-null assertion. In ai-sdk.mdx, split the "Using Different Providers" section into two subsections: "Vercel Gateway (string model IDs)" clarifying that all string model IDs route through Gateway, and "Direct Provider Access" showing how to import from provider packages like `@workflow/ai/openai` to bypass Gateway. Change the "Tool Functions as Steps" section to "Tool Functions with Steps" and reword to explain that tool execute functions can optionally include steps via "use step" but don't have to — when they aren't steps, they run in workflow context and can modify workflow state directly. In sandbox.mdx, rewrite the page to reflect that `@vercel/sandbox` now has first-class Workflow SDK support. Replace all `declare function` stubs and `// TODO` placeholders with real `import { Sandbox } from "@vercel/sandbox"`. Remove the four separate "use step" wrapper functions (provisionSandbox, runCommand, teardownSandbox, saveSandboxSnapshot) and show direct `Sandbox.create()`, `sandbox.runCommand()`, and `sandbox.destroy()` calls in the workflow function since these implicitly run as steps. Simplify the agent tool example to use inline execute functions that call Sandbox methods directly with an `activeSandbox` variable for workflow state. Across all cookbook files, replace "Workflow DevKit" with "Workflow SDK" (8 instances in 5 files: publishing-libraries.mdx, secure-credentials.mdx, ai-sdk.mdx, chat-sdk.mdx, sandbox.mdx). 
* docs: simplify cookbook index to plain MDX listing Replace the interactive CookbookExplorer (726-line decision tree + browse component) with a simple MDX page that lists recipes grouped by category with linked titles and descriptions. Remove v1-v5 design variations and trim cookbook-tree.ts to sidebar-only metadata. * fix type checks --------- Co-authored-by: Karthik Kalyanaraman --- docs/app/[lang]/cookbook/[[...slug]]/page.tsx | 132 +++++++ docs/app/[lang]/cookbook/layout.tsx | 13 + docs/app/[lang]/docs/[[...slug]]/page.tsx | 17 +- docs/app/[lang]/docs/layout.tsx | 8 +- docs/app/[lang]/llms.mdx/[[...slug]]/route.ts | 5 +- docs/app/[lang]/llms.txt/route.ts | 3 +- docs/app/[lang]/sitemap.md/route.ts | 6 +- docs/app/sitemap.md/route.ts | 6 +- docs/app/sitemap.ts | 3 +- .../advanced/custom-serialization.mdx | 255 ++++++++++++ .../cookbook/advanced/durable-objects.mdx | 150 +++++++ .../cookbook/advanced/isomorphic-packages.mdx | 145 +++++++ docs/content/docs/cookbook/advanced/meta.json | 11 + .../advanced/publishing-libraries.mdx | 298 ++++++++++++++ .../cookbook/advanced/secure-credentials.mdx | 353 +++++++++++++++++ .../cookbook/advanced/serializable-steps.mdx | 149 +++++++ .../cookbook/agent-patterns/durable-agent.mdx | 191 +++++++++ .../agent-patterns/human-in-the-loop.mdx | 278 +++++++++++++ .../docs/cookbook/agent-patterns/meta.json | 10 + .../cookbook/agent-patterns/stop-workflow.mdx | 216 ++++++++++ .../agent-patterns/tool-orchestration.mdx | 255 ++++++++++++ .../agent-patterns/tool-streaming.mdx | 181 +++++++++ .../cookbook/common-patterns/batching.mdx | 179 +++++++++ .../common-patterns/child-workflows.mdx | 372 ++++++++++++++++++ .../common-patterns/content-router.mdx | 207 ++++++++++ .../docs/cookbook/common-patterns/fan-out.mdx | 208 ++++++++++ .../cookbook/common-patterns/idempotency.mdx | 134 +++++++ .../docs/cookbook/common-patterns/meta.json | 15 + .../common-patterns/rate-limiting.mdx | 228 +++++++++++ .../docs/cookbook/common-patterns/saga.mdx | 
152 +++++++ .../cookbook/common-patterns/scheduling.mdx | 249 ++++++++++++ .../cookbook/common-patterns/webhooks.mdx | 185 +++++++++ docs/content/docs/cookbook/index.mdx | 42 ++ .../docs/cookbook/integrations/ai-sdk.mdx | 198 ++++++++++ .../docs/cookbook/integrations/chat-sdk.mdx | 191 +++++++++ .../docs/cookbook/integrations/meta.json | 4 + .../docs/cookbook/integrations/sandbox.mdx | 128 ++++++ docs/content/docs/cookbook/meta.json | 5 + docs/geistdocs.tsx | 4 + docs/lib/cookbook-tree.ts | 242 ++++++++++++ docs/lib/geistdocs/cookbook-source.ts | 90 +++++ docs/next.config.ts | 50 ++- .../vitest/test/cookbook-advanced.test.ts | 62 +++ workbench/vitest/test/cookbook-agents.test.ts | 132 +++++++ workbench/vitest/test/cookbook-common.test.ts | 142 +++++++ .../vitest/workflows/cookbook/batching.ts | 42 ++ .../workflows/cookbook/child-workflows.ts | 41 ++ .../workflows/cookbook/content-router.ts | 43 ++ .../workflows/cookbook/durable-agent.ts | 52 +++ .../workflows/cookbook/durable-objects.ts | 47 +++ .../vitest/workflows/cookbook/fan-out.ts | 33 ++ .../workflows/cookbook/human-in-the-loop.ts | 42 ++ .../vitest/workflows/cookbook/idempotency.ts | 23 ++ .../workflows/cookbook/rate-limiting.ts | 32 ++ workbench/vitest/workflows/cookbook/saga.ts | 54 +++ .../vitest/workflows/cookbook/scheduling.ts | 16 + .../workflows/cookbook/serializable-steps.ts | 41 ++ .../workflows/cookbook/stop-workflow.ts | 45 +++ .../workflows/cookbook/tool-orchestration.ts | 36 ++ .../workflows/cookbook/tool-streaming.ts | 36 ++ .../vitest/workflows/cookbook/webhooks.ts | 19 + 61 files changed, 6489 insertions(+), 17 deletions(-) create mode 100644 docs/app/[lang]/cookbook/[[...slug]]/page.tsx create mode 100644 docs/app/[lang]/cookbook/layout.tsx create mode 100644 docs/content/docs/cookbook/advanced/custom-serialization.mdx create mode 100644 docs/content/docs/cookbook/advanced/durable-objects.mdx create mode 100644 docs/content/docs/cookbook/advanced/isomorphic-packages.mdx create mode 100644 
docs/content/docs/cookbook/advanced/meta.json create mode 100644 docs/content/docs/cookbook/advanced/publishing-libraries.mdx create mode 100644 docs/content/docs/cookbook/advanced/secure-credentials.mdx create mode 100644 docs/content/docs/cookbook/advanced/serializable-steps.mdx create mode 100644 docs/content/docs/cookbook/agent-patterns/durable-agent.mdx create mode 100644 docs/content/docs/cookbook/agent-patterns/human-in-the-loop.mdx create mode 100644 docs/content/docs/cookbook/agent-patterns/meta.json create mode 100644 docs/content/docs/cookbook/agent-patterns/stop-workflow.mdx create mode 100644 docs/content/docs/cookbook/agent-patterns/tool-orchestration.mdx create mode 100644 docs/content/docs/cookbook/agent-patterns/tool-streaming.mdx create mode 100644 docs/content/docs/cookbook/common-patterns/batching.mdx create mode 100644 docs/content/docs/cookbook/common-patterns/child-workflows.mdx create mode 100644 docs/content/docs/cookbook/common-patterns/content-router.mdx create mode 100644 docs/content/docs/cookbook/common-patterns/fan-out.mdx create mode 100644 docs/content/docs/cookbook/common-patterns/idempotency.mdx create mode 100644 docs/content/docs/cookbook/common-patterns/meta.json create mode 100644 docs/content/docs/cookbook/common-patterns/rate-limiting.mdx create mode 100644 docs/content/docs/cookbook/common-patterns/saga.mdx create mode 100644 docs/content/docs/cookbook/common-patterns/scheduling.mdx create mode 100644 docs/content/docs/cookbook/common-patterns/webhooks.mdx create mode 100644 docs/content/docs/cookbook/index.mdx create mode 100644 docs/content/docs/cookbook/integrations/ai-sdk.mdx create mode 100644 docs/content/docs/cookbook/integrations/chat-sdk.mdx create mode 100644 docs/content/docs/cookbook/integrations/meta.json create mode 100644 docs/content/docs/cookbook/integrations/sandbox.mdx create mode 100644 docs/content/docs/cookbook/meta.json create mode 100644 docs/lib/cookbook-tree.ts create mode 100644 
docs/lib/geistdocs/cookbook-source.ts create mode 100644 workbench/vitest/test/cookbook-advanced.test.ts create mode 100644 workbench/vitest/test/cookbook-agents.test.ts create mode 100644 workbench/vitest/test/cookbook-common.test.ts create mode 100644 workbench/vitest/workflows/cookbook/batching.ts create mode 100644 workbench/vitest/workflows/cookbook/child-workflows.ts create mode 100644 workbench/vitest/workflows/cookbook/content-router.ts create mode 100644 workbench/vitest/workflows/cookbook/durable-agent.ts create mode 100644 workbench/vitest/workflows/cookbook/durable-objects.ts create mode 100644 workbench/vitest/workflows/cookbook/fan-out.ts create mode 100644 workbench/vitest/workflows/cookbook/human-in-the-loop.ts create mode 100644 workbench/vitest/workflows/cookbook/idempotency.ts create mode 100644 workbench/vitest/workflows/cookbook/rate-limiting.ts create mode 100644 workbench/vitest/workflows/cookbook/saga.ts create mode 100644 workbench/vitest/workflows/cookbook/scheduling.ts create mode 100644 workbench/vitest/workflows/cookbook/serializable-steps.ts create mode 100644 workbench/vitest/workflows/cookbook/stop-workflow.ts create mode 100644 workbench/vitest/workflows/cookbook/tool-orchestration.ts create mode 100644 workbench/vitest/workflows/cookbook/tool-streaming.ts create mode 100644 workbench/vitest/workflows/cookbook/webhooks.ts diff --git a/docs/app/[lang]/cookbook/[[...slug]]/page.tsx b/docs/app/[lang]/cookbook/[[...slug]]/page.tsx new file mode 100644 index 0000000000..7ac224920d --- /dev/null +++ b/docs/app/[lang]/cookbook/[[...slug]]/page.tsx @@ -0,0 +1,132 @@ +import { Step, Steps } from 'fumadocs-ui/components/steps'; +import { Tab, Tabs } from 'fumadocs-ui/components/tabs'; +import { createRelativeLink } from 'fumadocs-ui/mdx'; +import type { Metadata } from 'next'; +import { notFound } from 'next/navigation'; +import type { ComponentProps } from 'react'; +import { + rewriteCookbookUrl, + rewriteCookbookUrlsInText, +} from 
'@/lib/geistdocs/cookbook-source'; +import { AskAI } from '@/components/geistdocs/ask-ai'; +import { CopyPage } from '@/components/geistdocs/copy-page'; +import { + DocsBody, + DocsDescription, + DocsPage, + DocsTitle, +} from '@/components/geistdocs/docs-page'; +import { EditSource } from '@/components/geistdocs/edit-source'; +import { Feedback } from '@/components/geistdocs/feedback'; +import { getMDXComponents } from '@/components/geistdocs/mdx-components'; +import { OpenInChat } from '@/components/geistdocs/open-in-chat'; +import { ScrollTop } from '@/components/geistdocs/scroll-top'; +import { Badge } from '@/components/ui/badge'; +import { Separator } from '@/components/ui/separator'; +import { getLLMText, getPageImage, source } from '@/lib/geistdocs/source'; + +const Page = async ({ params }: PageProps<'/[lang]/cookbook/[[...slug]]'>) => { + const { slug, lang } = await params; + + // Prepend 'cookbook' to resolve from the docs source + const resolvedSlug = slug ? ['cookbook', ...slug] : ['cookbook']; + const page = source.getPage(resolvedSlug, lang); + + if (!page) { + notFound(); + } + + const publicUrl = rewriteCookbookUrl(page.url); + const publicPage = { ...page, url: publicUrl } as typeof page; + + const markdown = rewriteCookbookUrlsInText(await getLLMText(page)); + const MDX = page.data.body; + + const RelativeLink = createRelativeLink(source, publicPage); + const PublicCookbookLink = (props: ComponentProps) => { + const href = + typeof props.href === 'string' + ? 
rewriteCookbookUrl(props.href) + : props.href; + return ; + }; + + return ( + + + + + + + + + + ), + }} + toc={page.data.toc} + > + {page.data.title} + {page.data.description} + + + + + ); +}; + +export const generateStaticParams = () => { + // Generate params for all cookbook pages + const allParams = source.generateParams(); + return allParams + .filter((p) => Array.isArray(p.slug) && p.slug[0] === 'cookbook') + .map((p) => ({ + ...p, + slug: (p.slug as string[]).slice(1), // Remove 'cookbook' prefix + })); +}; + +export const generateMetadata = async ({ + params, +}: PageProps<'/[lang]/cookbook/[[...slug]]'>) => { + const { slug, lang } = await params; + const resolvedSlug = slug ? ['cookbook', ...slug] : ['cookbook']; + const page = source.getPage(resolvedSlug, lang); + + if (!page) { + notFound(); + } + + const publicPath = rewriteCookbookUrl(page.url); + + const metadata: Metadata = { + title: page.data.title, + description: page.data.description, + openGraph: { + images: getPageImage(page).url, + }, + alternates: { + canonical: publicPath, + types: { + 'text/markdown': `${publicPath}.md`, + }, + }, + }; + + return metadata; +}; + +export default Page; diff --git a/docs/app/[lang]/cookbook/layout.tsx b/docs/app/[lang]/cookbook/layout.tsx new file mode 100644 index 0000000000..fa72592679 --- /dev/null +++ b/docs/app/[lang]/cookbook/layout.tsx @@ -0,0 +1,13 @@ +import { DocsLayout } from '@/components/geistdocs/docs-layout'; +import { getCookbookTree } from '@/lib/geistdocs/cookbook-source'; + +const Layout = async ({ + children, + params, +}: LayoutProps<'/[lang]/cookbook'>) => { + const { lang } = await params; + + return {children}; +}; + +export default Layout; diff --git a/docs/app/[lang]/docs/[[...slug]]/page.tsx b/docs/app/[lang]/docs/[[...slug]]/page.tsx index e1bca864c3..0aaaf5fb7f 100644 --- a/docs/app/[lang]/docs/[[...slug]]/page.tsx +++ b/docs/app/[lang]/docs/[[...slug]]/page.tsx @@ -2,7 +2,8 @@ import { Step, Steps } from 
'fumadocs-ui/components/steps'; import { Tab, Tabs } from 'fumadocs-ui/components/tabs'; import { createRelativeLink } from 'fumadocs-ui/mdx'; import type { Metadata } from 'next'; -import { notFound } from 'next/navigation'; +import { notFound, permanentRedirect } from 'next/navigation'; +import { rewriteCookbookUrl } from '@/lib/geistdocs/cookbook-source'; import { AgentTraces } from '@/components/custom/agent-traces'; import { FluidComputeCallout } from '@/components/custom/fluid-compute-callout'; import { AskAI } from '@/components/geistdocs/ask-ai'; @@ -31,6 +32,12 @@ const WorldTestingPerformanceNoop = () => null; const Page = async ({ params }: PageProps<'/[lang]/docs/[[...slug]]'>) => { const { slug, lang } = await params; + if (Array.isArray(slug) && slug[0] === 'cookbook') { + const rest = slug.slice(1).join('/'); + const legacyPath = `/docs/cookbook${rest ? `/${rest}` : ''}`; + permanentRedirect(`/${lang}${rewriteCookbookUrl(legacyPath)}`); + } + const page = source.getPage(slug, lang); if (!page) { @@ -85,7 +92,13 @@ const Page = async ({ params }: PageProps<'/[lang]/docs/[[...slug]]'>) => { ); }; -export const generateStaticParams = () => source.generateParams(); +export const generateStaticParams = () => + source + .generateParams() + .filter( + (params) => + !(Array.isArray(params.slug) && params.slug[0] === 'cookbook'), + ); export const generateMetadata = async ({ params, diff --git a/docs/app/[lang]/docs/layout.tsx b/docs/app/[lang]/docs/layout.tsx index 583e850925..b59f605872 100644 --- a/docs/app/[lang]/docs/layout.tsx +++ b/docs/app/[lang]/docs/layout.tsx @@ -1,10 +1,14 @@ import { DocsLayout } from '@/components/geistdocs/docs-layout'; -import { source } from '@/lib/geistdocs/source'; +import { getDocsTreeWithoutCookbook } from '@/lib/geistdocs/cookbook-source'; const Layout = async ({ children, params }: LayoutProps<'/[lang]/docs'>) => { const { lang } = await params; - return {children}; + return ( + + {children} + + ); }; export default 
Layout; diff --git a/docs/app/[lang]/llms.mdx/[[...slug]]/route.ts b/docs/app/[lang]/llms.mdx/[[...slug]]/route.ts index 8f6eb71527..897e54d56a 100644 --- a/docs/app/[lang]/llms.mdx/[[...slug]]/route.ts +++ b/docs/app/[lang]/llms.mdx/[[...slug]]/route.ts @@ -1,4 +1,5 @@ import { notFound } from 'next/navigation'; +import { rewriteCookbookUrlsInText } from '@/lib/geistdocs/cookbook-source'; import { getLLMText, source } from '@/lib/geistdocs/source'; import { i18n } from '@/lib/geistdocs/i18n'; @@ -18,8 +19,10 @@ export async function GET( const sitemapPath = lang === i18n.defaultLanguage ? '/sitemap.md' : `/${lang}/sitemap.md`; + const text = await getLLMText(page); + return new Response( - (await getLLMText(page)) + + rewriteCookbookUrlsInText(text) + `\n\n## Sitemap [Overview of all docs pages](${sitemapPath})\n`, { diff --git a/docs/app/[lang]/llms.txt/route.ts b/docs/app/[lang]/llms.txt/route.ts index 96f061223d..343ad45b0c 100644 --- a/docs/app/[lang]/llms.txt/route.ts +++ b/docs/app/[lang]/llms.txt/route.ts @@ -1,4 +1,5 @@ import type { NextRequest } from 'next/server'; +import { rewriteCookbookUrlsInText } from '@/lib/geistdocs/cookbook-source'; import { getLLMText, source } from '@/lib/geistdocs/source'; export const revalidate = false; @@ -11,7 +12,7 @@ export const GET = async ( const scan = source.getPages(lang).map(getLLMText); const scanned = await Promise.all(scan); - return new Response(scanned.join('\n\n'), { + return new Response(rewriteCookbookUrlsInText(scanned.join('\n\n')), { headers: { 'Content-Type': 'text/markdown; charset=utf-8', }, diff --git a/docs/app/[lang]/sitemap.md/route.ts b/docs/app/[lang]/sitemap.md/route.ts index 1912d496d9..7c193e126d 100644 --- a/docs/app/[lang]/sitemap.md/route.ts +++ b/docs/app/[lang]/sitemap.md/route.ts @@ -1,4 +1,5 @@ import type { Node, Root } from 'fumadocs-core/page-tree'; +import { rewriteCookbookUrl } from '@/lib/geistdocs/cookbook-source'; import { source } from '@/lib/geistdocs/source'; export const 
revalidate = false; @@ -16,10 +17,10 @@ export async function GET( if ('type' in node) { if (node.type === 'page') { - mdText += `${indent}- [${node.name}](${node.url})\n`; + mdText += `${indent}- [${node.name}](${rewriteCookbookUrl(node.url)})\n`; } else if (node.type === 'folder') { if (node.index) { - mdText += `${indent}- [${node.name}](${node.index.url})\n`; + mdText += `${indent}- [${node.name}](${rewriteCookbookUrl(node.index.url)})\n`; } else { mdText += `${indent}- ${node.name}\n`; } @@ -30,7 +31,6 @@ export async function GET( } } } else if (node.children.length > 0) { - // Root node for (const child of node.children) { traverseTree(child, depth); } diff --git a/docs/app/sitemap.md/route.ts b/docs/app/sitemap.md/route.ts index 6129b002e7..fa01ffc501 100644 --- a/docs/app/sitemap.md/route.ts +++ b/docs/app/sitemap.md/route.ts @@ -1,4 +1,5 @@ import type { Node, Root } from 'fumadocs-core/page-tree'; +import { rewriteCookbookUrl } from '@/lib/geistdocs/cookbook-source'; import { i18n } from '@/lib/geistdocs/i18n'; import { source } from '@/lib/geistdocs/source'; @@ -13,10 +14,10 @@ export async function GET(_req: Request) { if ('type' in node) { if (node.type === 'page') { - mdText += `${indent}- [${node.name}](${node.url})\n`; + mdText += `${indent}- [${node.name}](${rewriteCookbookUrl(node.url)})\n`; } else if (node.type === 'folder') { if (node.index) { - mdText += `${indent}- [${node.name}](${node.index.url})\n`; + mdText += `${indent}- [${node.name}](${rewriteCookbookUrl(node.index.url)})\n`; } else { mdText += `${indent}- ${node.name}\n`; } @@ -27,7 +28,6 @@ export async function GET(_req: Request) { } } } else if (node.children.length > 0) { - // Root node for (const child of node.children) { traverseTree(child, depth); } diff --git a/docs/app/sitemap.ts b/docs/app/sitemap.ts index 673ea996d2..9f3be47eba 100644 --- a/docs/app/sitemap.ts +++ b/docs/app/sitemap.ts @@ -1,5 +1,6 @@ import type { MetadataRoute } from 'next'; +import { rewriteCookbookUrl } 
from '@/lib/geistdocs/cookbook-source'; import { source } from '@/lib/geistdocs/source'; const protocol = process.env.NODE_ENV === 'production' ? 'https' : 'http'; @@ -17,7 +18,7 @@ export default function sitemap(): MetadataRoute.Sitemap { changeFrequency: 'weekly' as const, lastModified: undefined, priority: 0.5, - url: url(page.url), + url: url(rewriteCookbookUrl(page.url)), }); } diff --git a/docs/content/docs/cookbook/advanced/custom-serialization.mdx b/docs/content/docs/cookbook/advanced/custom-serialization.mdx new file mode 100644 index 0000000000..b39e619b44 --- /dev/null +++ b/docs/content/docs/cookbook/advanced/custom-serialization.mdx @@ -0,0 +1,255 @@ +--- +title: Custom Serialization +description: Make class instances serializable across workflow boundaries using the WORKFLOW_SERIALIZE and WORKFLOW_DESERIALIZE symbol protocol. +type: guide +summary: Implement the serde symbol protocol on classes so instances survive serialization when passed between workflow and step functions, and register them in the global class registry. +--- + + +This is an advanced guide. It dives into workflow internals and is not required reading to use workflow. + + +## The Problem + +Workflow functions run inside a sandboxed VM. Every value that crosses a function boundary — step arguments, step return values, workflow inputs — must be serializable. Plain objects, strings, and numbers work automatically, but **class instances** lose their prototype chain and methods during serialization. + +```typescript lineNumbers +class StorageClient { + constructor(private region: string) {} + + async upload(key: string, body: Uint8Array) { + // ... 
uses this.region internally + } +} + +export async function processFile(client: StorageClient) { + "use workflow"; + + // client is no longer a StorageClient here — it's a plain object + // client.upload() throws: "client.upload is not a function" + await uploadStep(client, "output.json", data); +} +``` + +The [step-as-factory pattern](/docs/cookbook/advanced/serializable-steps) solves this by deferring object construction into steps. But sometimes you need the object itself to cross boundaries — for example, when a class instance is passed as a workflow input, returned from a step, or stored in workflow state. That's where custom serialization comes in. + +## The WORKFLOW_SERIALIZE / WORKFLOW_DESERIALIZE Protocol + +The `@workflow/serde` package exports two symbols that act as a serialization protocol. When the workflow runtime encounters a class instance with these symbols, it knows how to convert it to plain data and back. + +{/* @skip-typecheck - @workflow/serde is not mapped in the type-checker */} +```typescript lineNumbers +import { WORKFLOW_SERIALIZE, WORKFLOW_DESERIALIZE } from "@workflow/serde"; + +class Point { + constructor(public x: number, public y: number) {} + + distanceTo(other: Point): number { + return Math.sqrt((this.x - other.x) ** 2 + (this.y - other.y) ** 2); + } + + static [WORKFLOW_SERIALIZE](instance: Point) { + return { x: instance.x, y: instance.y }; + } + + static [WORKFLOW_DESERIALIZE](data: { x: number; y: number }) { + return new Point(data.x, data.y); + } +} +``` + +Both methods must be **static**. `WORKFLOW_SERIALIZE` receives an instance and returns plain serializable data. `WORKFLOW_DESERIALIZE` receives that same data and reconstructs a new instance. + + +Both serde methods run inside the workflow VM. They must not use Node.js APIs, non-deterministic operations, or network calls. Keep them focused on extracting and reconstructing data. 
+ + +## Automatic Class Registration + +For the runtime to deserialize a class, the class must be registered in a global registry with a stable `classId`. The SWC compiler plugin handles this automatically — when it detects a class with both `WORKFLOW_SERIALIZE` and `WORKFLOW_DESERIALIZE` static methods, it generates registration code at build time. + +This means you only need to implement the two symbol methods. The compiler assigns a deterministic `classId` based on the file path and class name, and registers it in the global `Symbol.for("workflow-class-registry")` registry. + + +No manual registration is required for classes defined in your workflow files. The SWC plugin detects the serde symbols and generates the registration automatically at build time. + + +### Manual Registration for Library Authors + +If you're a library author whose classes are defined **outside** the workflow build pipeline (e.g., in a published npm package), the SWC plugin won't process your code. In that case, you need to register classes manually using the same global registry the runtime uses: + +```typescript lineNumbers +const WORKFLOW_CLASS_REGISTRY = Symbol.for("workflow-class-registry"); + +function registerSerializableClass(classId: string, cls: Function) { + const g = globalThis as any; + let registry = g[WORKFLOW_CLASS_REGISTRY] as Map | undefined; + if (!registry) { + registry = new Map(); + g[WORKFLOW_CLASS_REGISTRY] = registry; + } + registry.set(classId, cls); + Object.defineProperty(cls, "classId", { + value: classId, + writable: false, + enumerable: false, + configurable: false, + }); +} +``` + +Then call it after your class definition: + +{/* @skip-typecheck - references variables from prior code block */} +```typescript lineNumbers +registerSerializableClass("WorkflowStorageClient", WorkflowStorageClient); +``` + +The `classId` is a string identifier stored alongside the serialized data. 
When the runtime encounters serialized data tagged with that ID, it looks up the registry to find the class and calls `WORKFLOW_DESERIALIZE`. + +## Full Example: A Workflow-Safe Storage Client + +Here's a complete example of a storage client class that survives serialization across workflow boundaries. This pattern is useful when you need an object with methods to be passed as a workflow input or returned from a step. + +```typescript lineNumbers +import { WORKFLOW_SERIALIZE, WORKFLOW_DESERIALIZE } from "@workflow/serde"; + +interface StorageClientOptions { + region: string; + bucket: string; + accessKeyId?: string; + secretAccessKey?: string; +} + +export class WorkflowStorageClient { + private readonly region: string; + private readonly bucket: string; + private readonly accessKeyId?: string; + private readonly secretAccessKey?: string; + + constructor(options: StorageClientOptions) { + this.region = options.region; + this.bucket = options.bucket; + this.accessKeyId = options.accessKeyId; + this.secretAccessKey = options.secretAccessKey; + } + + async upload(key: string, body: Uint8Array) { + "use step"; + const { S3Client, PutObjectCommand } = await import("@aws-sdk/client-s3"); + const client = new S3Client({ + region: this.region, + credentials: this.accessKeyId + ? { accessKeyId: this.accessKeyId, secretAccessKey: this.secretAccessKey! 
} + : undefined, + }); + await client.send( + new PutObjectCommand({ Bucket: this.bucket, Key: key, Body: body }) + ); + } + + async getSignedUrl(key: string): Promise { + "use step"; + const { S3Client, GetObjectCommand } = await import("@aws-sdk/client-s3"); + const { getSignedUrl } = await import("@aws-sdk/s3-request-presigner"); + const client = new S3Client({ region: this.region }); + return getSignedUrl(client, new GetObjectCommand({ Bucket: this.bucket, Key: key })); + } + + // --- Serde protocol --- + + static [WORKFLOW_SERIALIZE](instance: WorkflowStorageClient): StorageClientOptions { + return { + region: instance.region, + bucket: instance.bucket, + accessKeyId: instance.accessKeyId, + secretAccessKey: instance.secretAccessKey, + }; + } + + static [WORKFLOW_DESERIALIZE]( + this: typeof WorkflowStorageClient, + data: StorageClientOptions + ): WorkflowStorageClient { + return new this(data); + } +} +``` + +Now this client can be passed into a workflow and used directly: + +```typescript lineNumbers +import { WorkflowStorageClient } from "./storage-client"; + +export async function processUpload( + client: WorkflowStorageClient, + data: Uint8Array +) { + "use workflow"; + + // client is a real WorkflowStorageClient with working methods + await client.upload("output/result.json", data); + const url = await client.getSignedUrl("output/result.json"); + return { url }; +} +``` + +## When to Use Custom Serde vs Step-as-Factory + +Both patterns solve the same root problem — non-serializable objects can't cross workflow boundaries — but they work differently and suit different situations. + +### Step-as-Factory + +The [step-as-factory pattern](/docs/cookbook/advanced/serializable-steps) passes a **factory function** instead of an object. The real object is constructed inside a step at execution time. 
+ +```typescript lineNumbers +// Factory: returns a step function, not an object +export function createS3Client(region: string) { + return async () => { + "use step"; + const { S3Client } = await import("@aws-sdk/client-s3"); + return new S3Client({ region }); + }; +} +``` + +**Best when:** +- The object has no serializable state (e.g., AI SDK model providers that are pure configuration) +- You don't need to pass the object back out of a step +- The object is only used inside a single step + +### Custom Serde + +Custom serde makes the **object itself** serializable. It can be passed as a workflow input, stored in workflow state, returned from steps, and used across multiple steps. + +```typescript lineNumbers +// Serde: the object survives serialization +class WorkflowStorageClient { + static [WORKFLOW_SERIALIZE](instance) { /* ... */ } + static [WORKFLOW_DESERIALIZE](data) { /* ... */ } +} +``` + +**Best when:** +- The object has meaningful state that must survive serialization (credentials, configuration, accumulated data) +- The object is passed as a workflow input by the caller +- Multiple steps need the same object instance +- You're a library author shipping classes that workflow users will pass around + +### Decision Guide + +| Scenario | Recommended pattern | +|---|---| +| AI SDK model provider (`openai("gpt-4o")`) | Step-as-factory | +| Database/HTTP client with no config state | Step-as-factory | +| Storage client with region + credentials | Custom serde | +| Domain object passed as workflow input | Custom serde | +| Object returned from one step, used in another | Custom serde | +| Library class that users instantiate and pass to `start()` | Custom serde | + +## Key APIs + +- [`WORKFLOW_SERIALIZE`](/docs/api-reference/workflow-serde/workflow-serialize) — symbol for the static serialization method +- [`WORKFLOW_DESERIALIZE`](/docs/api-reference/workflow-serde/workflow-deserialize) — symbol for the static deserialization method +- [`"use 
step"`](/docs/api-reference/workflow/use-step) — marks a function for extraction and serialization +- [`"use workflow"`](/docs/api-reference/workflow/use-workflow) — declares the orchestrator function diff --git a/docs/content/docs/cookbook/advanced/durable-objects.mdx b/docs/content/docs/cookbook/advanced/durable-objects.mdx new file mode 100644 index 0000000000..b3140ac56f --- /dev/null +++ b/docs/content/docs/cookbook/advanced/durable-objects.mdx @@ -0,0 +1,150 @@ +--- +title: Durable Objects +description: Model long-lived stateful entities as workflows that persist state across requests. +type: guide +summary: Build a durable counter or session object whose state survives restarts by using a workflow's event log as the persistence layer. +--- + + +This is an advanced guide. It dives into workflow internals and is not required reading to use workflow. + + +## The Idea + +A workflow's event log already records every step result and replays them to reconstruct state. This is the same property that makes an "object" durable — its fields survive cold starts, crashes, and redeployments. Instead of using a workflow to model a *process*, you can use one to model an *entity* with methods. + +Each "method call" is a hook that the object's workflow loop awaits. External callers resume the hook with a payload describing the operation. The workflow applies the operation, updates its internal state, and waits for the next call. + +## Pattern: Durable Counter + +A counter that persists its value without a database. Each increment/decrement is recorded in the event log. 
+ +```typescript lineNumbers +import { defineHook, getWorkflowMetadata } from "workflow"; +import { z } from "zod"; + +const counterAction = defineHook({ + schema: z.object({ + type: z.enum(["increment", "decrement", "get"]), + amount: z.number().default(1), + }), +}); + +export async function durableCounter() { + "use workflow"; + + let count = 0; + const { workflowRunId } = getWorkflowMetadata(); + + while (true) { + const hook = counterAction.create({ token: `counter:${workflowRunId}` }); + const action = await hook; + + switch (action.type) { + case "increment": + count += action.amount; + await recordState(count); + break; + case "decrement": + count -= action.amount; + await recordState(count); + break; + case "get": + await emitValue(count); + break; + } + } +} + +async function recordState(count: number) { + "use step"; + // Step records the state transition in the event log. + // On replay, the step result restores `count` without re-executing. + return count; +} + +async function emitValue(count: number) { + "use step"; + return { count }; +} +``` + +### Calling the Object + +From an API route, resume the hook to "invoke a method" on the durable object: + +```typescript lineNumbers +import { resumeHook } from "workflow/api"; + +export async function POST(request: Request) { + "use step"; + + const { runId, type, amount } = await request.json(); + await resumeHook(`counter:${runId}`, { type, amount }); + return Response.json({ ok: true }); +} +``` + +## Pattern: Durable Session + +A chat session where conversation history is the durable state. Each user message is a hook event; the workflow accumulates messages and generates responses. 
+ +```typescript lineNumbers +import { defineHook, getWritable, getWorkflowMetadata } from "workflow"; +import { DurableAgent } from "@workflow/ai/agent"; +import { anthropic } from "@workflow/ai/providers/anthropic"; +import { z } from "zod"; +import type { UIMessageChunk, ModelMessage } from "ai"; + +const messageHook = defineHook({ + schema: z.object({ + role: z.literal("user"), + content: z.string(), + }), +}); + +export async function durableSession() { + "use workflow"; + + const writable = getWritable(); + const { workflowRunId: runId } = getWorkflowMetadata(); + const messages: ModelMessage[] = []; + + const agent = new DurableAgent({ + model: anthropic("claude-sonnet-4-20250514"), + instructions: "You are a helpful assistant.", + }); + + while (true) { + const hook = messageHook.create({ token: `session:${runId}` }); + const userMessage = await hook; + + messages.push({ + role: userMessage.role, + content: userMessage.content, + }); + + await agent.stream({ messages, writable }); + } +} +``` + +## When to Use This + +- **Entity-per-workflow**: Each user, document, or device gets its own workflow run. The run ID is the entity ID. +- **No external database needed**: State lives in the event log. Reads replay from the log; writes append to it. +- **Automatic consistency**: Only one execution runs at a time per workflow run, so there are no race conditions on the entity's state. + +## Trade-offs + +- **Read latency**: Accessing current state requires replaying the event log (or caching the last known state in a step result). +- **Not a replacement for databases**: If you need to query across entities (e.g., "all counters above 100"), you still need a database. Durable objects are for single-entity state. +- **Log growth**: Long-lived objects accumulate large event logs. Consider periodic "snapshot" steps that checkpoint the full state. 
+ +## Key APIs + +- [`"use workflow"`](/docs/api-reference/workflow/use-workflow) — declares the orchestrator function +- [`"use step"`](/docs/api-reference/workflow/use-step) — marks functions for durable execution +- [`defineHook`](/docs/api-reference/workflow/define-hook) — type-safe hook for receiving external method calls +- [`getWorkflowMetadata`](/docs/api-reference/workflow/get-workflow-metadata) — access the run ID for deterministic hook tokens +- [`resumeHook`](/docs/api-reference/workflow-api/resume-hook) — invoke a method on the durable object from an API route diff --git a/docs/content/docs/cookbook/advanced/isomorphic-packages.mdx b/docs/content/docs/cookbook/advanced/isomorphic-packages.mdx new file mode 100644 index 0000000000..830c2a02da --- /dev/null +++ b/docs/content/docs/cookbook/advanced/isomorphic-packages.mdx @@ -0,0 +1,145 @@ +--- +title: Isomorphic Packages +description: Publish reusable workflow packages that work both inside and outside the workflow runtime. +type: guide +summary: Use try/catch around getWorkflowMetadata, dynamic imports, and optional peer dependencies to build libraries that run in workflows and in plain Node.js. +--- + + +This is an advanced guide. It dives into workflow internals and is not required reading to use workflow. + + +## The Challenge + +If you're a library author publishing a package that integrates with workflow, your code needs to handle two environments: + +1. **Inside a workflow run** — `getWorkflowMetadata()` works, `"use step"` directives are transformed, and the full workflow runtime is available. +2. **Outside a workflow** — your package is imported in a regular Node.js process, a test suite, or a project that doesn't use workflow at all. + +A hard dependency on `workflow` will crash at import time for users who don't have it installed. + +## Pattern 1: Feature-Detect with `getWorkflowMetadata` + +Use a try/catch to detect whether you're running inside a workflow. 
This lets you add durable behavior when available and fall back to standard execution otherwise. + +```typescript lineNumbers +import { getWorkflowMetadata } from "workflow"; + +export async function processPayment(amount: number, currency: string) { + "use workflow"; + + let runId: string | undefined; + try { + const metadata = getWorkflowMetadata(); + runId = metadata.workflowRunId; + } catch { + // Not running inside a workflow — proceed without durability + runId = undefined; + } + + if (runId) { + // Inside a workflow: use the run ID as an idempotency key + return await chargeWithIdempotency(amount, currency, runId); + } else { + // Outside a workflow: standard charge + return await chargeStandard(amount, currency); + } +} + +async function chargeWithIdempotency(amount: number, currency: string, idempotencyKey: string) { + "use step"; + // Stripe charge with idempotency key from workflow run ID + return { charged: true, amount, currency, idempotencyKey }; +} + +async function chargeStandard(amount: number, currency: string) { + "use step"; + return { charged: true, amount, currency }; +} +``` + +## Pattern 2: Dynamic Imports + +Avoid importing `workflow` at the top level. Use dynamic `import()` so the module is only loaded when actually needed. 
+ +```typescript lineNumbers +export async function createDurableTask(name: string, payload: unknown) { + "use workflow"; + + let sleep: ((duration: string) => Promise) | undefined; + + try { + const wf = await import("workflow"); + sleep = wf.sleep; + } catch { + // workflow not installed — use setTimeout fallback + sleep = undefined; + } + + await executeTask(name, payload); + + if (sleep) { + // Inside workflow: durable sleep that survives restarts + await sleep("5m"); + } else { + // Outside workflow: plain timer (not durable) + await new Promise((resolve) => setTimeout(resolve, 5 * 60 * 1000)); + } + + await sendNotification(name); +} + +async function executeTask(name: string, payload: unknown) { + "use step"; + return { executed: true, name, payload }; +} + +async function sendNotification(name: string) { + "use step"; + return { notified: true, name }; +} +``` + +## Pattern 3: Optional Peer Dependencies + +In your `package.json`, declare `workflow` as an optional peer dependency. This signals to package managers that your library *can* use workflow but doesn't require it. + +```json +{ + "name": "@acme/payments", + "peerDependencies": { + "workflow": ">=1.0.0" + }, + "peerDependenciesMeta": { + "workflow": { + "optional": true + } + } +} +``` + +Then guard all workflow imports with dynamic `import()` and try/catch as shown above. + +## Real-World Examples + +### Mux AI + +The Mux team published a reusable workflow package for video processing. Their library detects the workflow runtime and falls back to standard async processing when workflow isn't available. + +### World ID + +World ID's identity verification library uses `getWorkflowMetadata()` to attach run IDs to their human-in-the-loop verification hooks, but the same library works in non-workflow environments for simple verification flows. + +## Guidelines for Library Authors + +1. **Never hard-import `workflow` at the top level** if your package should work without it. +2. 
**Use `getWorkflowMetadata()` in a try/catch** as the canonical runtime detection pattern. +3. **Mark `workflow` as an optional peer dependency** in `package.json`. +4. **Test both paths**: run your test suite with and without the workflow runtime to catch import errors. +5. **Document the dual behavior**: make it clear in your README which features require workflow and which work standalone. + +## Key APIs + +- [`"use workflow"`](/docs/api-reference/workflow/use-workflow) — declares the orchestrator function +- [`"use step"`](/docs/api-reference/workflow/use-step) — marks functions for durable execution +- [`getWorkflowMetadata`](/docs/api-reference/workflow/get-workflow-metadata) — runtime detection and run ID access diff --git a/docs/content/docs/cookbook/advanced/meta.json b/docs/content/docs/cookbook/advanced/meta.json new file mode 100644 index 0000000000..6b6fb644ff --- /dev/null +++ b/docs/content/docs/cookbook/advanced/meta.json @@ -0,0 +1,11 @@ +{ + "title": "Advanced", + "pages": [ + "serializable-steps", + "durable-objects", + "isomorphic-packages", + "custom-serialization", + "secure-credentials", + "publishing-libraries" + ] +} diff --git a/docs/content/docs/cookbook/advanced/publishing-libraries.mdx b/docs/content/docs/cookbook/advanced/publishing-libraries.mdx new file mode 100644 index 0000000000..f1675bc3ff --- /dev/null +++ b/docs/content/docs/cookbook/advanced/publishing-libraries.mdx @@ -0,0 +1,298 @@ +--- +title: Publishing Libraries +description: Structure and publish npm packages that export workflow functions for consumers to use with Workflow SDK. +type: guide +summary: Learn how to build, export, and test npm packages that ship workflow and step functions — including package.json exports, re-exporting for stable workflow IDs, keeping step I/O clean, and integration testing. +--- + + +This is an advanced guide for library authors who want to publish reusable workflow functions as npm packages. 
It assumes familiarity with `"use workflow"`, `"use step"`, and the workflow execution model. + + +## Package Structure + +A workflow library follows a standard TypeScript package layout with a dedicated `workflows/` directory. Each workflow file exports one or more workflow functions that consumers can import and pass to `start()`. + +``` +my-media-lib/ +├── src/ +│ ├── index.ts # Package entry point +│ ├── types.ts # Shared types +│ ├── workflows/ +│ │ ├── index.ts # Re-exports all workflows +│ │ ├── transcode.ts # Workflow: transcode a video +│ │ └── generate-thumbnails.ts +│ └── lib/ +│ └── api-client.ts # Internal helpers (NOT steps) +├── test-server/ +│ └── workflows.ts # Re-export for integration tests +├── tsup.config.ts +├── package.json +└── tsconfig.json +``` + +### Entry Points and Exports + +Use the `exports` field in `package.json` to expose separate entry points for the main API and the raw workflow functions: + +```json +{ + "name": "@acme/media", + "type": "module", + "exports": { + ".": { + "types": { "import": "./dist/index.d.ts" }, + "import": "./dist/index.js" + }, + "./workflows": { + "types": { "import": "./dist/workflows/index.d.ts" }, + "import": "./dist/workflows/index.js" + } + }, + "files": ["dist"] +} +``` + +The main entry point (`@acme/media`) exports types, utilities, and convenience wrappers. The `./workflows` entry point (`@acme/media/workflows`) exports the raw workflow functions that consumers need for the build system. + +### Source Files + +The package entry re-exports workflows alongside any utilities: + +```typescript lineNumbers +// src/index.ts +export * from "./types"; +export * as workflows from "./workflows"; +``` + +The workflows barrel file re-exports each workflow: + +```typescript lineNumbers +// src/workflows/index.ts +export * from "./transcode"; +export * from "./generate-thumbnails"; +``` + +### Build Configuration + +Use a bundler like `tsup` with separate entry points for each export. 
Mark `workflow` as external so it's resolved from the consumer's project: + +```typescript lineNumbers +// tsup.config.ts +import { defineConfig } from "tsup"; + +export default defineConfig({ + entry: [ + "src/index.ts", + "src/workflows/index.ts", + ], + format: ["esm"], + dts: true, + sourcemap: true, + clean: true, + external: ["workflow"], +}); +``` + +## Re-Exporting for Workflow ID Stability + +Workflow SDK's compiler assigns each workflow function a stable ID based on its position in the source file that the build system processes. When a consumer imports a pre-built workflow from an npm package, the compiler never sees the original source — it only sees the compiled output. This means workflow IDs won't match between the library's development environment and the consumer's app. + +The fix is a **re-export file**. The consumer creates a file in their `workflows/` directory that re-exports the library's workflows. The build system then processes this file and assigns stable IDs. + +### Consumer Setup + +```typescript lineNumbers +// workflows/media.ts (in the consumer's project) +// Re-export library workflows so the build system assigns stable IDs +export * from "@acme/media/workflows"; +``` + +This one-line file is all that's needed. The workflow compiler transforms this file, discovers the workflow and step functions from the library, and assigns IDs that are stable across deployments. + +### Why This Is Necessary + +Without re-exporting, the workflow runtime cannot match a running workflow to its function definition. When a workflow run is replayed after a cold start, the runtime looks up functions by their compiler-assigned IDs. If the IDs don't exist (because the compiler never processed the library's source), replay fails. + +The re-export pattern ensures: + +1. **Stable IDs** — the compiler assigns IDs based on the consumer's source tree +2. **Replay safety** — IDs persist across deployments and cold starts +3. 
**Version upgrades** — re-exported IDs remain stable as long as the consumer's file doesn't change
+
+## Keeping Step I/O Clean
+
+When you publish a workflow library, every step function's inputs and outputs are recorded in the event log. This has two implications:
+
+### 1. Everything Must Be Serializable
+
+Step inputs and outputs must be JSON-serializable. Do not pass or return:
+
+- Class instances (unless they implement custom serialization)
+- Functions or closures
+- `Map`, `Set`, `WeakRef`, or other non-JSON types
+- Circular references
+
+If your library works with complex objects, pass serializable configuration into steps and reconstruct the objects inside the step body.
+
+{/* @skip-typecheck - good/bad comparison with duplicate function names */}
+```typescript lineNumbers
+// Good: pass serializable config, construct inside the step
+async function callExternalApi(endpoint: string, params: Record<string, unknown>) {
+  "use step";
+  const client = createApiClient(process.env.API_KEY!);
+  return await client.request(endpoint, params);
+}
+
+// Bad: pass a pre-constructed client object
+async function callExternalApi(client: ApiClient, params: Record<string, unknown>) {
+  "use step";
+  // ApiClient is not serializable — this will fail on replay
+  return await client.request(params);
+}
+```
+
+See [Serializable Steps](/docs/cookbook/advanced/serializable-steps) for the step-as-factory pattern.
+
+### 2. Secrets Must Not Appear in Step I/O
+
+Step inputs and outputs are persisted in the event log and may be visible in observability tools. 
**Never pass secrets as step arguments or return them from steps.** + +{/* @skip-typecheck - good/bad comparison with duplicate function names */} +```typescript lineNumbers +// Bad: API key appears in the event log +async function fetchData(apiKey: string, query: string) { + "use step"; + const client = createClient(apiKey); + return await client.fetch(query); +} + +// Good: resolve credentials inside the step from environment +async function fetchData(query: string) { + "use step"; + const client = createClient(process.env.API_KEY!); + return await client.fetch(query); +} +``` + +Similarly, helper functions that create API clients using credentials should **not** be marked as steps. If a function's return value would contain sensitive data, keep it as a plain function called inside a step body: + +{/* @skip-typecheck - references undefined ServiceClient */} +```typescript lineNumbers +// This is NOT a step — intentionally, to avoid credentials in step I/O +function createAuthenticatedClient(credentials: { token: string }) { + return new ServiceClient({ auth: credentials.token }); +} + +async function processItem(itemId: string) { + "use step"; + // Resolve credentials and create client inside the step + const client = createAuthenticatedClient({ + token: process.env.SERVICE_TOKEN!, + }); + return await client.process(itemId); +} +``` + +## Testing Workflow Libraries + +Library authors need integration tests that exercise workflows through the full Workflow SDK runtime — not just unit tests of individual functions. + +### Test Server Pattern + +Create a minimal test server that re-exports your library's workflows, just like a consumer would: + +```typescript lineNumbers +// test-server/workflows.ts +export * from "@acme/media/workflows"; +``` + +This test server acts as a stand-in consumer app. Point your test runner at it to exercise the full workflow lifecycle: start, replay, and completion. 
+
+### Vitest Configuration
+
+Use a dedicated Vitest config for integration tests that run against the Workflow SDK runtime:
+
+```typescript lineNumbers
+// vitest.workflowdevkit.config.ts
+import { defineConfig } from "vitest/config";
+
+export default defineConfig({
+  test: {
+    include: ["tests/integration/**/*.workflowdevkit.test.ts"],
+    testTimeout: 120_000, // Workflows may take time to complete
+    setupFiles: ["./tests/setup.ts"],
+  },
+});
+```
+
+Run these tests separately from your unit tests:
+
+```bash
+# Unit tests (fast, no workflow runtime)
+pnpm vitest run tests/unit
+
+# Integration tests (requires workflow runtime)
+pnpm vitest run --config vitest.workflowdevkit.config.ts
+```
+
+### What to Test
+
+- **Happy path**: workflow starts, all steps execute, and the final result is correct
+- **Serialization round-trip**: inputs and outputs survive the event log
+- **Replay**: kill and restart a workflow mid-execution to verify deterministic replay
+- **Error handling**: verify that step failures produce the expected errors
+
+## Working With and Without Workflow Installed
+
+If your library should work both as a standalone package and inside Workflow SDK, declare `workflow` as an optional peer dependency:
+
+```json
+{
+  "peerDependencies": {
+    "workflow": ">=4.0.0"
+  },
+  "peerDependenciesMeta": {
+    "workflow": {
+      "optional": true
+    }
+  }
+}
+```
+
+Use dynamic imports and runtime detection so your library gracefully degrades when workflow is not installed:
+
+```typescript lineNumbers
+async function isWorkflowRuntime(): Promise<boolean> {
+  try {
+    const wf = await import("workflow");
+    if (typeof wf.getWorkflowMetadata !== "function") return false;
+    wf.getWorkflowMetadata();
+    return true;
+  } catch {
+    return false;
+  }
+}
+```
+
+See [Isomorphic Packages](/docs/cookbook/advanced/isomorphic-packages) for the full pattern including feature detection, dynamic imports, and dual-path execution.
+ +## Checklist + +Before publishing a workflow library: + +- [ ] `workflow` is listed as an **optional** peer dependency +- [ ] Separate `./workflows` export in `package.json` for the raw workflow functions +- [ ] `workflow` is marked as **external** in your bundler config +- [ ] Documentation tells consumers to re-export from `@your-lib/workflows` +- [ ] No secrets in step inputs or outputs — credentials are resolved at runtime inside steps +- [ ] All step I/O is JSON-serializable +- [ ] Integration tests use a test server with re-exported workflows +- [ ] Both with-workflow and without-workflow code paths are tested + +## Key APIs + +- [`"use workflow"`](/docs/api-reference/workflow/use-workflow) — declares the orchestrator function +- [`"use step"`](/docs/api-reference/workflow/use-step) — marks functions for durable execution +- [`start`](/docs/api-reference/workflow/start) — starts a workflow run +- [`getWorkflowMetadata`](/docs/api-reference/workflow/get-workflow-metadata) — runtime detection and run ID access diff --git a/docs/content/docs/cookbook/advanced/secure-credentials.mdx b/docs/content/docs/cookbook/advanced/secure-credentials.mdx new file mode 100644 index 0000000000..af1b7a9650 --- /dev/null +++ b/docs/content/docs/cookbook/advanced/secure-credentials.mdx @@ -0,0 +1,353 @@ +--- +title: Secure Credential Handling +description: Protect API keys and secrets from appearing in the workflow event log using encryption, credential providers, and careful step design. +type: guide +summary: Encrypt credentials before start(), resolve secrets at runtime via a credentials provider, and avoid leaking secrets through step I/O. +--- + + +This is an advanced guide. It covers security patterns for workflows that handle sensitive credentials. It is not required reading to use workflow, but is strongly recommended for production multi-tenant applications. 
+
+
+## Why Credentials Need Special Treatment
+
+Workflow SDK persists every step's input and output to an event log for replay and observability. If you pass an API key as a step argument or return it from a step, **the plaintext secret is stored in the event log**.
+
+Three complementary patterns keep secrets out of the log:
+
+1. **Encrypt credentials before `start()`** so the event log only stores ciphertext.
+2. **Use a module-level credentials provider** so steps resolve secrets at runtime instead of receiving them as arguments.
+3. **Keep credential-resolving helpers out of steps** so their return values are never serialized.
+
+---
+
+## Encrypting Credentials Before `start()`
+
+When a caller triggers a workflow, any arguments passed to `start()` are serialized into the event log. If those arguments contain API keys, the keys are stored in plaintext. AES-256-GCM encryption solves this: encrypt on the caller side, decrypt inside a step.
+
+### The Encryption Utility
+
+{/* @skip-typecheck - uses @noble/ciphers and helper functions not available to type-checker */}
+```typescript lineNumbers
+// lib/workflow-crypto.ts
+import { gcm } from "@noble/ciphers/aes.js";
+
+const IV_LENGTH = 12;
+const TAG_LENGTH = 16;
+
+export interface EncryptedPayload {
+  v: 1;
+  alg: "aes-256-gcm";
+  kid?: string; // optional key ID for rotation
+  iv: string;
+  tag: string;
+  ciphertext: string;
+}
+
+export async function encryptForWorkflow<T>(
+  value: T,
+  key: Uint8Array | string,
+  keyId?: string,
+): Promise<EncryptedPayload> {
+  const keyBytes = normalizeKey(key); // validate 32-byte key
+  const iv = new Uint8Array(IV_LENGTH);
+  crypto.getRandomValues(iv);
+
+  const plaintext = new TextEncoder().encode(JSON.stringify(value));
+  const encrypted = gcm(keyBytes, iv).encrypt(plaintext);
+
+  // GCM appends the auth tag to the ciphertext
+  const tag = encrypted.slice(encrypted.length - TAG_LENGTH);
+  const ciphertext = encrypted.slice(0, encrypted.length - TAG_LENGTH);
+
+  return {
+    v: 1,
+    alg: "aes-256-gcm",
+    ...(keyId !== undefined && { kid: keyId }),
+    iv: bytesToBase64(iv),
+    tag: bytesToBase64(tag),
+    ciphertext: bytesToBase64(ciphertext),
+  };
+}
+
+export async function decryptFromWorkflow<T>(
+  payload: EncryptedPayload,
+  key: Uint8Array | string,
+): Promise<T> {
+  const keyBytes = normalizeKey(key);
+  const iv = base64ToBytes(payload.iv);
+  const tag = base64ToBytes(payload.tag);
+  const ciphertext = base64ToBytes(payload.ciphertext);
+
+  // Recombine ciphertext + tag for GCM decryption
+  const combined = new Uint8Array(ciphertext.length + tag.length);
+  combined.set(ciphertext);
+  combined.set(tag, ciphertext.length);
+
+  const plaintext = gcm(keyBytes, iv).decrypt(combined);
+  return JSON.parse(new TextDecoder().decode(plaintext)) as T;
+}
+
+function normalizeKey(key: Uint8Array | string): Uint8Array {
+  const bytes = typeof key === "string" ? base64ToBytes(key) : key;
+  if (bytes.length !== 32) {
+    throw new Error(`Expected 32-byte key, got ${bytes.length}`);
+  }
+  return bytes;
+}
+```
+
+### Encrypting on the Caller Side
+
+{/* @skip-typecheck - uses app-local imports and workflow/api start */}
+```typescript lineNumbers
+// app/api/start-workflow/route.ts
+import { start } from "workflow/api";
+import { encryptForWorkflow } from "@/lib/workflow-crypto";
+import { processDocument } from "@/workflows/process-document";
+
+export async function POST(request: Request) {
+  const { documentId } = await request.json();
+
+  // Encrypt credentials before they enter the event log
+  const encrypted = await encryptForWorkflow(
+    {
+      apiKey: process.env.THIRD_PARTY_API_KEY!,
+      serviceToken: process.env.SERVICE_TOKEN!,
+    },
+    process.env.WORKFLOW_SECRET_KEY!,
+  );
+
+  const run = await start(processDocument, [documentId, encrypted]);
+  return Response.json({ runId: run.runId });
+}
+``` + +### Decrypting Inside a Step + +```typescript lineNumbers +// workflows/process-document.ts +import { decryptFromWorkflow } from "@/lib/workflow-crypto"; +import type { EncryptedPayload } from "@/lib/workflow-crypto"; + +export async function processDocument( + documentId: string, + credentials: EncryptedPayload, +) { + "use workflow"; + + const result = await fetchDocument(documentId, credentials); + return result; +} + +async function fetchDocument( + documentId: string, + credentials: EncryptedPayload, +) { + "use step"; + + // Decrypt inside the step — the decrypted values never leave this function + const { apiKey } = await decryptFromWorkflow<{ apiKey: string }>( + credentials, + process.env.WORKFLOW_SECRET_KEY!, + ); + + const response = await fetch(`https://api.example.com/docs/${documentId}`, { + headers: { Authorization: `Bearer ${apiKey}` }, + }); + + // Only the document data is returned (and logged), not the API key + return response.json(); +} +``` + +The event log stores the encrypted blob as the step input and the document data as the step output. The plaintext API key exists only in memory during step execution. + +### Key Rotation + +The optional `kid` (key ID) field supports key rotation. Include a `kid` when encrypting to identify which key was used. On the decryption side, read `payload.kid` to look up the correct key: + +{/* @skip-typecheck - references variables from prior code blocks */} +```typescript lineNumbers +const encrypted = await encryptForWorkflow( + credentials, + currentKey, + "key-2025-03", // key identifier +); + +// On the decryption side +const key = getKeyById(payload.kid); // look up the right key +const decrypted = await decryptFromWorkflow(payload, key); +``` + +--- + +## Module-Level Credentials Provider + +Encryption works well when credentials originate from the caller. 
But sometimes the deployment environment itself holds the secrets (e.g., environment variables or a secrets manager), and you want steps to resolve them at runtime without receiving them as arguments.
+
+A **credentials provider** is a factory function registered at module scope. Steps call it at runtime to get the credentials they need.
+
+### Registering a Provider
+
+```typescript lineNumbers
+// lib/credentials-provider.ts
+
+type CredentialsProvider = () =>
+  | Promise<Record<string, string> | undefined>
+  | Record<string, string>
+  | undefined;
+
+let credentialsProvider: CredentialsProvider | undefined;
+
+export function setCredentialsProvider(provider?: CredentialsProvider): void {
+  credentialsProvider = provider;
+}
+
+export async function resolveCredentials(
+  input?: Record<string, string>,
+): Promise<Record<string, string>> {
+  // 1. Start with provider credentials as the base
+  const fromProvider = credentialsProvider
+    ? (await credentialsProvider()) ?? {}
+    : {};
+
+  // 2. Merge direct input (overrides provider)
+  return { ...fromProvider, ...input };
+}
+```
+
+### Setting the Provider at App Startup
+
+```typescript lineNumbers
+// app/instrumentation.ts (Next.js) or server entry point
+import { setCredentialsProvider } from "@/lib/credentials-provider";
+
+// Register once at module scope — runs before any workflow step
+setCredentialsProvider(() => ({
+  apiKey: process.env.THIRD_PARTY_API_KEY!,
+  serviceToken: process.env.SERVICE_TOKEN!,
+}));
+```
+
+### Using the Provider Inside Steps
+
+```typescript lineNumbers
+// workflows/analyze.ts
+import { resolveCredentials } from "@/lib/credentials-provider";
+
+export async function analyzeData(datasetId: string) {
+  "use workflow";
+
+  const summary = await runAnalysis(datasetId);
+  return summary;
+}
+
+async function runAnalysis(datasetId: string) {
+  "use step";
+
+  // Resolve credentials at runtime — no secrets in the step's arguments
+  const { apiKey } = await resolveCredentials();
+
+  const response = await fetch(`https://api.example.com/analyze/${datasetId}`, {
+    headers: { Authorization: `Bearer ${apiKey}` },
+  });
+
+  return response.json();
+}
+```
+
+### Resolution Order
+
+When both encryption and a provider are in use, a typical resolution order is:
+
+1. **Environment variables** (direct `process.env` fallback)
+2. **Credentials provider** (module-level factory)
+3. **Decrypted credentials** (from encrypted workflow arguments)
+
+Later sources override earlier ones. This lets a library provide sensible defaults while allowing callers to override per-workflow.
+
+---
+
+## Why Some Functions MUST NOT Be Steps
+
+This is the most subtle pattern. Consider a helper function that creates an API client with credentials:
+
+{/* @skip-typecheck - references resolveCredentials from prior code block */}
+```typescript lineNumbers
+// lib/client-factory.ts
+
+/**
+ * Resolves client configuration for a workflow.
+ * This function is NOT a workflow step to avoid exposing
+ * credentials in step I/O.
+ */
+export async function createClient(
+  credentials?: Record<string, string>,
+) {
+  const { apiKey, serviceToken } = await resolveCredentials(credentials);
+
+  return {
+    apiKey,
+    serviceToken,
+    baseUrl: "https://api.example.com",
+  };
+}
+```
+
+If `createClient` were marked with `"use step"`, its **return value** — which contains the plaintext `apiKey` and `serviceToken` — would be serialized into the event log for observability. This is a credential leak. 
+
+The rule: **functions that return or handle credentials should NOT be steps.** Instead, call them from *inside* a step:
+
+```typescript lineNumbers
+// workflows/process.ts
+import { createClient } from "@/lib/client-factory";
+
+async function uploadResult(data: Record<string, unknown>) {
+  "use step";
+
+  // createClient runs inside this step — its return value
+  // stays in memory and is never serialized to the event log
+  const client = await createClient();
+
+  const response = await fetch(`${client.baseUrl}/upload`, {
+    method: "POST",
+    headers: {
+      Authorization: `Bearer ${client.apiKey}`,
+      "X-Service-Token": client.serviceToken,
+    },
+    body: JSON.stringify(data),
+  });
+
+  // Only the upload result is returned (and logged)
+  return response.json();
+}
+
+export async function processAndUpload(inputData: Record<string, unknown>) {
+  "use workflow";
+
+  const result = await uploadResult(inputData);
+  return result;
+}
+```
+
+### The Key Insight
+
+The event log records:
+
+- **Step inputs**: the arguments passed to the step function
+- **Step outputs**: the return value of the step function
+
+Anything that happens *inside* the step but is not an input or output is invisible to the log. By resolving credentials inside the step and only returning non-sensitive results, you keep secrets out of the event log entirely.
+
+### What to Watch For
+
+| Pattern | Safe? | Why |
+|---------|-------|-----|
+| Step receives API key as argument | No | Input is logged |
+| Step returns an object containing a token | No | Output is logged |
+| Step calls `resolveCredentials()` internally | Yes | Credentials stay in memory |
+| Helper that returns credentials is called inside a step | Yes | Return value is not the step's return value |
+| Helper that returns credentials is marked `"use step"` | No | Step output is logged |
diff --git a/docs/content/docs/cookbook/advanced/serializable-steps.mdx b/docs/content/docs/cookbook/advanced/serializable-steps.mdx
new file mode 100644
index 0000000000..e64ce38b4f
--- /dev/null
+++ b/docs/content/docs/cookbook/advanced/serializable-steps.mdx
@@ -0,0 +1,149 @@
+---
+title: Serializable Steps
+description: Wrap non-serializable objects (like AI model providers) inside step functions so they can cross the workflow boundary.
+type: guide
+summary: Return a callback from a step to defer provider initialization, making non-serializable AI SDK models work inside durable workflows.
+---
+
+This is an advanced guide. It dives into workflow internals and is not required reading to use workflow.
+
+
+## The Problem
+
+Workflow functions run inside a sandboxed VM where every value that crosses a function boundary must be serializable (JSON-safe). AI SDK model providers — `openai("gpt-4o")`, `anthropic("claude-sonnet-4-20250514")`, etc. — return complex objects with methods, closures, and internal state. Passing one directly into a step causes a serialization error.
+
+```typescript lineNumbers
+import { openai } from "@ai-sdk/openai";
+import { DurableAgent } from "@workflow/ai/agent";
+import { getWritable } from "workflow";
+import type { UIMessageChunk } from "ai";
+
+export async function brokenAgent(prompt: string) {
+  "use workflow";
+
+  const writable = getWritable<UIMessageChunk>();
+  const agent = new DurableAgent({
+    // This fails — the model object is not serializable
+    model: openai("gpt-4o"),
+  });
+
+  await agent.stream({ messages: [{ role: "user", content: prompt }], writable });
+}
+```
+
+## The Solution: Step-as-Factory
+
+Instead of passing the model object, pass a **callback function** that returns the model. Marking that callback with `"use step"` tells the compiler to serialize the *function reference* (which is just a string identifier) rather than its return value. The provider is only instantiated at execution time, inside the step's full Node.js runtime.
+
+```typescript lineNumbers
+import { openai as openaiProvider } from "@ai-sdk/openai";
+
+// Returns a step function, not a model object
+export function openai(...args: Parameters<typeof openaiProvider>) {
+  return async () => {
+    "use step";
+    return openaiProvider(...args);
+  };
+}
+```
+
+The `DurableAgent` receives a function (`() => Promise<LanguageModel>`) instead of a model object. When the agent needs to call the LLM, it invokes the factory inside a step where the real provider can be constructed with full Node.js access.
+
+## How `@workflow/ai` Uses This
+
+The `@workflow/ai` package ships pre-wrapped providers for all major AI SDK backends. Each one follows the same pattern:
+
+```typescript lineNumbers
+// packages/ai/src/providers/anthropic.ts
+import { anthropic as anthropicProvider } from "@ai-sdk/anthropic";
+
+export function anthropic(...args: Parameters<typeof anthropicProvider>) {
+  return async () => {
+    "use step";
+    return anthropicProvider(...args);
+  };
+}
+```
+
+This means you import from `@workflow/ai` instead of `@ai-sdk/*` directly:
+
+```typescript lineNumbers
+import { anthropic } from "@workflow/ai/providers/anthropic";
+import { DurableAgent } from "@workflow/ai/agent";
+import { getWritable } from "workflow";
+import type { UIMessageChunk } from "ai";
+
+export async function chatAgent(prompt: string) {
+  "use workflow";
+
+  const writable = getWritable<UIMessageChunk>();
+  const agent = new DurableAgent({
+    model: anthropic("claude-sonnet-4-20250514"),
+  });
+
+  await agent.stream({ messages: [{ role: "user", content: prompt }], writable });
+}
+```
+
+## Writing Your Own Serializable Wrapper
+
+Apply the same pattern to any non-serializable dependency. The key rule: **the outer function captures serializable arguments, and the inner `"use step"` function constructs the real object at runtime**.
+
+```typescript lineNumbers
+import type { S3Client as S3ClientType } from "@aws-sdk/client-s3";
+
+// The arguments (region, bucket) are plain strings — serializable
+export function createS3Client(region: string) {
+  return async (): Promise<S3ClientType> => {
+    "use step";
+    const { S3Client } = await import("@aws-sdk/client-s3");
+    return new S3Client({ region });
+  };
+}
+
+// Usage in a workflow
+export async function processUpload(region: string, key: string) {
+  "use workflow";
+
+  const getClient = createS3Client(region);
+  // getClient is a serializable step reference, not an S3Client
+  await uploadFile(getClient, key);
+}
+
+async function uploadFile(
+  getClient: () => Promise<S3ClientType>,
+  key: string
+) {
+  "use step";
+  const client = await getClient();
+  // Now you have a real S3Client with full Node.js access
+  await client.send(/* ... */);
+}
+```
+
+## Why This Works
+
+1. **Compiler transformation**: `"use step"` tells the SWC plugin to extract the function into a separate bundle. The workflow VM only sees a serializable reference (function ID + captured arguments).
+2. **Closure tracking**: The compiler tracks which variables the step function closes over. Only serializable values (strings, numbers, plain objects) can be captured.
+3. **Deferred construction**: The actual provider/client is only constructed when the step executes in the Node.js runtime — never in the sandboxed workflow VM.
+
+## Bundle optimization with dynamic imports
+
+Step functions run in full Node.js, so they can use `await import()` to load heavy dependencies on demand. This keeps the workflow bundle light -- the sandboxed workflow VM never needs to parse or load these libraries.
+
+```typescript
+async function processWithHeavyLib(data: string) {
+  "use step";
+  const { parse } = await import("heavy-parser-lib");
+  return parse(data);
+}
+```
+
+This is especially useful for large SDKs (AWS, Google Cloud, parser libraries) that would bloat the workflow bundle unnecessarily. The `createS3Client` example [above](#writing-your-own-serializable-wrapper) already uses this pattern with `await import("@aws-sdk/client-s3")`.
+
+## Key APIs
+
+- [`"use step"`](/docs/api-reference/workflow/use-step) — marks a function for extraction and serialization
+- [`"use workflow"`](/docs/api-reference/workflow/use-workflow) — declares the orchestrator function
+- [`DurableAgent`](/docs/api-reference/workflow-ai/durable-agent) — accepts a model factory for durable AI agent streaming
diff --git a/docs/content/docs/cookbook/agent-patterns/durable-agent.mdx b/docs/content/docs/cookbook/agent-patterns/durable-agent.mdx
new file mode 100644
index 0000000000..93bc9b2e82
--- /dev/null
+++ b/docs/content/docs/cookbook/agent-patterns/durable-agent.mdx
@@ -0,0 +1,191 @@
+---
+title: Durable Agent
+description: Replace a stateless AI agent with a durable one that survives crashes, retries tool calls, and streams output.
+type: guide
+summary: Convert an AI SDK Agent into a DurableAgent backed by a workflow, with tools as retryable steps.
+---
+
+Use this pattern to make any AI SDK agent durable. The agent becomes a workflow, tools become steps, and the framework handles retries, streaming, and state persistence automatically.
+
+## Pattern
+
+Replace `Agent` with `DurableAgent`, wrap the function in `"use workflow"`, mark each tool with `"use step"`, and stream output through `getWritable()`.
+ +### Simplified + +```typescript lineNumbers +import { DurableAgent } from "@workflow/ai/agent"; +import { getWritable } from "workflow"; +import { z } from "zod"; +import type { ModelMessage, UIMessageChunk } from "ai"; + +declare function searchFlights(args: { from: string; to: string; date: string }): Promise<{ flights: { id: string; price: number }[] }>; // @setup +declare function bookFlight(args: { flightId: string; passenger: string }): Promise<{ confirmationId: string }>; // @setup + +export async function flightAgent(messages: ModelMessage[]) { + "use workflow"; + + const agent = new DurableAgent({ + model: "anthropic/claude-haiku-4.5", + instructions: "You are a helpful flight booking assistant.", + tools: { + searchFlights: { + description: "Search for available flights", + inputSchema: z.object({ + from: z.string(), + to: z.string(), + date: z.string(), + }), + execute: searchFlights, + }, + bookFlight: { + description: "Book a specific flight", + inputSchema: z.object({ + flightId: z.string(), + passenger: z.string(), + }), + execute: bookFlight, + }, + }, + }); + + await agent.stream({ + messages, + writable: getWritable(), + }); +} +``` + +### Full Implementation + +```typescript lineNumbers +import { DurableAgent } from "@workflow/ai/agent"; +import { getWritable } from "workflow"; +import { z } from "zod"; +import type { ModelMessage, UIMessageChunk } from "ai"; + +// Step: Search flights with full Node.js access and automatic retries +async function searchFlights({ + from, + to, + date, +}: { + from: string; + to: string; + date: string; +}) { + "use step"; + + const response = await fetch( + `https://api.example.com/flights?from=${from}&to=${to}&date=${date}` + ); + if (!response.ok) throw new Error(`Search failed: ${response.status}`); + return response.json(); +} + +// Step: Book a flight — retries on transient failures +async function bookFlight({ + flightId, + passenger, +}: { + flightId: string; + passenger: string; +}) { + "use step"; + + 
const response = await fetch("https://api.example.com/bookings", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ flightId, passenger }), + }); + if (!response.ok) throw new Error(`Booking failed: ${response.status}`); + return response.json(); +} + +// Step: Check flight status +async function checkStatus({ flightId }: { flightId: string }) { + "use step"; + + const response = await fetch( + `https://api.example.com/flights/${flightId}/status` + ); + return response.json(); +} + +export async function flightAgent(messages: ModelMessage[]) { + "use workflow"; + + const writable = getWritable(); + + const agent = new DurableAgent({ + model: "anthropic/claude-haiku-4.5", + instructions: "You are a helpful flight booking assistant.", + tools: { + searchFlights: { + description: "Search for available flights between two airports", + inputSchema: z.object({ + from: z.string().describe("Departure airport code"), + to: z.string().describe("Arrival airport code"), + date: z.string().describe("Travel date (YYYY-MM-DD)"), + }), + execute: searchFlights, + }, + bookFlight: { + description: "Book a specific flight for a passenger", + inputSchema: z.object({ + flightId: z.string().describe("Flight ID from search results"), + passenger: z.string().describe("Passenger full name"), + }), + execute: bookFlight, + }, + checkStatus: { + description: "Check the current status of a flight", + inputSchema: z.object({ + flightId: z.string().describe("Flight ID to check"), + }), + execute: checkStatus, + }, + }, + }); + + const result = await agent.stream({ + messages, + writable, + maxSteps: 10, + }); + + return { messages: result.messages }; +} +``` + +### API Route + +```typescript lineNumbers +import { createUIMessageStreamResponse } from "ai"; +import { start } from "workflow/api"; +import { flightAgent } from "@/workflows/flight-agent"; +import type { UIMessage } from "ai"; +import { convertToModelMessages } from "ai"; + +export async 
function POST(req: Request) { + const { messages }: { messages: UIMessage[] } = await req.json(); + const modelMessages = await convertToModelMessages(messages); + + const run = await start(flightAgent, [modelMessages]); + + return createUIMessageStreamResponse({ + stream: run.readable, + headers: { + "x-workflow-run-id": run.runId, + }, + }); +} +``` + +## Key APIs + +- [`"use workflow"`](/docs/api-reference/workflow/use-workflow) — declares the orchestrator function +- [`"use step"`](/docs/api-reference/workflow/use-step) — declares step functions with retries and full Node.js access +- [`DurableAgent`](/docs/api-reference/workflow-ai/durable-agent) — durable wrapper around AI SDK's Agent +- [`getWritable()`](/docs/api-reference/workflow/get-writable) — streams agent output to the client +- [`start()`](/docs/api-reference/workflow-api/start) — starts a workflow run from an API route diff --git a/docs/content/docs/cookbook/agent-patterns/human-in-the-loop.mdx b/docs/content/docs/cookbook/agent-patterns/human-in-the-loop.mdx new file mode 100644 index 0000000000..90d131b5ad --- /dev/null +++ b/docs/content/docs/cookbook/agent-patterns/human-in-the-loop.mdx @@ -0,0 +1,278 @@ +--- +title: Human-in-the-Loop +description: Pause an AI agent to wait for human approval, then resume based on the decision. +type: guide +summary: Use defineHook with the tool call ID to suspend an agent for human approval, with an optional timeout. +--- + +Use this pattern when an AI agent needs human confirmation before performing a consequential action like booking, purchasing, or publishing. The workflow suspends without consuming resources until the human responds. + +## Pattern + +Create a typed hook using `defineHook()`. When the agent calls the approval tool, the tool creates a hook instance using the tool call ID as the token, then awaits it. The UI renders approval controls, and an API route resumes the hook with the decision. 
+ +### Simplified + +```typescript lineNumbers +import { DurableAgent } from "@workflow/ai/agent"; +import { defineHook, sleep, getWritable } from "workflow"; +import { z } from "zod"; +import type { ModelMessage, UIMessageChunk } from "ai"; + +export const bookingApprovalHook = defineHook({ + schema: z.object({ + approved: z.boolean(), + comment: z.string().optional(), + }), +}); + +declare function confirmBooking(args: { flightId: string; passenger: string }): Promise<{ confirmationId: string }>; // @setup + +// This tool runs at the workflow level (no "use step") because hooks are workflow primitives +async function requestBookingApproval( + { flightId, passenger, price }: { flightId: string; passenger: string; price: number }, + { toolCallId }: { toolCallId: string } +) { + const hook = bookingApprovalHook.create({ token: toolCallId }); + + const result = await Promise.race([ + hook.then((payload) => ({ type: "decision" as const, ...payload })), + sleep("24h").then(() => ({ type: "timeout" as const, approved: false })), + ]); + + if (result.type === "timeout") return "Booking request expired after 24 hours."; + if (!result.approved) return `Booking rejected: ${result.comment || "No reason given"}`; + + const booking = await confirmBooking({ flightId, passenger }); + return `Booked! Confirmation: ${booking.confirmationId}`; +} + +export async function bookingAgent(messages: ModelMessage[]) { + "use workflow"; + + const agent = new DurableAgent({ + model: "anthropic/claude-haiku-4.5", + instructions: "You help book flights. 
Always request approval before booking.", + tools: { + requestBookingApproval: { + description: "Request human approval before booking a flight", + inputSchema: z.object({ + flightId: z.string(), + passenger: z.string(), + price: z.number(), + }), + execute: requestBookingApproval, + }, + }, + }); + + await agent.stream({ + messages, + writable: getWritable(), + }); +} +``` + +### Full Implementation + +```typescript lineNumbers +import { DurableAgent } from "@workflow/ai/agent"; +import { defineHook, sleep, getWritable } from "workflow"; +import { z } from "zod"; +import type { ModelMessage, UIMessageChunk } from "ai"; + +// Define the approval hook with schema validation +export const bookingApprovalHook = defineHook({ + schema: z.object({ + approved: z.boolean(), + comment: z.string().optional(), + }), +}); + +// Step: Search for flights (full Node.js access, automatic retries) +async function searchFlights({ + from, + to, + date, +}: { + from: string; + to: string; + date: string; +}) { + "use step"; + + // Your real flight search API call here + await new Promise((resolve) => setTimeout(resolve, 500)); + return { + flights: [ + { id: "FL-100", airline: "Example Air", price: 299, from, to, date }, + { id: "FL-200", airline: "Demo Airlines", price: 349, from, to, date }, + ], + }; +} + +// Step: Confirm the booking after approval +async function confirmBooking({ + flightId, + passenger, +}: { + flightId: string; + passenger: string; +}) { + "use step"; + + await new Promise((resolve) => setTimeout(resolve, 500)); + return { confirmationId: `CONF-${flightId}-${Date.now().toString(36)}` }; +} + +// Workflow-level tool: hooks must be created in workflow context, not inside steps +async function requestBookingApproval( + { + flightId, + passenger, + price, + }: { flightId: string; passenger: string; price: number }, + { toolCallId }: { toolCallId: string } +) { + // No "use step" — hooks are workflow-level primitives + + const hook = bookingApprovalHook.create({ 
token: toolCallId }); + + // Race: human approval vs. 24-hour timeout + const result = await Promise.race([ + hook.then((payload) => ({ type: "decision" as const, ...payload })), + sleep("24h").then(() => ({ type: "timeout" as const, approved: false })), + ]); + + if (result.type === "timeout") { + return "Booking request expired after 24 hours."; + } + + if (!result.approved) { + return `Booking rejected: ${result.comment || "No reason given"}`; + } + + // Approved — proceed with booking + const booking = await confirmBooking({ flightId, passenger }); + return `Flight ${flightId} booked for ${passenger}. Confirmation: ${booking.confirmationId}`; +} + +export async function bookingAgent(messages: ModelMessage[]) { + "use workflow"; + + const writable = getWritable(); + + const agent = new DurableAgent({ + model: "anthropic/claude-haiku-4.5", + instructions: + "You are a flight booking assistant. Search for flights, then request approval before booking.", + tools: { + searchFlights: { + description: "Search for available flights", + inputSchema: z.object({ + from: z.string().describe("Departure airport code"), + to: z.string().describe("Arrival airport code"), + date: z.string().describe("Travel date (YYYY-MM-DD)"), + }), + execute: searchFlights, + }, + requestBookingApproval: { + description: "Request human approval before booking a flight", + inputSchema: z.object({ + flightId: z.string().describe("Flight ID to book"), + passenger: z.string().describe("Passenger name"), + price: z.number().describe("Total price"), + }), + execute: requestBookingApproval, + }, + }, + }); + + await agent.stream({ messages, writable }); +} +``` + +### API Route for Approvals + +```typescript lineNumbers +import { bookingApprovalHook } from "@/workflows/booking-agent"; + +export async function POST(request: Request) { + const { toolCallId, approved, comment } = await request.json(); + + // Schema validation happens automatically via defineHook + await 
bookingApprovalHook.resume(toolCallId, { approved, comment }); + + return Response.json({ success: true }); +} +``` + +### Approval Component + +```tsx lineNumbers +"use client"; + +import { useState } from "react"; + +export function BookingApproval({ + toolCallId, + input, + output, +}: { + toolCallId: string; + input?: { flightId: string; passenger: string; price: number }; + output?: string; +}) { + const [comment, setComment] = useState(""); + const [isSubmitting, setIsSubmitting] = useState(false); + + if (output) { + return

{output}

; + } + + const handleSubmit = async (approved: boolean) => { + setIsSubmitting(true); + await fetch("/api/hooks/approval", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ toolCallId, approved, comment }), + }); + setIsSubmitting(false); + }; + + return ( +
+ {input && ( +
+
Flight: {input.flightId}
+
Passenger: {input.passenger}
+
Price: ${input.price}
+
+ )} +