From fcec8fbbf7c383f13b5220ceadcfc795bf0ded92 Mon Sep 17 00:00:00 2001 From: Catherine Han Date: Fri, 6 Mar 2026 15:38:35 +1100 Subject: [PATCH 1/3] feat: add flowstudio-power-automate-debug and flowstudio-power-automate-build skills Two companion skills for the FlowStudio Power Automate MCP server: - flowstudio-power-automate-debug: Debug workflow for failed Power Automate cloud flow runs - flowstudio-power-automate-build: Build & deploy flows from natural language descriptions Both require a FlowStudio MCP subscription: https://flowstudio.app These complement the existing flowstudio-power-automate-mcp skill (merged in PR #896). --- .../flowstudio-power-automate-build/SKILL.md | 460 +++++++++++ .../references/action-patterns-connectors.md | 542 +++++++++++++ .../references/action-patterns-core.md | 542 +++++++++++++ .../references/action-patterns-data.md | 734 ++++++++++++++++++ .../references/build-patterns.md | 108 +++ .../references/flow-schema.md | 225 ++++++ .../references/trigger-types.md | 211 +++++ .../flowstudio-power-automate-debug/SKILL.md | 316 ++++++++ .../references/common-errors.md | 188 +++++ .../references/debug-workflow.md | 157 ++++ 10 files changed, 3483 insertions(+) create mode 100644 skills/flowstudio-power-automate-build/SKILL.md create mode 100644 skills/flowstudio-power-automate-build/references/action-patterns-connectors.md create mode 100644 skills/flowstudio-power-automate-build/references/action-patterns-core.md create mode 100644 skills/flowstudio-power-automate-build/references/action-patterns-data.md create mode 100644 skills/flowstudio-power-automate-build/references/build-patterns.md create mode 100644 skills/flowstudio-power-automate-build/references/flow-schema.md create mode 100644 skills/flowstudio-power-automate-build/references/trigger-types.md create mode 100644 skills/flowstudio-power-automate-debug/SKILL.md create mode 100644 skills/flowstudio-power-automate-debug/references/common-errors.md create mode 100644 
skills/flowstudio-power-automate-debug/references/debug-workflow.md diff --git a/skills/flowstudio-power-automate-build/SKILL.md b/skills/flowstudio-power-automate-build/SKILL.md new file mode 100644 index 000000000..86da0a7ba --- /dev/null +++ b/skills/flowstudio-power-automate-build/SKILL.md @@ -0,0 +1,460 @@ +--- +name: flowstudio-power-automate-build +description: >- + Build, scaffold, and deploy Power Automate cloud flows using the FlowStudio + MCP server. Load this skill when asked to: create a flow, build a new flow, + deploy a flow definition, scaffold a Power Automate workflow, construct a flow + JSON, update an existing flow's actions, patch a flow definition, add actions + to a flow, wire up connections, or generate a workflow definition from scratch. + Requires a FlowStudio MCP subscription — see https://mcp.flowstudio.app +--- + +# Build & Deploy Power Automate Flows with FlowStudio MCP + +Step-by-step guide for constructing and deploying Power Automate cloud flows +programmatically through the FlowStudio MCP server. + +**Prerequisite**: A FlowStudio MCP server must be reachable with a valid JWT. +See the `power-automate-mcp` skill for connection setup. +Subscribe at https://mcp.flowstudio.app + +--- + +## Source of Truth + +> **Always call `tools/list` first** to confirm available tool names and their +> parameter schemas. Tool names and parameters may change between server versions. +> This skill covers response shapes, behavioral notes, and build patterns — +> things `tools/list` cannot tell you. If this document disagrees with `tools/list` +> or a real API response, the API wins. 
+ +--- + +## Python Helper + +```python +import json, urllib.request + +MCP_URL = "https://mcp.flowstudio.app/mcp" +MCP_TOKEN = "" + +def mcp(tool, **kwargs): + payload = json.dumps({"jsonrpc": "2.0", "id": 1, "method": "tools/call", + "params": {"name": tool, "arguments": kwargs}}).encode() + req = urllib.request.Request(MCP_URL, data=payload, + headers={"x-api-key": MCP_TOKEN, "Content-Type": "application/json", + "User-Agent": "FlowStudio-MCP/1.0"}) + try: + resp = urllib.request.urlopen(req, timeout=120) + except urllib.error.HTTPError as e: + body = e.read().decode("utf-8", errors="replace") + raise RuntimeError(f"MCP HTTP {e.code}: {body[:200]}") from e + raw = json.loads(resp.read()) + if "error" in raw: + raise RuntimeError(f"MCP error: {json.dumps(raw['error'])}") + return json.loads(raw["result"]["content"][0]["text"]) + +ENV = "" # e.g. Default-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +``` + +--- + +## Step 1 — Safety Check: Does the Flow Already Exist? + +Always look before you build to avoid duplicates: + +```python +results = mcp("list_store_flows", + environmentName=ENV, searchTerm="My New Flow") + +# list_store_flows returns a direct array (no wrapper object) +if len(results) > 0: + # Flow exists — modify rather than create + # id format is "envId.flowId" — split to get the flow UUID + FLOW_ID = results[0]["id"].split(".", 1)[1] + print(f"Existing flow: {FLOW_ID}") + defn = mcp("get_live_flow", environmentName=ENV, flowName=FLOW_ID) +else: + print("Flow not found — building from scratch") + FLOW_ID = None +``` + +--- + +## Step 2 — Obtain Connection References + +Every connector action needs a `connectionName` that points to a key in the +flow's `connectionReferences` map. That key links to an authenticated connection +in the environment. + +> **MANDATORY**: You MUST call `list_live_connections` first — do NOT ask the +> user for connection names or GUIDs. The API returns the exact values you need. 
+> Only prompt the user if the API confirms that required connections are missing. + +### 2a — Always call `list_live_connections` first + +```python +conns = mcp("list_live_connections", environmentName=ENV) + +# Filter to connected (authenticated) connections only +active = [c for c in conns["connections"] + if c["statuses"][0]["status"] == "Connected"] + +# Build a lookup: connectorName → connectionName (id) +conn_map = {} +for c in active: + conn_map[c["connectorName"]] = c["id"] + +print(f"Found {len(active)} active connections") +print("Available connectors:", list(conn_map.keys())) +``` + +### 2b — Determine which connectors the flow needs + +Based on the flow you are building, identify which connectors are required. +Common connector API names: + +| Connector | API name | +|---|---| +| SharePoint | `shared_sharepointonline` | +| Outlook / Office 365 | `shared_office365` | +| Teams | `shared_teams` | +| Approvals | `shared_approvals` | +| OneDrive for Business | `shared_onedriveforbusiness` | +| Excel Online (Business) | `shared_excelonlinebusiness` | +| Dataverse | `shared_commondataserviceforapps` | +| Microsoft Forms | `shared_microsoftforms` | + +> **Flows that need NO connections** (e.g. Recurrence + Compose + HTTP only) +> can skip the rest of Step 2 — omit `connectionReferences` from the deploy call. + +### 2c — If connections are missing, guide the user + +```python +connectors_needed = ["shared_sharepointonline", "shared_office365"] # adjust per flow + +missing = [c for c in connectors_needed if c not in conn_map] + +if not missing: + print("✅ All required connections are available — proceeding to build") +else: + # ── STOP: connections must be created interactively ── + # Connections require OAuth consent in a browser — no API can create them. 
+ print("⚠️ The following connectors have no active connection in this environment:") + for c in missing: + friendly = c.replace("shared_", "").replace("onlinebusiness", " Online (Business)") + print(f" • {friendly} (API name: {c})") + print() + print("Please create the missing connections:") + print(" 1. Open https://make.powerautomate.com/connections") + print(" 2. Select the correct environment from the top-right picker") + print(" 3. Click '+ New connection' for each missing connector listed above") + print(" 4. Sign in and authorize when prompted") + print(" 5. Tell me when done — I will re-check and continue building") + # DO NOT proceed to Step 3 until the user confirms. + # After user confirms, re-run Step 2a to refresh conn_map. +``` + +### 2d — Build the connectionReferences block + +Only execute this after 2c confirms no missing connectors: + +```python +connection_references = {} +for connector in connectors_needed: + connection_references[connector] = { + "connectionName": conn_map[connector], # the GUID from list_live_connections + "source": "Invoker", + "id": f"/providers/Microsoft.PowerApps/apis/{connector}" + } +``` + +> **IMPORTANT — `host.connectionName` in actions**: When building actions in +> Step 3, set `host.connectionName` to the **key** from this map (e.g. +> `shared_teams`), NOT the connection GUID. The GUID only goes inside the +> `connectionReferences` entry. The engine matches the action's +> `host.connectionName` to the key to find the right connection. + +> **Alternative** — if you already have a flow using the same connectors, +> you can extract `connectionReferences` from its definition: +> ```python +> ref_flow = mcp("get_live_flow", environmentName=ENV, flowName="") +> connection_references = ref_flow["properties"]["connectionReferences"] +> ``` + +See the `power-automate-mcp` skill's **connection-references.md** reference +for the full connection reference structure. 
+ +--- + +## Step 3 — Build the Flow Definition + +Construct the definition object. See [flow-schema.md](references/flow-schema.md) +for the full schema and these action pattern references for copy-paste templates: +- [action-patterns-core.md](references/action-patterns-core.md) — Variables, control flow, expressions +- [action-patterns-data.md](references/action-patterns-data.md) — Array transforms, HTTP, parsing +- [action-patterns-connectors.md](references/action-patterns-connectors.md) — SharePoint, Outlook, Teams, Approvals + +```python +definition = { + "$schema": "https://schema.management.azure.com/providers/Microsoft.Logic/schemas/2016-06-01/workflowdefinition.json#", + "contentVersion": "1.0.0.0", + "triggers": { ... }, # see trigger-types.md / build-patterns.md + "actions": { ... } # see ACTION-PATTERNS-*.md / build-patterns.md +} +``` + +> See [build-patterns.md](references/build-patterns.md) for complete, ready-to-use +> flow definitions covering Recurrence+SharePoint+Teams, HTTP triggers, and more. + +--- + +## Step 4 — Deploy (Create or Update) + +`update_live_flow` handles both creation and updates in a single tool. 
+ +### Create a new flow (no existing flow) + +Omit `flowName` — the server generates a new GUID and creates via PUT: + +```python +result = mcp("update_live_flow", + environmentName=ENV, + # flowName omitted → creates a new flow + definition=definition, + connectionReferences=connection_references, + displayName="Overdue Invoice Notifications", + description="Weekly SharePoint → Teams notification flow, built by agent" +) + +if result.get("error") is not None: + print("Create failed:", result["error"]) +else: + # Capture the new flow ID for subsequent steps + FLOW_ID = result["created"] + print(f"✅ Flow created: {FLOW_ID}") +``` + +### Update an existing flow + +Provide `flowName` to PATCH: + +```python +result = mcp("update_live_flow", + environmentName=ENV, + flowName=FLOW_ID, + definition=definition, + connectionReferences=connection_references, + displayName="My Updated Flow", + description="Updated by agent on " + __import__('datetime').datetime.utcnow().isoformat() +) + +if result.get("error") is not None: + print("Update failed:", result["error"]) +else: + print("Update succeeded:", result) +``` + +> ⚠️ `update_live_flow` always returns an `error` key. +> `null` (Python `None`) means success — do not treat the presence of the key as failure. +> +> ⚠️ `description` is required for both create and update. + +### Common deployment errors + +| Error message (contains) | Cause | Fix | +|---|---|---| +| `missing from connectionReferences` | An action's `host.connectionName` references a key that doesn't exist in the `connectionReferences` map | Ensure `host.connectionName` uses the **key** from `connectionReferences` (e.g. 
`shared_teams`), not the raw GUID | +| `ConnectionAuthorizationFailed` / 403 | The connection GUID belongs to another user or is not authorized | Re-run Step 2a and use a connection owned by the current `x-api-key` user | +| `InvalidTemplate` / `InvalidDefinition` | Syntax error in the definition JSON | Check `runAfter` chains, expression syntax, and action type spelling | +| `ConnectionNotConfigured` | A connector action exists but the connection GUID is invalid or expired | Re-check `list_live_connections` for a fresh GUID | + +--- + +## Step 5 — Verify the Deployment + +```python +check = mcp("get_live_flow", environmentName=ENV, flowName=FLOW_ID) + +# Confirm state +print("State:", check["properties"]["state"]) # Should be "Started" + +# Confirm the action we added is there +acts = check["properties"]["definition"]["actions"] +print("Actions:", list(acts.keys())) +``` + +--- + +## Step 6 — Test the Flow + +> **MANDATORY**: Before triggering any test run, **ask the user for confirmation**. +> Running a flow has real side effects — it may send emails, post Teams messages, +> write to SharePoint, start approvals, or call external APIs. Explain what the +> flow will do and wait for explicit approval before calling `trigger_live_flow` +> or `resubmit_live_flow_run`. 
+ +### Updated flows (have prior runs) + +The fastest path — resubmit the most recent run: + +```python +runs = mcp("get_live_flow_runs", environmentName=ENV, flowName=FLOW_ID, top=1) +if runs: + result = mcp("resubmit_live_flow_run", + environmentName=ENV, flowName=FLOW_ID, runName=runs[0]["name"]) + print(result) +``` + +### Flows already using an HTTP trigger + +Fire directly with a test payload: + +```python +schema = mcp("get_live_flow_http_schema", + environmentName=ENV, flowName=FLOW_ID) +print("Expected body:", schema.get("triggerSchema")) + +result = mcp("trigger_live_flow", + environmentName=ENV, flowName=FLOW_ID, + body={"name": "Test", "value": 1}) +print(f"Status: {result['status']}") +``` + +### Brand-new non-HTTP flows (Recurrence, connector triggers, etc.) + +A brand-new Recurrence or connector-triggered flow has no runs to resubmit +and no HTTP endpoint to call. **Deploy with a temporary HTTP trigger first, +test the actions, then swap to the production trigger.** + +#### 6a — Save the real trigger, deploy with a temporary HTTP trigger + +```python +# Save the production trigger you built in Step 3 +production_trigger = definition["triggers"] + +# Replace with a temporary HTTP trigger +definition["triggers"] = { + "manual": { + "type": "Request", + "kind": "Http", + "inputs": { + "schema": {} + } + } +} + +# Deploy (create or update) with the temp trigger +result = mcp("update_live_flow", + environmentName=ENV, + flowName=FLOW_ID, # omit if creating new + definition=definition, + connectionReferences=connection_references, + displayName="Overdue Invoice Notifications", + description="Deployed with temp HTTP trigger for testing") + +if result.get("error") is not None: + print("Deploy failed:", result["error"]) +else: + if not FLOW_ID: + FLOW_ID = result["created"] + print(f"✅ Deployed with temp HTTP trigger: {FLOW_ID}") +``` + +#### 6b — Fire the flow and check the result + +```python +# Trigger the flow +test = mcp("trigger_live_flow", + 
environmentName=ENV, flowName=FLOW_ID) +print(f"Trigger response status: {test['status']}") + +# Wait for the run to complete +import time; time.sleep(15) + +# Check the run result +runs = mcp("get_live_flow_runs", + environmentName=ENV, flowName=FLOW_ID, top=1) +run = runs[0] +print(f"Run {run['name']}: {run['status']}") + +if run["status"] == "Failed": + err = mcp("get_live_flow_run_error", + environmentName=ENV, flowName=FLOW_ID, runName=run["name"]) + root = err["failedActions"][-1] + print(f"Root cause: {root['actionName']} → {root.get('code')}") + # Debug and fix the definition before proceeding + # See power-automate-debug skill for full diagnosis workflow +``` + +#### 6c — Swap to the production trigger + +Once the test run succeeds, replace the temporary HTTP trigger with the real one: + +```python +# Restore the production trigger +definition["triggers"] = production_trigger + +result = mcp("update_live_flow", + environmentName=ENV, + flowName=FLOW_ID, + definition=definition, + connectionReferences=connection_references, + description="Swapped to production trigger after successful test") + +if result.get("error") is not None: + print("Trigger swap failed:", result["error"]) +else: + print("✅ Production trigger deployed — flow is live") +``` + +> **Why this works**: The trigger is just the entry point — the actions are +> identical regardless of how the flow starts. Testing via HTTP trigger +> exercises all the same Compose, SharePoint, Teams, etc. actions. +> +> **Connector triggers** (e.g. "When an item is created in SharePoint"): +> If actions reference `triggerBody()` or `triggerOutputs()`, pass a +> representative test payload in `trigger_live_flow`'s `body` parameter +> that matches the shape the connector trigger would produce. 
+ +--- + +## Gotchas + +| Mistake | Consequence | Prevention | +|---|---|---| +| Missing `connectionReferences` in deploy | 400 "Supply connectionReferences" | Always call `list_live_connections` first | +| `"operationOptions"` missing on Foreach | Parallel execution, race conditions on writes | Always add `"Sequential"` | +| `union(new_data, old_data)` | Old values override new (last collection wins on duplicate keys) | Use `union(old_data, new_data)` | +| `split()` on potentially-null string | `InvalidTemplate` crash | Wrap with `coalesce(field, '')` | +| Checking `result["error"]` exists | Always present; true error is `!= null` | Use `result.get("error") is not None` | +| Flow deployed but state is "Stopped" | Flow won't run on schedule | Check connection auth; re-enable | +| Teams "Chat with Flow bot" recipient as object | 400 `GraphUserDetailNotFound` | Use plain string with trailing semicolon (see below) | + +### Teams `PostMessageToConversation` — Recipient Formats + +The `body/recipient` parameter format depends on the `location` value: + +| Location | `body/recipient` format | Example | +|---|---|---| +| **Chat with Flow bot** | Plain email string with **trailing semicolon** | `"user@contoso.com;"` | +| **Channel** | Object with `groupId` and `channelId` | `{"groupId": "...", "channelId": "..."}` | + +> **Common mistake**: passing `{"to": "user@contoso.com"}` for "Chat with Flow bot" +> returns a 400 `GraphUserDetailNotFound` error. The API expects a plain string. 
+ +--- + +## Reference Files + +- [flow-schema.md](references/flow-schema.md) — Full flow definition JSON schema +- [trigger-types.md](references/trigger-types.md) — Trigger type templates +- [action-patterns-core.md](references/action-patterns-core.md) — Variables, control flow, expressions +- [action-patterns-data.md](references/action-patterns-data.md) — Array transforms, HTTP, parsing +- [action-patterns-connectors.md](references/action-patterns-connectors.md) — SharePoint, Outlook, Teams, Approvals +- [build-patterns.md](references/build-patterns.md) — Complete flow definition templates (Recurrence+SP+Teams, HTTP trigger) + +## Related Skills + +- `power-automate-mcp` — Core connection setup and tool reference +- `power-automate-debug` — Debug failing flows after deployment diff --git a/skills/flowstudio-power-automate-build/references/action-patterns-connectors.md b/skills/flowstudio-power-automate-build/references/action-patterns-connectors.md new file mode 100644 index 000000000..d9102d6de --- /dev/null +++ b/skills/flowstudio-power-automate-build/references/action-patterns-connectors.md @@ -0,0 +1,542 @@ +# FlowStudio MCP — Action Patterns: Connectors + +SharePoint, Outlook, Teams, and Approvals connector action patterns. + +> All examples assume `"runAfter"` is set appropriately. +> Replace `` with the **key** you used in `connectionReferences` +> (e.g. `shared_sharepointonline`, `shared_teams`). This is NOT the connection +> GUID — it is the logical reference name that links the action to its entry in +> the `connectionReferences` map. 
+ +--- + +## SharePoint + +### SharePoint — Get Items + +```json +"Get_SP_Items": { + "type": "OpenApiConnection", + "runAfter": {}, + "inputs": { + "host": { + "apiId": "/providers/Microsoft.PowerApps/apis/shared_sharepointonline", + "connectionName": "", + "operationId": "GetItems" + }, + "parameters": { + "dataset": "https://mytenant.sharepoint.com/sites/mysite", + "table": "MyList", + "$filter": "Status eq 'Active'", + "$top": 500 + } + } +} +``` + +Result reference: `@outputs('Get_SP_Items')?['body/value']` + +> **Dynamic OData filter with string interpolation**: inject a runtime value +> directly into the `$filter` string using `@{...}` syntax: +> ``` +> "$filter": "Title eq '@{outputs('ConfirmationCode')}'" +> ``` +> Note the single-quotes inside double-quotes — correct OData string literal +> syntax. Avoids a separate variable action. + +> **Pagination for large lists**: by default, GetItems stops at `$top`. To auto-paginate +> beyond that, enable the pagination policy on the action. In the flow definition this +> appears as: +> ```json +> "paginationPolicy": { "minimumItemCount": 10000 } +> ``` +> Set `minimumItemCount` to the maximum number of items you expect. The connector will +> keep fetching pages until that count is reached or the list is exhausted. Without this, +> flows silently return a capped result on lists with >5,000 items. + +--- + +### SharePoint — Get Item (Single Row by ID) + +```json +"Get_SP_Item": { + "type": "OpenApiConnection", + "runAfter": {}, + "inputs": { + "host": { + "apiId": "/providers/Microsoft.PowerApps/apis/shared_sharepointonline", + "connectionName": "", + "operationId": "GetItem" + }, + "parameters": { + "dataset": "https://mytenant.sharepoint.com/sites/mysite", + "table": "MyList", + "id": "@triggerBody()?['ID']" + } + } +} +``` + +Result reference: `@body('Get_SP_Item')?['FieldName']` + +> Use `GetItem` (not `GetItems` with a filter) when you already have the ID. 
+> Re-fetching after a trigger gives you the **current** row state, not the +> snapshot captured at trigger time — important if another process may have +> modified the item since the flow started. + +--- + +### SharePoint — Create Item + +```json +"Create_SP_Item": { + "type": "OpenApiConnection", + "runAfter": {}, + "inputs": { + "host": { + "apiId": "/providers/Microsoft.PowerApps/apis/shared_sharepointonline", + "connectionName": "", + "operationId": "PostItem" + }, + "parameters": { + "dataset": "https://mytenant.sharepoint.com/sites/mysite", + "table": "MyList", + "item/Title": "@variables('myTitle')", + "item/Status": "Active" + } + } +} +``` + +--- + +### SharePoint — Update Item + +```json +"Update_SP_Item": { + "type": "OpenApiConnection", + "runAfter": {}, + "inputs": { + "host": { + "apiId": "/providers/Microsoft.PowerApps/apis/shared_sharepointonline", + "connectionName": "", + "operationId": "PatchItem" + }, + "parameters": { + "dataset": "https://mytenant.sharepoint.com/sites/mysite", + "table": "MyList", + "id": "@item()?['ID']", + "item/Status": "Processed" + } + } +} +``` + +--- + +### SharePoint — File Upsert (Create or Overwrite in Document Library) + +SharePoint's `CreateFile` fails if the file already exists. 
To upsert (create or overwrite) +without a prior existence check, use `GetFileMetadataByPath` on **both Succeeded and Failed** +from `CreateFile` — if create failed because the file exists, the metadata call still +returns its ID, which `UpdateFile` can then overwrite: + +```json +"Create_File": { + "type": "OpenApiConnection", + "inputs": { + "host": { "apiId": "/providers/Microsoft.PowerApps/apis/shared_sharepointonline", + "connectionName": "", "operationId": "CreateFile" }, + "parameters": { + "dataset": "https://mytenant.sharepoint.com/sites/mysite", + "folderPath": "/My Library/Subfolder", + "name": "@{variables('filename')}", + "body": "@outputs('Compose_File_Content')" + } + } +}, +"Get_File_Metadata_By_Path": { + "type": "OpenApiConnection", + "runAfter": { "Create_File": ["Succeeded", "Failed"] }, + "inputs": { + "host": { "apiId": "/providers/Microsoft.PowerApps/apis/shared_sharepointonline", + "connectionName": "", "operationId": "GetFileMetadataByPath" }, + "parameters": { + "dataset": "https://mytenant.sharepoint.com/sites/mysite", + "path": "/My Library/Subfolder/@{variables('filename')}" + } + } +}, +"Update_File": { + "type": "OpenApiConnection", + "runAfter": { "Get_File_Metadata_By_Path": ["Succeeded", "Skipped"] }, + "inputs": { + "host": { "apiId": "/providers/Microsoft.PowerApps/apis/shared_sharepointonline", + "connectionName": "", "operationId": "UpdateFile" }, + "parameters": { + "dataset": "https://mytenant.sharepoint.com/sites/mysite", + "id": "@outputs('Get_File_Metadata_By_Path')?['body/{Identifier}']", + "body": "@outputs('Compose_File_Content')" + } + } +} +``` + +> If `Create_File` succeeds, `Get_File_Metadata_By_Path` is `Skipped` and `Update_File` +> still fires (accepting `Skipped`), harmlessly overwriting the file just created. +> If `Create_File` fails (file exists), the metadata call retrieves the existing file's ID +> and `Update_File` overwrites it. Either way you end with the latest content. 
+> +> **Document library system properties** — when iterating a file library result (e.g. +> from `ListFolder` or `GetFilesV2`), use curly-brace property names to access +> SharePoint's built-in file metadata. These are different from list field names: +> ``` +> @item()?['{Name}'] — filename without path (e.g. "report.csv") +> @item()?['{FilenameWithExtension}'] — same as {Name} in most connectors +> @item()?['{Identifier}'] — internal file ID for use in UpdateFile/DeleteFile +> @item()?['{FullPath}'] — full server-relative path +> @item()?['{IsFolder}'] — boolean, true for folder entries +> ``` + +--- + +### SharePoint — GetItemChanges Column Gate + +When a SharePoint "item modified" trigger fires, it doesn't tell you WHICH +column changed. Use `GetItemChanges` to get per-column change flags, then gate +downstream logic on specific columns: + +```json +"Get_Changes": { + "type": "OpenApiConnection", + "runAfter": {}, + "inputs": { + "host": { + "apiId": "/providers/Microsoft.PowerApps/apis/shared_sharepointonline", + "connectionName": "", + "operationId": "GetItemChanges" + }, + "parameters": { + "dataset": "https://mytenant.sharepoint.com/sites/mysite", + "table": "", + "id": "@triggerBody()?['ID']", + "since": "@triggerBody()?['Modified']", + "includeDrafts": false + } + } +} +``` + +Gate on a specific column: + +```json +"expression": { + "and": [{ + "equals": [ + "@body('Get_Changes')?['Column']?['hasChanged']", + true + ] + }] +} +``` + +> **New-item detection:** On the very first modification (version 1.0), +> `GetItemChanges` may report no prior version. Check +> `@equals(triggerBody()?['OData__UIVersionString'], '1.0')` to detect +> newly created items and skip change-gate logic for those. 
+ +--- + +### SharePoint — REST MERGE via HttpRequest + +For cross-list updates or advanced operations not supported by the standard +Update Item connector (e.g., updating a list in a different site), use the +SharePoint REST API via the `HttpRequest` operation: + +```json +"Update_Cross_List_Item": { + "type": "OpenApiConnection", + "runAfter": {}, + "inputs": { + "host": { + "apiId": "/providers/Microsoft.PowerApps/apis/shared_sharepointonline", + "connectionName": "", + "operationId": "HttpRequest" + }, + "parameters": { + "dataset": "https://mytenant.sharepoint.com/sites/target-site", + "parameters/method": "POST", + "parameters/uri": "/_api/web/lists(guid'')/items(@{variables('ItemId')})", + "parameters/headers": { + "Accept": "application/json;odata=nometadata", + "Content-Type": "application/json;odata=nometadata", + "X-HTTP-Method": "MERGE", + "IF-MATCH": "*" + }, + "parameters/body": "{ \"Title\": \"@{variables('NewTitle')}\", \"Status\": \"@{variables('NewStatus')}\" }" + } + } +} +``` + +> **Key headers:** +> - `X-HTTP-Method: MERGE` — tells SharePoint to do a partial update (PATCH semantics) +> - `IF-MATCH: *` — overwrites regardless of current ETag (no conflict check) +> +> The `HttpRequest` operation reuses the existing SharePoint connection — no extra +> authentication needed. Use this when the standard Update Item connector can't +> reach the target list (different site collection, or you need raw REST control). + +--- + +### SharePoint — File as JSON Database (Read + Parse) + +Use a SharePoint document library JSON file as a queryable "database" of +last-known-state records. A separate process (e.g., Power BI dataflow) maintains +the file; the flow downloads and filters it for before/after comparisons. 
+ +```json +"Get_File": { + "type": "OpenApiConnection", + "runAfter": {}, + "inputs": { + "host": { + "apiId": "/providers/Microsoft.PowerApps/apis/shared_sharepointonline", + "connectionName": "", + "operationId": "GetFileContent" + }, + "parameters": { + "dataset": "https://mytenant.sharepoint.com/sites/mysite", + "id": "%252fShared%2bDocuments%252fdata.json", + "inferContentType": false + } + } +}, +"Parse_JSON_File": { + "type": "Compose", + "runAfter": { "Get_File": ["Succeeded"] }, + "inputs": "@json(decodeBase64(body('Get_File')?['$content']))" +}, +"Find_Record": { + "type": "Query", + "runAfter": { "Parse_JSON_File": ["Succeeded"] }, + "inputs": { + "from": "@outputs('Parse_JSON_File')", + "where": "@equals(item()?['id'], variables('RecordId'))" + } +} +``` + +> **Decode chain:** `GetFileContent` returns base64-encoded content in +> `body(...)?['$content']`. Apply `decodeBase64()` then `json()` to get a +> usable array. `Filter Array` then acts as a WHERE clause. +> +> **When to use:** When you need a lightweight "before" snapshot to detect field +> changes from a webhook payload (the "after" state). Simpler than maintaining +> a full SharePoint list mirror — works well for up to ~10K records. +> +> **File path encoding:** In the `id` parameter, SharePoint URL-encodes paths +> twice. Spaces become `%2b` (plus sign), slashes become `%252f`. + +--- + +## Outlook + +### Outlook — Send Email + +```json +"Send_Email": { + "type": "OpenApiConnection", + "runAfter": {}, + "inputs": { + "host": { + "apiId": "/providers/Microsoft.PowerApps/apis/shared_office365", + "connectionName": "", + "operationId": "SendEmailV2" + }, + "parameters": { + "emailMessage/To": "recipient@contoso.com", + "emailMessage/Subject": "Automated notification", + "emailMessage/Body": "

@{outputs('Compose_Message')}

", + "emailMessage/IsHtml": true + } + } +} +``` + +--- + +### Outlook — Get Emails (Read Template from Folder) + +```json +"Get_Email_Template": { + "type": "OpenApiConnection", + "runAfter": {}, + "inputs": { + "host": { + "apiId": "/providers/Microsoft.PowerApps/apis/shared_office365", + "connectionName": "", + "operationId": "GetEmailsV3" + }, + "parameters": { + "folderPath": "Id::", + "fetchOnlyUnread": false, + "includeAttachments": false, + "top": 1, + "importance": "Any", + "fetchOnlyWithAttachment": false, + "subjectFilter": "My Email Template Subject" + } + } +} +``` + +Access subject and body: +``` +@first(outputs('Get_Email_Template')?['body/value'])?['subject'] +@first(outputs('Get_Email_Template')?['body/value'])?['body'] +``` + +> **Outlook-as-CMS pattern**: store a template email in a dedicated Outlook folder. +> Set `fetchOnlyUnread: false` so the template persists after first use. +> Non-technical users can update subject and body by editing that email — +> no flow changes required. Pass subject and body directly into `SendEmailV2`. +> +> To get a folder ID: in Outlook on the web, right-click the folder → open in +> new tab — the folder GUID is in the URL. Prefix it with `Id::` in `folderPath`. 
+ +--- + +## Teams + +### Teams — Post Message + +```json +"Post_Teams_Message": { + "type": "OpenApiConnection", + "runAfter": {}, + "inputs": { + "host": { + "apiId": "/providers/Microsoft.PowerApps/apis/shared_teams", + "connectionName": "", + "operationId": "PostMessageToConversation" + }, + "parameters": { + "poster": "Flow bot", + "location": "Channel", + "body/recipient": { + "groupId": "", + "channelId": "" + }, + "body/messageBody": "@outputs('Compose_Message')" + } + } +} +``` + +#### Variant: Group Chat (1:1 or Multi-Person) + +To post to a group chat instead of a channel, use `"location": "Group chat"` with +a thread ID as the recipient: + +```json +"Post_To_Group_Chat": { + "type": "OpenApiConnection", + "runAfter": {}, + "inputs": { + "host": { + "apiId": "/providers/Microsoft.PowerApps/apis/shared_teams", + "connectionName": "", + "operationId": "PostMessageToConversation" + }, + "parameters": { + "poster": "Flow bot", + "location": "Group chat", + "body/recipient": "19:@thread.v2", + "body/messageBody": "@outputs('Compose_Message')" + } + } +} +``` + +For 1:1 ("Chat with Flow bot"), use `"location": "Chat with Flow bot"` and set +`body/recipient` to the user's email address. + +> **Active-user gate:** When sending notifications in a loop, check the recipient's +> Azure AD account is enabled before posting — avoids failed deliveries to departed +> staff: +> ```json +> "Check_User_Active": { +> "type": "OpenApiConnection", +> "inputs": { +> "host": { "apiId": "/providers/Microsoft.PowerApps/apis/shared_office365users", +> "operationId": "UserProfile_V2" }, +> "parameters": { "id": "@{item()?['Email']}" } +> } +> } +> ``` +> Then gate: `@equals(body('Check_User_Active')?['accountEnabled'], true)` + +--- + +## Approvals + +### Split Approval (Create → Wait) + +The standard "Start and wait for an approval" is a single blocking action. 
+For more control (e.g., posting the approval link in Teams, or adding a timeout +scope), split it into two actions: `CreateAnApproval` (fire-and-forget) then +`WaitForAnApproval` (webhook pause). + +```json +"Create_Approval": { + "type": "OpenApiConnection", + "runAfter": {}, + "inputs": { + "host": { + "apiId": "/providers/Microsoft.PowerApps/apis/shared_approvals", + "connectionName": "", + "operationId": "CreateAnApproval" + }, + "parameters": { + "approvalType": "CustomResponse/Result", + "ApprovalCreationInput/title": "Review: @{variables('ItemTitle')}", + "ApprovalCreationInput/assignedTo": "approver@contoso.com", + "ApprovalCreationInput/details": "Please review and select an option.", + "ApprovalCreationInput/responseOptions": ["Approve", "Reject", "Defer"], + "ApprovalCreationInput/enableNotifications": true, + "ApprovalCreationInput/enableReassignment": true + } + } +}, +"Wait_For_Approval": { + "type": "OpenApiConnectionWebhook", + "runAfter": { "Create_Approval": ["Succeeded"] }, + "inputs": { + "host": { + "apiId": "/providers/Microsoft.PowerApps/apis/shared_approvals", + "connectionName": "", + "operationId": "WaitForAnApproval" + }, + "parameters": { + "approvalName": "@body('Create_Approval')?['name']" + } + } +} +``` + +> **`approvalType` options:** +> - `"Approve/Reject - First to respond"` — binary, first responder wins +> - `"Approve/Reject - Everyone must approve"` — requires all assignees +> - `"CustomResponse/Result"` — define your own response buttons +> +> After `Wait_For_Approval`, read the outcome: +> ``` +> @body('Wait_For_Approval')?['outcome'] → "Approve", "Reject", or custom +> @body('Wait_For_Approval')?['responses'][0]?['responder']?['displayName'] +> @body('Wait_For_Approval')?['responses'][0]?['comments'] +> ``` +> +> The split pattern lets you insert actions between create and wait — e.g., +> posting the approval link to Teams, starting a timeout scope, or logging +> the pending approval to a tracking list. 
diff --git a/skills/flowstudio-power-automate-build/references/action-patterns-core.md b/skills/flowstudio-power-automate-build/references/action-patterns-core.md new file mode 100644 index 000000000..6f561250b --- /dev/null +++ b/skills/flowstudio-power-automate-build/references/action-patterns-core.md @@ -0,0 +1,542 @@ +# FlowStudio MCP — Action Patterns: Core + +Variables, control flow, and expression patterns for Power Automate flow definitions. + +> All examples assume `"runAfter"` is set appropriately. +> Replace `` with the **key** you used in your `connectionReferences` map +> (e.g. `shared_teams`, `shared_office365`) — NOT the connection GUID. + +--- + +## Data & Variables + +### Compose (Store a Value) + +```json +"Compose_My_Value": { + "type": "Compose", + "runAfter": {}, + "inputs": "@variables('myVar')" +} +``` + +Reference: `@outputs('Compose_My_Value')` + +--- + +### Initialize Variable + +```json +"Init_Counter": { + "type": "InitializeVariable", + "runAfter": {}, + "inputs": { + "variables": [{ + "name": "counter", + "type": "Integer", + "value": 0 + }] + } +} +``` + +Types: `"Integer"`, `"Float"`, `"Boolean"`, `"String"`, `"Array"`, `"Object"` + +--- + +### Set Variable + +```json +"Set_Counter": { + "type": "SetVariable", + "runAfter": {}, + "inputs": { + "name": "counter", + "value": "@add(variables('counter'), 1)" + } +} +``` + +--- + +### Append to Array Variable + +```json +"Collect_Item": { + "type": "AppendToArrayVariable", + "runAfter": {}, + "inputs": { + "name": "resultArray", + "value": "@item()" + } +} +``` + +--- + +### Increment Variable + +```json +"Increment_Counter": { + "type": "IncrementVariable", + "runAfter": {}, + "inputs": { + "name": "counter", + "value": 1 + } +} +``` + +> Use `IncrementVariable` (not `SetVariable` with `add()`) for counters inside loops — +> it is atomic and avoids expression errors when the variable is used elsewhere in the +> same iteration. `value` can be any integer or expression, e.g. 
`@mul(item()?['Interval'], 60)` +> to advance a Unix timestamp cursor by N minutes. + +--- + +## Control Flow + +### Condition (If/Else) + +```json +"Check_Status": { + "type": "If", + "runAfter": {}, + "expression": { + "and": [{ "equals": ["@item()?['Status']", "Active"] }] + }, + "actions": { + "Handle_Active": { + "type": "Compose", + "runAfter": {}, + "inputs": "Active user: @{item()?['Name']}" + } + }, + "else": { + "actions": { + "Handle_Inactive": { + "type": "Compose", + "runAfter": {}, + "inputs": "Inactive user" + } + } + } +} +``` + +Comparison operators: `equals`, `not`, `greater`, `greaterOrEquals`, `less`, `lessOrEquals`, `contains` +Logical: `and: [...]`, `or: [...]` + +--- + +### Switch + +```json +"Route_By_Type": { + "type": "Switch", + "runAfter": {}, + "expression": "@triggerBody()?['type']", + "cases": { + "Case_Email": { + "case": "email", + "actions": { "Process_Email": { "type": "Compose", "runAfter": {}, "inputs": "email" } } + }, + "Case_Teams": { + "case": "teams", + "actions": { "Process_Teams": { "type": "Compose", "runAfter": {}, "inputs": "teams" } } + } + }, + "default": { + "actions": { "Unknown_Type": { "type": "Compose", "runAfter": {}, "inputs": "unknown" } } + } +} +``` + +--- + +### Scope (Grouping / Try-Catch) + +Wrap related actions in a Scope to give them a shared name, collapse them in the +designer, and — most importantly — handle their errors as a unit. 
+ +```json +"Scope_Get_Customer": { + "type": "Scope", + "runAfter": {}, + "actions": { + "HTTP_Get_Customer": { + "type": "Http", + "runAfter": {}, + "inputs": { + "method": "GET", + "uri": "https://api.example.com/customers/@{variables('customerId')}" + } + }, + "Compose_Email": { + "type": "Compose", + "runAfter": { "HTTP_Get_Customer": ["Succeeded"] }, + "inputs": "@outputs('HTTP_Get_Customer')?['body/email']" + } + } +}, +"Handle_Scope_Error": { + "type": "Compose", + "runAfter": { "Scope_Get_Customer": ["Failed", "TimedOut"] }, + "inputs": "Scope failed: @{result('Scope_Get_Customer')?[0]?['error']?['message']}" +} +``` + +> Reference scope results: `@result('Scope_Get_Customer')` returns an array of action +> outcomes. Use `runAfter: {"MyScope": ["Failed", "TimedOut"]}` on a follow-up action +> to create try/catch semantics without a Terminate. + +--- + +### Foreach (Sequential) + +```json +"Process_Each_Item": { + "type": "Foreach", + "runAfter": {}, + "foreach": "@outputs('Get_Items')?['body/value']", + "operationOptions": "Sequential", + "actions": { + "Handle_Item": { + "type": "Compose", + "runAfter": {}, + "inputs": "@item()?['Title']" + } + } +} +``` + +> Always include `"operationOptions": "Sequential"` unless parallel is intentional. + +--- + +### Foreach (Parallel with Concurrency Limit) + +```json +"Process_Each_Item_Parallel": { + "type": "Foreach", + "runAfter": {}, + "foreach": "@body('Get_SP_Items')?['value']", + "runtimeConfiguration": { + "concurrency": { + "repetitions": 20 + } + }, + "actions": { + "HTTP_Upsert": { + "type": "Http", + "runAfter": {}, + "inputs": { + "method": "POST", + "uri": "https://api.example.com/contacts/@{item()?['Email']}" + } + } + } +} +``` + +> Set `repetitions` to control how many items are processed simultaneously. +> Practical values: `5–10` for external API calls (respect rate limits), +> `20–50` for internal/fast operations. 
+> Omit `runtimeConfiguration.concurrency` entirely for the platform default +> (currently 50). Do NOT use `"operationOptions": "Sequential"` and concurrency together. + +--- + +### Wait (Delay) + +```json +"Delay_10_Minutes": { + "type": "Wait", + "runAfter": {}, + "inputs": { + "interval": { + "count": 10, + "unit": "Minute" + } + } +} +``` + +Valid `unit` values: `"Second"`, `"Minute"`, `"Hour"`, `"Day"` + +> Use a Delay + re-fetch as a deduplication guard: wait for any competing process +> to complete, then re-read the record before acting. This avoids double-processing +> when multiple triggers or manual edits can race on the same item. + +--- + +### Terminate (Success or Failure) + +```json +"Terminate_Success": { + "type": "Terminate", + "runAfter": {}, + "inputs": { + "runStatus": "Succeeded" + } +}, +"Terminate_Failure": { + "type": "Terminate", + "runAfter": { "Risky_Action": ["Failed"] }, + "inputs": { + "runStatus": "Failed", + "runError": { + "code": "StepFailed", + "message": "@{outputs('Get_Error_Message')}" + } + } +} +``` + +--- + +### Do Until (Loop Until Condition) + +Repeats a block of actions until an exit condition becomes true. +Use when the number of iterations is not known upfront (e.g. paginating an API, +walking a time range, polling until a status changes). + +```json +"Do_Until_Done": { + "type": "Until", + "runAfter": {}, + "expression": "@greaterOrEquals(variables('cursor'), variables('endValue'))", + "limit": { + "count": 5000, + "timeout": "PT5H" + }, + "actions": { + "Do_Work": { + "type": "Compose", + "runAfter": {}, + "inputs": "@variables('cursor')" + }, + "Advance_Cursor": { + "type": "IncrementVariable", + "runAfter": { "Do_Work": ["Succeeded"] }, + "inputs": { + "name": "cursor", + "value": 1 + } + } + } +} +``` + +> Always set `limit.count` and `limit.timeout` explicitly — the platform defaults are +> low (60 iterations, 1 hour). 
For time-range walkers use `limit.count: 5000` and +> `limit.timeout: "PT5H"` (ISO 8601 duration). +> +> The exit condition is evaluated **before** each iteration. Initialise your cursor +> variable before the loop so the condition can evaluate correctly on the first pass. + +--- + +### Async Polling with RequestId Correlation + +When an API starts a long-running job asynchronously (e.g. Power BI dataset refresh, +report generation, batch export), the trigger call returns a request ID. Capture it +from the **response header**, then poll a status endpoint filtering by that exact ID: + +```json +"Start_Job": { + "type": "Http", + "inputs": { "method": "POST", "uri": "https://api.example.com/jobs" } +}, +"Capture_Request_ID": { + "type": "Compose", + "runAfter": { "Start_Job": ["Succeeded"] }, + "inputs": "@outputs('Start_Job')?['headers/X-Request-Id']" +}, +"Initialize_Status": { + "type": "InitializeVariable", + "inputs": { "variables": [{ "name": "jobStatus", "type": "String", "value": "Running" }] } +}, +"Poll_Until_Done": { + "type": "Until", + "expression": "@not(equals(variables('jobStatus'), 'Running'))", + "limit": { "count": 60, "timeout": "PT30M" }, + "actions": { + "Delay": { "type": "Wait", "inputs": { "interval": { "count": 20, "unit": "Second" } } }, + "Get_History": { + "type": "Http", + "runAfter": { "Delay": ["Succeeded"] }, + "inputs": { "method": "GET", "uri": "https://api.example.com/jobs/history" } + }, + "Filter_This_Job": { + "type": "Query", + "runAfter": { "Get_History": ["Succeeded"] }, + "inputs": { + "from": "@outputs('Get_History')?['body/items']", + "where": "@equals(item()?['requestId'], outputs('Capture_Request_ID'))" + } + }, + "Set_Status": { + "type": "SetVariable", + "runAfter": { "Filter_This_Job": ["Succeeded"] }, + "inputs": { + "name": "jobStatus", + "value": "@first(body('Filter_This_Job'))?['status']" + } + } + } +}, +"Handle_Failure": { + "type": "If", + "runAfter": { "Poll_Until_Done": ["Succeeded"] }, + "expression": { 
"equals": ["@variables('jobStatus')", "Failed"] }, + "actions": { "Terminate_Failed": { "type": "Terminate", "inputs": { "runStatus": "Failed" } } }, + "else": { "actions": {} } +} +``` + +Access response headers: `@outputs('Start_Job')?['headers/X-Request-Id']` + +> **Status variable initialisation**: set a sentinel value (`"Running"`, `"Unknown"`) before +> the loop. The exit condition tests for any value other than the sentinel. +> This way an empty poll result (job not yet in history) leaves the variable unchanged +> and the loop continues — it doesn't accidentally exit on null. +> +> **Filter before extracting**: always `Filter Array` the history to your specific +> request ID before calling `first()`. History endpoints return all jobs; without +> filtering, status from a different concurrent job can corrupt your poll. + +--- + +### runAfter Fallback (Failed → Alternative Action) + +Route to a fallback action when a primary action fails — without a Condition block. +Simply set `runAfter` on the fallback to accept `["Failed"]` from the primary: + +```json +"HTTP_Get_Hi_Res": { + "type": "Http", + "runAfter": {}, + "inputs": { "method": "GET", "uri": "https://api.example.com/data?resolution=hi-res" } +}, +"HTTP_Get_Low_Res": { + "type": "Http", + "runAfter": { "HTTP_Get_Hi_Res": ["Failed"] }, + "inputs": { "method": "GET", "uri": "https://api.example.com/data?resolution=low-res" } +} +``` + +> Actions that follow can use `runAfter` accepting both `["Succeeded", "Skipped"]` to +> handle either path — see **Fan-In Join Gate** below. + +--- + +### Fan-In Join Gate (Merge Two Mutually Exclusive Branches) + +When two branches are mutually exclusive (only one can succeed per run), use a single +downstream action that accepts `["Succeeded", "Skipped"]` from **both** branches. 
+The gate fires exactly once regardless of which branch ran:
+
+```json
+"Increment_Count": {
+  "type": "IncrementVariable",
+  "runAfter": {
+    "Update_Hi_Res_Metadata": ["Succeeded", "Skipped"],
+    "Update_Low_Res_Metadata": ["Succeeded", "Skipped"]
+  },
+  "inputs": { "name": "LoopCount", "value": 1 }
+}
+```
+
+> This avoids duplicating the downstream action in each branch. The key insight:
+> whichever branch was skipped reports `Skipped` — the gate accepts that state and
+> fires once. Only works cleanly when the two branches are truly mutually exclusive
+> (e.g. one is `runAfter: [...Failed]` of the other).
+
+---
+
+## Expressions
+
+### Common Expression Patterns
+
+```
+Null-safe field access: @item()?['FieldName']
+Null guard: @coalesce(item()?['Name'], 'Unknown')
+String format: @{variables('firstName')} @{variables('lastName')}
+Date today: @utcNow()
+Formatted date: @formatDateTime(utcNow(), 'dd/MM/yyyy')
+Add days: @addDays(utcNow(), 7)
+Array length: @length(variables('myArray'))
+Filter array: use the Filter Array (Query) action — the expression language has no filter() or lambda syntax
+Union (new wins): @union(body('New_Data'), outputs('Old_Data'))
+Sort: @sort(variables('myArray'), 'Date')
+Unix timestamp → date: @formatDateTime(addSeconds('1970-1-1', triggerBody()?['created']), 'yyyy-MM-dd')
+Date → Unix milliseconds: @div(sub(ticks(startOfDay(item()?['Created'])), ticks(formatDateTime('1970-01-01Z','o'))), 10000)
+Date → Unix seconds: @div(sub(ticks(item()?['Start']), ticks('1970-01-01T00:00:00Z')), 10000000)
+Unix seconds → datetime: @addSeconds('1970-01-01T00:00:00Z', int(variables('Unix')))
+Coalesce as no-else: @coalesce(outputs('Optional_Step'), outputs('Default_Step'))
+Flow elapsed minutes: @div(float(sub(ticks(utcNow()), ticks(outputs('Flow_Start')))), 600000000)
+HH:mm time string: @formatDateTime(outputs('Local_Datetime'), 'HH:mm')
+Response header: @outputs('HTTP_Action')?['headers/X-Request-Id']
+Array max (by field): 
@reverse(sort(body('Select_Items'), 'Date'))[0] +Integer day span: @int(split(dateDifference(outputs('Start'), outputs('End')), '.')[0]) +ISO week number: @div(add(dayofyear(addDays(subtractFromTime(date, sub(dayofweek(date),1), 'Day'), 3)), 6), 7) +Join errors to string: @if(equals(length(variables('Errors')),0), null, concat(join(variables('Errors'),', '),' not found.')) +Normalize before compare: @replace(coalesce(outputs('Value'),''),'_',' ') +Robust non-empty check: @greater(length(trim(coalesce(string(outputs('Val')), ''))), 0) +``` + +### Newlines in Expressions + +> **`\n` does NOT produce a newline inside Power Automate expressions.** It is +> treated as a literal backslash + `n` and will either appear verbatim or cause +> a validation error. + +Use `decodeUriComponent('%0a')` wherever you need a newline character: + +``` +Newline (LF): decodeUriComponent('%0a') +CRLF: decodeUriComponent('%0d%0a') +``` + +Example — multi-line Teams or email body via `concat()`: +```json +"Compose_Message": { + "type": "Compose", + "inputs": "@concat('Hi ', outputs('Get_User')?['body/displayName'], ',', decodeUriComponent('%0a%0a'), 'Your report is ready.', decodeUriComponent('%0a'), '- The Team')" +} +``` + +Example — `join()` with newline separator: +```json +"Compose_List": { + "type": "Compose", + "inputs": "@join(body('Select_Names'), decodeUriComponent('%0a'))" +} +``` + +> This is the only reliable way to embed newlines in dynamically built strings +> in Power Automate flow definitions (confirmed against Logic Apps runtime). + +--- + +### Sum an array (XPath trick) + +Power Automate has no native `sum()` function. 
Use XPath on XML instead: + +```json +"Prepare_For_Sum": { + "type": "Compose", + "runAfter": {}, + "inputs": { "root": { "numbers": "@body('Select_Amounts')" } } +}, +"Sum": { + "type": "Compose", + "runAfter": { "Prepare_For_Sum": ["Succeeded"] }, + "inputs": "@xpath(xml(outputs('Prepare_For_Sum')), 'sum(/root/numbers)')" +} +``` + +`Select_Amounts` must output a flat array of numbers (use a **Select** action to extract a single numeric field first). The result is a number you can use directly in conditions or calculations. + +> This is the only way to aggregate (sum/min/max) an array without a loop in Power Automate. diff --git a/skills/flowstudio-power-automate-build/references/action-patterns-data.md b/skills/flowstudio-power-automate-build/references/action-patterns-data.md new file mode 100644 index 000000000..53972c464 --- /dev/null +++ b/skills/flowstudio-power-automate-build/references/action-patterns-data.md @@ -0,0 +1,734 @@ +# FlowStudio MCP — Action Patterns: Data Transforms + +Array operations, HTTP calls, parsing, and data transformation patterns. + +> All examples assume `"runAfter"` is set appropriately. +> Replace `` with the GUID from `connectionReferences`. + +--- + +## Array Operations + +### Select (Reshape / Project an Array) + +Transforms each item in an array, keeping only the columns you need or renaming them. +Avoids carrying large objects through the rest of the flow. + +```json +"Select_Needed_Columns": { + "type": "Select", + "runAfter": {}, + "inputs": { + "from": "@outputs('HTTP_Get_Subscriptions')?['body/data']", + "select": { + "id": "@item()?['id']", + "status": "@item()?['status']", + "trial_end": "@item()?['trial_end']", + "cancel_at": "@item()?['cancel_at']", + "interval": "@item()?['plan']?['interval']" + } + } +} +``` + +Result reference: `@body('Select_Needed_Columns')` — returns a direct array of reshaped objects. + +> Use Select before looping or filtering to reduce payload size and simplify +> downstream expressions. 
Works on any array — SP results, HTTP responses, variables. +> +> **Tips:** +> - **Single-to-array coercion:** When an API returns a single object but you need +> Select (which requires an array), wrap it: `@array(body('Get_Employee')?['data'])`. +> The output is a 1-element array — access results via `?[0]?['field']`. +> - **Null-normalize optional fields:** Use `@if(empty(item()?['field']), null, item()?['field'])` +> on every optional field to normalize empty strings, missing properties, and empty +> objects to explicit `null`. Ensures consistent downstream `@equals(..., @null)` checks. +> - **Flatten nested objects:** Project nested properties into flat fields: +> ``` +> "manager_name": "@if(empty(item()?['manager']?['name']), null, item()?['manager']?['name'])" +> ``` +> This enables direct field-level comparison with a flat schema from another source. + +--- + +### Filter Array (Query) + +Filters an array to items matching a condition. Use the action form (not the `filter()` +expression) for complex multi-condition logic — it's clearer and easier to maintain. + +```json +"Filter_Active_Subscriptions": { + "type": "Query", + "runAfter": {}, + "inputs": { + "from": "@body('Select_Needed_Columns')", + "where": "@and(or(equals(item().status, 'trialing'), equals(item().status, 'active')), equals(item().cancel_at, null))" + } +} +``` + +Result reference: `@body('Filter_Active_Subscriptions')` — direct filtered array. + +> Tip: run multiple Filter Array actions on the same source array to create +> named buckets (e.g. active, being-canceled, fully-canceled), then use +> `coalesce(first(body('Filter_A')), first(body('Filter_B')), ...)` to pick +> the highest-priority match without any loops. + +--- + +### Create CSV Table (Array → CSV String) + +Converts an array of objects into a CSV-formatted string — no connector call, no code. +Use after a `Select` or `Filter Array` to export data or pass it to a file-write action. 
+ +```json +"Create_CSV": { + "type": "Table", + "runAfter": {}, + "inputs": { + "from": "@body('Select_Output_Columns')", + "format": "CSV" + } +} +``` + +Result reference: `@body('Create_CSV')` — a plain string with header row + data rows. + +```json +// Custom column order / renamed headers: +"Create_CSV_Custom": { + "type": "Table", + "inputs": { + "from": "@body('Select_Output_Columns')", + "format": "CSV", + "columns": [ + { "header": "Date", "value": "@item()?['transactionDate']" }, + { "header": "Amount", "value": "@item()?['amount']" }, + { "header": "Description", "value": "@item()?['description']" } + ] + } +} +``` + +> Without `columns`, headers are taken from the object property names in the source array. +> With `columns`, you control header names and column order explicitly. +> +> The output is a raw string. Write it to a file with `CreateFile` or `UpdateFile` +> (set `body` to `@body('Create_CSV')`), or store in a variable with `SetVariable`. +> +> If source data came from Power BI's `ExecuteDatasetQuery`, column names will be +> wrapped in square brackets (e.g. `[Amount]`). Strip them before writing: +> `@replace(replace(body('Create_CSV'),'[',''),']','')` + +--- + +### range() + Select for Array Generation + +`range(0, N)` produces an integer sequence `[0, 1, 2, …, N-1]`. 
Pipe it through +a Select action to generate date series, index grids, or any computed array +without a loop: + +```json +// Generate 14 consecutive dates starting from a base date +"Generate_Date_Series": { + "type": "Select", + "inputs": { + "from": "@range(0, 14)", + "select": "@addDays(outputs('Base_Date'), item(), 'yyyy-MM-dd')" + } +} +``` + +Result: `@body('Generate_Date_Series')` → `["2025-01-06", "2025-01-07", …, "2025-01-19"]` + +```json +// Flatten a 2D array (rows × cols) into 1D using arithmetic indexing +"Flatten_Grid": { + "type": "Select", + "inputs": { + "from": "@range(0, mul(length(outputs('Rows')), length(outputs('Cols'))))", + "select": { + "row": "@outputs('Rows')[div(item(), length(outputs('Cols')))]", + "col": "@outputs('Cols')[mod(item(), length(outputs('Cols')))]" + } + } +} +``` + +> `range()` is zero-based. The Cartesian product pattern above uses `div(i, cols)` +> for the row index and `mod(i, cols)` for the column index — equivalent to a +> nested for-loop flattened into a single pass. Useful for generating time-slot × +> date grids, shift × location assignments, etc. 
+ +--- + +### Dynamic Dictionary via json(concat(join())) + +When you need O(1) key→value lookups at runtime and Power Automate has no native +dictionary type, build one from an array using Select + join + json: + +```json +"Build_Key_Value_Pairs": { + "type": "Select", + "inputs": { + "from": "@body('Get_Lookup_Items')?['value']", + "select": "@concat('\"', item()?['Key'], '\":\"', item()?['Value'], '\"')" + } +}, +"Assemble_Dictionary": { + "type": "Compose", + "inputs": "@json(concat('{', join(body('Build_Key_Value_Pairs'), ','), '}'))" +} +``` + +Lookup: `@outputs('Assemble_Dictionary')?['myKey']` + +```json +// Practical example: date → rate-code lookup for business rules +"Build_Holiday_Rates": { + "type": "Select", + "inputs": { + "from": "@body('Get_Holidays')?['value']", + "select": "@concat('\"', formatDateTime(item()?['Date'], 'yyyy-MM-dd'), '\":\"', item()?['RateCode'], '\"')" + } +}, +"Holiday_Dict": { + "type": "Compose", + "inputs": "@json(concat('{', join(body('Build_Holiday_Rates'), ','), '}'))" +} +``` + +Then inside a loop: `@coalesce(outputs('Holiday_Dict')?[item()?['Date']], 'Standard')` + +> The `json(concat('{', join(...), '}'))` pattern works for string values. For numeric +> or boolean values, omit the inner escaped quotes around the value portion. +> Keys must be unique — duplicate keys silently overwrite earlier ones. +> This replaces deeply nested `if(equals(key,'A'),'X', if(equals(key,'B'),'Y', ...))` chains. + +--- + +### union() for Changed-Field Detection + +When you need to find records where *any* of several fields has changed, run one +`Filter Array` per field and `union()` the results. 
This avoids a complex +multi-condition filter and produces a clean deduplicated set: + +```json +"Filter_Name_Changed": { + "type": "Query", + "inputs": { "from": "@body('Existing_Records')", + "where": "@not(equals(item()?['name'], item()?['dest_name']))" } +}, +"Filter_Status_Changed": { + "type": "Query", + "inputs": { "from": "@body('Existing_Records')", + "where": "@not(equals(item()?['status'], item()?['dest_status']))" } +}, +"All_Changed": { + "type": "Compose", + "inputs": "@union(body('Filter_Name_Changed'), body('Filter_Status_Changed'))" +} +``` + +Reference: `@outputs('All_Changed')` — deduplicated array of rows where anything changed. + +> `union()` deduplicates by object identity, so a row that changed in both fields +> appears once. Add more `Filter_*_Changed` inputs to `union()` as needed: +> `@union(body('F1'), body('F2'), body('F3'))` + +--- + +### File-Content Change Gate + +Before running expensive processing on a file or blob, compare its current content +to a stored baseline. Skip entirely if nothing has changed — makes sync flows +idempotent and safe to re-run or schedule aggressively. + +```json +"Get_File_From_Source": { ... }, +"Get_Stored_Baseline": { ... }, +"Condition_File_Changed": { + "type": "If", + "expression": { + "not": { + "equals": [ + "@base64(body('Get_File_From_Source'))", + "@body('Get_Stored_Baseline')" + ] + } + }, + "actions": { + "Update_Baseline": { "...": "overwrite stored copy with new content" }, + "Process_File": { "...": "all expensive work goes here" } + }, + "else": { "actions": {} } +} +``` + +> Store the baseline as a file in SharePoint or blob storage — `base64()`-encode the +> live content before comparing so binary and text files are handled uniformly. +> Write the new baseline **before** processing so a re-run after a partial failure +> does not re-process the same file again. 
+ +--- + +### Set-Join for Sync (Update Detection without Nested Loops) + +When syncing a source collection into a destination (e.g. API response → SharePoint list, +CSV → database), avoid nested `Apply to each` loops to find changed records. +Instead, **project flat key arrays** and use `contains()` to perform set operations — +zero nested loops, and the final loop only touches changed items. + +**Full insert/update/delete sync pattern:** + +```json +// Step 1 — Project a flat key array from the DESTINATION (e.g. SharePoint) +"Select_Dest_Keys": { + "type": "Select", + "inputs": { + "from": "@outputs('Get_Dest_Items')?['body/value']", + "select": "@item()?['Title']" + } +} +// → ["KEY1", "KEY2", "KEY3", ...] + +// Step 2 — INSERT: source rows whose key is NOT in destination +"Filter_To_Insert": { + "type": "Query", + "inputs": { + "from": "@body('Source_Array')", + "where": "@not(contains(body('Select_Dest_Keys'), item()?['key']))" + } +} +// → Apply to each Filter_To_Insert → CreateItem + +// Step 3 — INNER JOIN: source rows that exist in destination +"Filter_Already_Exists": { + "type": "Query", + "inputs": { + "from": "@body('Source_Array')", + "where": "@contains(body('Select_Dest_Keys'), item()?['key'])" + } +} + +// Step 4 — UPDATE: one Filter per tracked field, then union them +"Filter_Field1_Changed": { + "type": "Query", + "inputs": { + "from": "@body('Filter_Already_Exists')", + "where": "@not(equals(item()?['field1'], item()?['dest_field1']))" + } +} +"Filter_Field2_Changed": { + "type": "Query", + "inputs": { + "from": "@body('Filter_Already_Exists')", + "where": "@not(equals(item()?['field2'], item()?['dest_field2']))" + } +} +"Union_Changed": { + "type": "Compose", + "inputs": "@union(body('Filter_Field1_Changed'), body('Filter_Field2_Changed'))" +} +// → rows where ANY tracked field differs + +// Step 5 — Resolve destination IDs for changed rows (no nested loop) +"Select_Changed_Keys": { + "type": "Select", + "inputs": { "from": 
"@outputs('Union_Changed')", "select": "@item()?['key']" } +} +"Filter_Dest_Items_To_Update": { + "type": "Query", + "inputs": { + "from": "@outputs('Get_Dest_Items')?['body/value']", + "where": "@contains(body('Select_Changed_Keys'), item()?['Title'])" + } +} +// Step 6 — Single loop over changed items only +"Apply_to_each_Update": { + "type": "Foreach", + "foreach": "@body('Filter_Dest_Items_To_Update')", + "actions": { + "Get_Source_Row": { + "type": "Query", + "inputs": { + "from": "@outputs('Union_Changed')", + "where": "@equals(item()?['key'], items('Apply_to_each_Update')?['Title'])" + } + }, + "Update_Item": { + "...": "...", + "id": "@items('Apply_to_each_Update')?['ID']", + "item/field1": "@first(body('Get_Source_Row'))?['field1']" + } + } +} + +// Step 7 — DELETE: destination keys NOT in source +"Select_Source_Keys": { + "type": "Select", + "inputs": { "from": "@body('Source_Array')", "select": "@item()?['key']" } +} +"Filter_To_Delete": { + "type": "Query", + "inputs": { + "from": "@outputs('Get_Dest_Items')?['body/value']", + "where": "@not(contains(body('Select_Source_Keys'), item()?['Title']))" + } +} +// → Apply to each Filter_To_Delete → DeleteItem +``` + +> **Why this beats nested loops**: the naive approach (for each dest item, scan source) +> is O(n × m) and hits Power Automate's 100k-action run limit fast on large lists. +> This pattern is O(n + m): one pass to build key arrays, one pass per filter. +> The update loop in Step 6 only iterates *changed* records — often a tiny fraction +> of the full collection. Run Steps 2/4/7 in **parallel Scopes** for further speed. + +--- + +### First-or-Null Single-Row Lookup + +Use `first()` on the result array to extract one record without a loop. +Then null-check the output to guard downstream actions. 
+ +```json +"Get_First_Match": { + "type": "Compose", + "runAfter": { "Get_SP_Items": ["Succeeded"] }, + "inputs": "@first(outputs('Get_SP_Items')?['body/value'])" +} +``` + +In a Condition, test for no-match with the **`@null` literal** (not `empty()`): + +```json +"Condition": { + "type": "If", + "expression": { + "not": { + "equals": [ + "@outputs('Get_First_Match')", + "@null" + ] + } + } +} +``` + +Access fields on the matched row: `@outputs('Get_First_Match')?['FieldName']` + +> Use this instead of `Apply to each` when you only need one matching record. +> `first()` on an empty array returns `null`; `empty()` is for arrays/strings, +> not scalars — using it on a `first()` result causes a runtime error. + +--- + +## HTTP & Parsing + +### HTTP Action (External API) + +```json +"Call_External_API": { + "type": "Http", + "runAfter": {}, + "inputs": { + "method": "POST", + "uri": "https://api.example.com/endpoint", + "headers": { + "Content-Type": "application/json", + "Authorization": "Bearer @{variables('apiToken')}" + }, + "body": { + "data": "@outputs('Compose_Payload')" + }, + "retryPolicy": { + "type": "Fixed", + "count": 3, + "interval": "PT10S" + } + } +} +``` + +Response reference: `@outputs('Call_External_API')?['body']` + +#### Variant: ActiveDirectoryOAuth (Service-to-Service) + +For calling APIs that require Azure AD client-credentials (e.g., Microsoft Graph), +use in-line OAuth instead of a Bearer token variable: + +```json +"Call_Graph_API": { + "type": "Http", + "runAfter": {}, + "inputs": { + "method": "GET", + "uri": "https://graph.microsoft.com/v1.0/users?$search=\"employeeId:@{variables('Code')}\"&$select=id,displayName", + "headers": { + "Content-Type": "application/json", + "ConsistencyLevel": "eventual" + }, + "authentication": { + "type": "ActiveDirectoryOAuth", + "authority": "https://login.microsoftonline.com", + "tenant": "", + "audience": "https://graph.microsoft.com", + "clientId": "", + "secret": "@parameters('graphClientSecret')" + } 
+ } +} +``` + +> **When to use:** Calling Microsoft Graph, Azure Resource Manager, or any +> Azure AD-protected API from a flow without a premium connector. +> +> The `authentication` block handles the entire OAuth client-credentials flow +> transparently — no manual token acquisition step needed. +> +> `ConsistencyLevel: eventual` is required for Graph `$search` queries. +> Without it, `$search` returns 400. +> +> For PATCH/PUT writes, the same `authentication` block works — just change +> `method` and add a `body`. +> +> ⚠️ **Never hardcode `secret` inline.** Use `@parameters('graphClientSecret')` +> and declare it in the flow's `parameters` block (type `securestring`). This +> prevents the secret from appearing in run history or being readable via +> `get_live_flow`. Declare the parameter like: +> ```json +> "parameters": { +> "graphClientSecret": { "type": "securestring", "defaultValue": "" } +> } +> ``` +> Then pass the real value via the flow's connections or environment variables +> — never commit it to source control. + +--- + +### HTTP Response (Return to Caller) + +Used in HTTP-triggered flows to send a structured reply back to the caller. +Must run before the flow times out (default 2 min for synchronous HTTP). + +```json +"Response": { + "type": "Response", + "runAfter": {}, + "inputs": { + "statusCode": 200, + "headers": { + "Content-Type": "application/json" + }, + "body": { + "status": "success", + "message": "@{outputs('Compose_Result')}" + } + } +} +``` + +> **PowerApps / low-code caller pattern**: always return `statusCode: 200` with a +> `status` field in the body (`"success"` / `"error"`). PowerApps HTTP actions +> do not handle non-2xx responses gracefully — the caller should inspect +> `body.status` rather than the HTTP status code. +> +> Use multiple Response actions — one per branch — so each path returns +> an appropriate message. Only one will execute per run. 
+
+---
+
+### Child Flow Call (Parent→Child via HTTP POST)
+
+Power Automate supports parent→child orchestration by calling a child flow's
+HTTP trigger URL directly. The parent sends an HTTP POST and blocks until the
+child returns a `Response` action. The child flow uses a `manual` (Request) trigger.
+
+```json
+// PARENT — call child flow and wait for its response
+"Call_Child_Flow": {
+  "type": "Http",
+  "inputs": {
+    "method": "POST",
+    "uri": "https://prod-XX.australiasoutheast.logic.azure.com:443/workflows/<child-flow-workflow-id>/triggers/manual/paths/invoke?api-version=2016-06-01&sp=%2Ftriggers%2Fmanual%2Frun&sv=1.0&sig=<sas-signature>",
+    "headers": { "Content-Type": "application/json" },
+    "body": {
+      "ID": "@triggerBody()?['ID']",
+      "WeekEnd": "@triggerBody()?['WeekEnd']",
+      "Payload": "@variables('dataArray')"
+    },
+    "retryPolicy": { "type": "none" }
+  },
+  "operationOptions": "DisableAsyncPattern",
+  "runtimeConfiguration": {
+    "contentTransfer": { "transferMode": "Chunked" }
+  },
+  "limit": { "timeout": "PT2H" }
+}
+```
+
+```json
+// CHILD — manual trigger receives the JSON body
+// (trigger definition)
+"manual": {
+  "type": "Request",
+  "kind": "Http",
+  "inputs": {
+    "schema": {
+      "type": "object",
+      "properties": {
+        "ID": { "type": "string" },
+        "WeekEnd": { "type": "string" },
+        "Payload": { "type": "array" }
+      }
+    }
+  }
+}
+
+// CHILD — return result to parent
+"Response_Success": {
+  "type": "Response",
+  "inputs": {
+    "statusCode": 200,
+    "headers": { "Content-Type": "application/json" },
+    "body": { "Result": "Success", "Count": "@length(variables('processed'))" }
+  }
+}
+```
+
+> **`retryPolicy: none`** — critical on the parent's HTTP call. Without it, a child
+> flow timeout triggers retries, spawning duplicate child runs.
+>
+> **`DisableAsyncPattern`** — prevents the parent from treating a 202 Accepted as
+> completion. The parent will block until the child sends its `Response`.
+> +> **`transferMode: Chunked`** — enable when passing large arrays (>100 KB) to the child; +> avoids request-size limits. +> +> **`limit.timeout: PT2H`** — raise the default 2-minute HTTP timeout for long-running +> children. Max is PT24H. +> +> The child flow's trigger URL contains a SAS token (`sig=...`) that authenticates +> the call. Copy it from the child flow's trigger properties panel. The URL changes +> if the trigger is deleted and re-created. + +--- + +### Parse JSON + +```json +"Parse_Response": { + "type": "ParseJson", + "runAfter": {}, + "inputs": { + "content": "@outputs('Call_External_API')?['body']", + "schema": { + "type": "object", + "properties": { + "id": { "type": "integer" }, + "name": { "type": "string" }, + "items": { + "type": "array", + "items": { "type": "object" } + } + } + } + } +} +``` + +Access parsed values: `@body('Parse_Response')?['name']` + +--- + +### Manual CSV → JSON (No Premium Action) + +Parse a raw CSV string into an array of objects using only built-in expressions. +Avoids the premium "Parse CSV" connector action. 
+ +```json +"Delimiter": { + "type": "Compose", + "inputs": "," +}, +"Strip_Quotes": { + "type": "Compose", + "inputs": "@replace(body('Get_File_Content'), '\"', '')" +}, +"Detect_Line_Ending": { + "type": "Compose", + "inputs": "@if(equals(indexOf(outputs('Strip_Quotes'), decodeUriComponent('%0D%0A')), -1), if(equals(indexOf(outputs('Strip_Quotes'), decodeUriComponent('%0A')), -1), decodeUriComponent('%0D'), decodeUriComponent('%0A')), decodeUriComponent('%0D%0A'))" +}, +"Headers": { + "type": "Compose", + "inputs": "@split(first(split(outputs('Strip_Quotes'), outputs('Detect_Line_Ending'))), outputs('Delimiter'))" +}, +"Data_Rows": { + "type": "Compose", + "inputs": "@skip(split(outputs('Strip_Quotes'), outputs('Detect_Line_Ending')), 1)" +}, +"Select_CSV_Body": { + "type": "Select", + "inputs": { + "from": "@outputs('Data_Rows')", + "select": { + "@{outputs('Headers')[0]}": "@split(item(), outputs('Delimiter'))[0]", + "@{outputs('Headers')[1]}": "@split(item(), outputs('Delimiter'))[1]", + "@{outputs('Headers')[2]}": "@split(item(), outputs('Delimiter'))[2]" + } + } +}, +"Filter_Empty_Rows": { + "type": "Query", + "inputs": { + "from": "@body('Select_CSV_Body')", + "where": "@not(equals(item()?[outputs('Headers')[0]], null))" + } +} +``` + +Result: `@body('Filter_Empty_Rows')` — array of objects with header names as keys. + +> **`Detect_Line_Ending`** handles CRLF (Windows), LF (Unix), and CR (old Mac) automatically +> using `indexOf()` with `decodeUriComponent('%0D%0A' / '%0A' / '%0D')`. +> +> **Dynamic key names in `Select`**: `@{outputs('Headers')[0]}` as a JSON key in a +> `Select` shape sets the output property name at runtime from the header row — +> this works as long as the expression is in `@{...}` interpolation syntax. 
+> +> **Columns with embedded commas**: if field values can contain the delimiter, +> use `length(split(row, ','))` in a Switch to detect the column count and manually +> reassemble the split fragments: `@concat(split(item(),',')[1],',',split(item(),',')[2])` + +--- + +### ConvertTimeZone (Built-in, No Connector) + +Converts a timestamp between timezones with no API call or connector licence cost. +Format string `"g"` produces short locale date+time (`M/d/yyyy h:mm tt`). + +```json +"Convert_to_Local_Time": { + "type": "Expression", + "kind": "ConvertTimeZone", + "runAfter": {}, + "inputs": { + "baseTime": "@{outputs('UTC_Timestamp')}", + "sourceTimeZone": "UTC", + "destinationTimeZone": "Taipei Standard Time", + "formatString": "g" + } +} +``` + +Result reference: `@body('Convert_to_Local_Time')` — **not** `outputs()`, unlike most actions. + +Common `formatString` values: `"g"` (short), `"f"` (full), `"yyyy-MM-dd"`, `"HH:mm"` + +Common timezone strings: `"UTC"`, `"AUS Eastern Standard Time"`, `"Taipei Standard Time"`, +`"Singapore Standard Time"`, `"GMT Standard Time"` + +> This is `type: Expression, kind: ConvertTimeZone` — a built-in Logic Apps action, +> not a connector. No connection reference needed. Reference the output via +> `body()` (not `outputs()`), otherwise the expression returns null. diff --git a/skills/flowstudio-power-automate-build/references/build-patterns.md b/skills/flowstudio-power-automate-build/references/build-patterns.md new file mode 100644 index 000000000..b50b10afd --- /dev/null +++ b/skills/flowstudio-power-automate-build/references/build-patterns.md @@ -0,0 +1,108 @@ +# Common Build Patterns + +Complete flow definition templates ready to copy and customize. 
+ +--- + +## Pattern: Recurrence + SharePoint list read + Teams notification + +```json +{ + "triggers": { + "Recurrence": { + "type": "Recurrence", + "recurrence": { "frequency": "Day", "interval": 1, + "startTime": "2026-01-01T08:00:00Z", + "timeZone": "AUS Eastern Standard Time" } + } + }, + "actions": { + "Get_SP_Items": { + "type": "OpenApiConnection", + "runAfter": {}, + "inputs": { + "host": { + "apiId": "/providers/Microsoft.PowerApps/apis/shared_sharepointonline", + "connectionName": "shared_sharepointonline", + "operationId": "GetItems" + }, + "parameters": { + "dataset": "https://mytenant.sharepoint.com/sites/mysite", + "table": "MyList", + "$filter": "Status eq 'Active'", + "$top": 500 + } + } + }, + "Apply_To_Each": { + "type": "Foreach", + "runAfter": { "Get_SP_Items": ["Succeeded"] }, + "foreach": "@outputs('Get_SP_Items')?['body/value']", + "actions": { + "Post_Teams_Message": { + "type": "OpenApiConnection", + "runAfter": {}, + "inputs": { + "host": { + "apiId": "/providers/Microsoft.PowerApps/apis/shared_teams", + "connectionName": "shared_teams", + "operationId": "PostMessageToConversation" + }, + "parameters": { + "poster": "Flow bot", + "location": "Channel", + "body/recipient": { + "groupId": "", + "channelId": "" + }, + "body/messageBody": "Item: @{items('Apply_To_Each')?['Title']}" + } + } + } + }, + "operationOptions": "Sequential" + } + } +} +``` + +--- + +## Pattern: HTTP trigger (webhook / Power App call) + +```json +{ + "triggers": { + "manual": { + "type": "Request", + "kind": "Http", + "inputs": { + "schema": { + "type": "object", + "properties": { + "name": { "type": "string" }, + "value": { "type": "number" } + } + } + } + } + }, + "actions": { + "Compose_Response": { + "type": "Compose", + "runAfter": {}, + "inputs": "Received: @{triggerBody()?['name']} = @{triggerBody()?['value']}" + }, + "Response": { + "type": "Response", + "runAfter": { "Compose_Response": ["Succeeded"] }, + "inputs": { + "statusCode": 200, + "body": { 
"status": "ok", "message": "@{outputs('Compose_Response')}" } + } + } + } +} +``` + +Access body values: `@triggerBody()?['name']` diff --git a/skills/flowstudio-power-automate-build/references/flow-schema.md b/skills/flowstudio-power-automate-build/references/flow-schema.md new file mode 100644 index 000000000..02210e0a3 --- /dev/null +++ b/skills/flowstudio-power-automate-build/references/flow-schema.md @@ -0,0 +1,225 @@ +# FlowStudio MCP — Flow Definition Schema + +The full JSON structure expected by `update_live_flow` (and returned by `get_live_flow`). + +--- + +## Top-Level Shape + +```json +{ + "$schema": "https://schema.management.azure.com/providers/Microsoft.Logic/schemas/2016-06-01/workflowdefinition.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "$connections": { + "defaultValue": {}, + "type": "Object" + } + }, + "triggers": { + "": { ... } + }, + "actions": { + "": { ... } + }, + "outputs": {} +} +``` + +--- + +## `triggers` + +Exactly one trigger per flow definition. The key name is arbitrary but +conventional names are used (e.g. `Recurrence`, `manual`, `When_a_new_email_arrives`). + +See [trigger-types.md](trigger-types.md) for all trigger templates. + +--- + +## `actions` + +Dictionary of action definitions keyed by unique action name. +Key names may not contain spaces — use underscores. + +Each action must include: +- `type` — action type identifier +- `runAfter` — map of upstream action names → status conditions array +- `inputs` — action-specific input configuration + +See [action-patterns-core.md](action-patterns-core.md), [action-patterns-data.md](action-patterns-data.md), +and [action-patterns-connectors.md](action-patterns-connectors.md) for templates. 
+ +### Optional Action Properties + +Beyond the required `type`, `runAfter`, and `inputs`, actions can include: + +| Property | Purpose | +|---|---| +| `runtimeConfiguration` | Pagination, concurrency, secure data, chunked transfer | +| `operationOptions` | `"Sequential"` for Foreach, `"DisableAsyncPattern"` for HTTP | +| `limit` | Timeout override (e.g. `{"timeout": "PT2H"}`) | + +#### `runtimeConfiguration` Variants + +**Pagination** (SharePoint Get Items with large lists): +```json +"runtimeConfiguration": { + "paginationPolicy": { + "minimumItemCount": 5000 + } +} +``` +> Without this, Get Items silently caps at 256 results. Set `minimumItemCount` +> to the maximum rows you expect. Required for any SharePoint list over 256 items. + +**Concurrency** (parallel Foreach): +```json +"runtimeConfiguration": { + "concurrency": { + "repetitions": 20 + } +} +``` + +**Secure inputs/outputs** (mask values in run history): +```json +"runtimeConfiguration": { + "secureData": { + "properties": ["inputs", "outputs"] + } +} +``` +> Use on actions that handle credentials, tokens, or PII. Masked values show +> as `""` in the flow run history UI and API responses. + +**Chunked transfer** (large HTTP payloads): +```json +"runtimeConfiguration": { + "contentTransfer": { + "transferMode": "Chunked" + } +} +``` +> Enable on HTTP actions sending or receiving bodies >100 KB (e.g. parent→child +> flow calls with large arrays). + +--- + +## `runAfter` Rules + +The first action in a branch has `"runAfter": {}` (empty — runs after trigger). 
+ +Subsequent actions declare their dependency: + +```json +"My_Action": { + "runAfter": { + "Previous_Action": ["Succeeded"] + } +} +``` + +Multiple upstream dependencies: +```json +"runAfter": { + "Action_A": ["Succeeded"], + "Action_B": ["Succeeded", "Skipped"] +} +``` + +Error-handling action (runs when upstream failed): +```json +"Log_Error": { + "runAfter": { + "Risky_Action": ["Failed"] + } +} +``` + +--- + +## `parameters` (Flow-Level Input Parameters) + +Optional. Define reusable values at the flow level: + +```json +"parameters": { + "listName": { + "type": "string", + "defaultValue": "MyList" + }, + "maxItems": { + "type": "integer", + "defaultValue": 100 + } +} +``` + +Reference: `@parameters('listName')` in expression strings. + +--- + +## `outputs` + +Rarely used in cloud flows. Leave as `{}` unless the flow is called +as a child flow and needs to return values. + +For child flows that return data: + +```json +"outputs": { + "resultData": { + "type": "object", + "value": "@outputs('Compose_Result')" + } +} +``` + +--- + +## Scoped Actions (Inside Scope Block) + +Actions that need to be grouped for error handling or clarity: + +```json +"Scope_Main_Process": { + "type": "Scope", + "runAfter": {}, + "actions": { + "Step_One": { ... }, + "Step_Two": { "runAfter": { "Step_One": ["Succeeded"] }, ... } + } +} +``` + +--- + +## Full Minimal Example + +```json +{ + "$schema": "https://schema.management.azure.com/providers/Microsoft.Logic/schemas/2016-06-01/workflowdefinition.json#", + "contentVersion": "1.0.0.0", + "triggers": { + "Recurrence": { + "type": "Recurrence", + "recurrence": { + "frequency": "Week", + "interval": 1, + "schedule": { "weekDays": ["Monday"] }, + "startTime": "2026-01-05T09:00:00Z", + "timeZone": "AUS Eastern Standard Time" + } + } + }, + "actions": { + "Compose_Greeting": { + "type": "Compose", + "runAfter": {}, + "inputs": "Good Monday!" 
+ } + }, + "outputs": {} +} +``` diff --git a/skills/flowstudio-power-automate-build/references/trigger-types.md b/skills/flowstudio-power-automate-build/references/trigger-types.md new file mode 100644 index 000000000..6065f1fa6 --- /dev/null +++ b/skills/flowstudio-power-automate-build/references/trigger-types.md @@ -0,0 +1,211 @@ +# FlowStudio MCP — Trigger Types + +Copy-paste trigger definitions for Power Automate flow definitions. + +--- + +## Recurrence + +Run on a schedule. + +```json +"Recurrence": { + "type": "Recurrence", + "recurrence": { + "frequency": "Day", + "interval": 1, + "startTime": "2026-01-01T08:00:00Z", + "timeZone": "AUS Eastern Standard Time" + } +} +``` + +Weekly on specific days: +```json +"Recurrence": { + "type": "Recurrence", + "recurrence": { + "frequency": "Week", + "interval": 1, + "schedule": { + "weekDays": ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday"] + }, + "startTime": "2026-01-05T09:00:00Z", + "timeZone": "AUS Eastern Standard Time" + } +} +``` + +Common `timeZone` values: +- `"AUS Eastern Standard Time"` — Sydney/Melbourne (UTC+10/+11) +- `"UTC"` — Universal time +- `"E. Australia Standard Time"` — Brisbane (UTC+10 no DST) +- `"New Zealand Standard Time"` — Auckland (UTC+12/+13) +- `"Pacific Standard Time"` — Los Angeles (UTC-8/-7) +- `"GMT Standard Time"` — London (UTC+0/+1) + +--- + +## Manual (HTTP Request / Power Apps) + +Receive an HTTP POST with a JSON body. 
+ +```json +"manual": { + "type": "Request", + "kind": "Http", + "inputs": { + "schema": { + "type": "object", + "properties": { + "name": { "type": "string" }, + "value": { "type": "integer" } + }, + "required": ["name"] + } + } +} +``` + +Access values: `@triggerBody()?['name']` +Trigger URL available after saving: `@listCallbackUrl()` + +#### No-Schema Variant (Accept Arbitrary JSON) + +When the incoming payload structure is unknown or varies, omit the schema +to accept any valid JSON body without validation: + +```json +"manual": { + "type": "Request", + "kind": "Http", + "inputs": { + "schema": {} + } +} +``` + +Access any field dynamically: `@triggerBody()?['anyField']` + +> Use this for external webhooks (Stripe, GitHub, Employment Hero, etc.) where the +> payload shape may change or is not fully documented. The flow accepts any +> JSON without returning 400 for unexpected properties. + +--- + +## Automated (SharePoint Item Created) + +```json +"When_an_item_is_created": { + "type": "OpenApiConnectionNotification", + "inputs": { + "host": { + "apiId": "/providers/Microsoft.PowerApps/apis/shared_sharepointonline", + "connectionName": "", + "operationId": "OnNewItem" + }, + "parameters": { + "dataset": "https://mytenant.sharepoint.com/sites/mysite", + "table": "MyList" + }, + "subscribe": { + "body": { "notificationUrl": "@listCallbackUrl()" }, + "queries": { + "dataset": "https://mytenant.sharepoint.com/sites/mysite", + "table": "MyList" + } + } + } +} +``` + +Access trigger data: `@triggerBody()?['ID']`, `@triggerBody()?['Title']`, etc. 
+ +--- + +## Automated (SharePoint Item Modified) + +```json +"When_an_existing_item_is_modified": { + "type": "OpenApiConnectionNotification", + "inputs": { + "host": { + "apiId": "/providers/Microsoft.PowerApps/apis/shared_sharepointonline", + "connectionName": "", + "operationId": "OnUpdatedItem" + }, + "parameters": { + "dataset": "https://mytenant.sharepoint.com/sites/mysite", + "table": "MyList" + }, + "subscribe": { + "body": { "notificationUrl": "@listCallbackUrl()" }, + "queries": { + "dataset": "https://mytenant.sharepoint.com/sites/mysite", + "table": "MyList" + } + } + } +} +``` + +--- + +## Automated (Outlook: When New Email Arrives) + +```json +"When_a_new_email_arrives": { + "type": "OpenApiConnectionNotification", + "inputs": { + "host": { + "apiId": "/providers/Microsoft.PowerApps/apis/shared_office365", + "connectionName": "", + "operationId": "OnNewEmail" + }, + "parameters": { + "folderId": "Inbox", + "to": "monitored@contoso.com", + "isHTML": true + }, + "subscribe": { + "body": { "notificationUrl": "@listCallbackUrl()" } + } + } +} +``` + +--- + +## Child Flow (Called by Another Flow) + +```json +"manual": { + "type": "Request", + "kind": "Button", + "inputs": { + "schema": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { "type": "object" } + } + } + } + } +} +``` + +Access parent-supplied data: `@triggerBody()?['items']` + +To return data to the parent, add a `Response` action: +```json +"Respond_to_Parent": { + "type": "Response", + "runAfter": { "Compose_Result": ["Succeeded"] }, + "inputs": { + "statusCode": 200, + "body": "@outputs('Compose_Result')" + } +} +``` diff --git a/skills/flowstudio-power-automate-debug/SKILL.md b/skills/flowstudio-power-automate-debug/SKILL.md new file mode 100644 index 000000000..8a9fdf6d4 --- /dev/null +++ b/skills/flowstudio-power-automate-debug/SKILL.md @@ -0,0 +1,316 @@ +--- +name: flowstudio-power-automate-debug +description: >- + Debug failing Power Automate cloud 
flows using the FlowStudio MCP server.
+  Load this skill when asked to: debug a flow, investigate a failed run, why is
+  this flow failing, inspect action outputs, find the root cause of a flow error,
+  fix a broken Power Automate flow, diagnose a timeout, trace a DynamicOperationRequestFailure,
+  check connector auth errors, read error details from a run, or troubleshoot
+  expression failures. Requires a FlowStudio MCP subscription — see https://mcp.flowstudio.app
+---
+
+# Power Automate Debugging with FlowStudio MCP
+
+A step-by-step diagnostic process for investigating failing Power Automate
+cloud flows through the FlowStudio MCP server.
+
+**Prerequisite**: A FlowStudio MCP server must be reachable with a valid JWT.
+See the `flowstudio-power-automate-mcp` skill for connection setup.
+Subscribe at https://mcp.flowstudio.app
+
+---
+
+## Source of Truth
+
+> **Always call `tools/list` first** to confirm available tool names and their
+> parameter schemas. Tool names and parameters may change between server versions.
+> This skill covers response shapes, behavioral notes, and diagnostic patterns —
+> things `tools/list` cannot tell you. If this document disagrees with `tools/list`
+> or a real API response, the API wins.
+ +--- + +## Python Helper + +```python +import json, urllib.request + +MCP_URL = "https://mcp.flowstudio.app/mcp" +MCP_TOKEN = "" + +def mcp(tool, **kwargs): + payload = json.dumps({"jsonrpc": "2.0", "id": 1, "method": "tools/call", + "params": {"name": tool, "arguments": kwargs}}).encode() + req = urllib.request.Request(MCP_URL, data=payload, + headers={"x-api-key": MCP_TOKEN, "Content-Type": "application/json", + "User-Agent": "FlowStudio-MCP/1.0"}) + try: + resp = urllib.request.urlopen(req, timeout=120) + except urllib.error.HTTPError as e: + body = e.read().decode("utf-8", errors="replace") + raise RuntimeError(f"MCP HTTP {e.code}: {body[:200]}") from e + raw = json.loads(resp.read()) + if "error" in raw: + raise RuntimeError(f"MCP error: {json.dumps(raw['error'])}") + return json.loads(raw["result"]["content"][0]["text"]) + +ENV = "" # e.g. Default-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +``` + +--- + +## FlowStudio for Teams: Fast-Path Diagnosis (Skip Steps 2–4) + +If you have a FlowStudio for Teams subscription, `get_store_flow_errors` +returns per-run failure data including action names and remediation hints +in a single call — no need to walk through live API steps. 
+ +```python +# Quick failure summary +summary = mcp("get_store_flow_summary", environmentName=ENV, flowName=FLOW_ID) +# {"totalRuns": 100, "failRuns": 10, "failRate": 0.1, +# "averageDurationSeconds": 29.4, "maxDurationSeconds": 158.9, +# "firstFailRunRemediation": ""} +print(f"Fail rate: {summary['failRate']:.0%} over {summary['totalRuns']} runs") + +# Per-run error details (requires active monitoring to be configured) +errors = mcp("get_store_flow_errors", environmentName=ENV, flowName=FLOW_ID) +if errors: + for r in errors[:3]: + print(r["startTime"], "|", r.get("failedActions"), "|", r.get("remediationHint")) + # If errors confirms the failing action → jump to Step 6 (apply fix) +else: + # Store doesn't have run-level detail for this flow — use live tools (Steps 2–5) + pass +``` + +For the full governance record (description, complexity, tier, connector list): +```python +record = mcp("get_store_flow", environmentName=ENV, flowName=FLOW_ID) +# {"displayName": "My Flow", "state": "Started", +# "runPeriodTotal": 100, "runPeriodFailRate": 0.1, "runPeriodFails": 10, +# "runPeriodDurationAverage": 29410.8, ← milliseconds +# "runError": "{\"code\": \"EACCES\", ...}", ← JSON string, parse it +# "description": "...", "tier": "Premium", "complexity": "{...}"} +if record.get("runError"): + last_err = json.loads(record["runError"]) + print("Last run error:", last_err) +``` + +--- + +## Step 1 — Locate the Flow + +```python +result = mcp("list_live_flows", environmentName=ENV) +# Returns a wrapper object: {mode, flows, totalCount, error} +target = next(f for f in result["flows"] if "My Flow Name" in f["displayName"]) +FLOW_ID = target["id"] # plain UUID — use directly as flowName +print(FLOW_ID) +``` + +--- + +## Step 2 — Find the Failing Run + +```python +runs = mcp("get_live_flow_runs", environmentName=ENV, flowName=FLOW_ID, top=5) +# Returns direct array (newest first): +# [{"name": "08584296068667933411438594643CU15", +# "status": "Failed", +# "startTime": 
"2026-02-25T06:13:38.6910688Z", +# "endTime": "2026-02-25T06:15:24.1995008Z", +# "triggerName": "manual", +# "error": {"code": "ActionFailed", "message": "An action failed..."}}, +# {"name": "...", "status": "Succeeded", "error": null, ...}] + +for r in runs: + print(r["name"], r["status"], r["startTime"]) + +RUN_ID = next(r["name"] for r in runs if r["status"] == "Failed") +``` + +--- + +## Step 3 — Get the Top-Level Error + +```python +err = mcp("get_live_flow_run_error", + environmentName=ENV, flowName=FLOW_ID, runName=RUN_ID) +# Returns: +# { +# "runName": "08584296068667933411438594643CU15", +# "failedActions": [ +# {"actionName": "Apply_to_each_prepare_workers", "status": "Failed", +# "error": {"code": "ActionFailed", "message": "An action failed..."}, +# "startTime": "...", "endTime": "..."}, +# {"actionName": "HTTP_find_AD_User_by_Name", "status": "Failed", +# "code": "NotSpecified", "startTime": "...", "endTime": "..."} +# ], +# "allActions": [ +# {"actionName": "Apply_to_each", "status": "Skipped"}, +# {"actionName": "Compose_WeekEnd", "status": "Succeeded"}, +# ... +# ] +# } + +# failedActions is ordered outer-to-inner. The ROOT cause is the LAST entry: +root = err["failedActions"][-1] +print(f"Root action: {root['actionName']} → code: {root.get('code')}") + +# allActions shows every action's status — useful for spotting what was Skipped +# See common-errors.md to decode the error code. +``` + +--- + +## Step 4 — Read the Flow Definition + +```python +defn = mcp("get_live_flow", environmentName=ENV, flowName=FLOW_ID) +actions = defn["properties"]["definition"]["actions"] +print(list(actions.keys())) +``` + +Find the failing action in the definition. Inspect its `inputs` expression +to understand what data it expects. 
+ +--- + +## Step 5 — Inspect Action Outputs (Walk Back from Failure) + +For each action **leading up to** the failure, inspect its runtime output: + +```python +for action_name in ["Compose_WeekEnd", "HTTP_Get_Data", "Parse_JSON"]: + out = mcp("get_live_flow_run_action_outputs", + environmentName=ENV, + flowName=FLOW_ID, + runName=RUN_ID, + actionName=action_name) + # Check status + outputs + print(action_name, out.get("status")) + print(json.dumps(out.get("outputs", {}), indent=2)[:500]) +``` + +> ⚠️ Output payloads from array-processing actions can be very large. +> Always slice (e.g. `[:500]`) before printing. + +--- + +## Step 6 — Pinpoint the Root Cause + +### Expression Errors (e.g. `split` on null) +If the error mentions `InvalidTemplate` or a function name: +1. Find the action in the definition +2. Check what upstream action/expression it reads +3. Inspect that upstream action's output for null / missing fields + +```python +# Example: action uses split(item()?['Name'], ' ') +# → null Name in the source data +outputs = mcp("get_live_flow_run_action_outputs", ..., actionName="Compose_Names") +names = outputs["outputs"]["body"] # check for nulls in the body array +nulls = [x for x in names if x.get("Name") is None] +print(f"{len(nulls)} records with null Name") +``` + +### Wrong Field Path +Expression `triggerBody()?['fieldName']` returns null → `fieldName` is wrong. +Check the trigger output shape with: +```python +mcp("get_live_flow_run_action_outputs", ..., actionName="") +``` + +### Connection / Auth Failures +Look for `ConnectionAuthorizationFailed` — the connection owner must match the +service account running the flow. Cannot fix via API; fix in PA designer. 
+
+---
+
+## Step 7 — Apply the Fix
+
+**For expression/data issues**:
+```python
+defn = mcp("get_live_flow", environmentName=ENV, flowName=FLOW_ID)
+acts = defn["properties"]["definition"]["actions"]
+
+# Example: fix split on potentially-null Name
+# (guard the null inside the split — see common-errors.md)
+acts["Compose_Names"]["inputs"] = \
+    "@split(coalesce(item()?['Name'], ''), ' ')"
+
+conn_refs = defn["properties"]["connectionReferences"]
+result = mcp("update_live_flow",
+             environmentName=ENV,
+             flowName=FLOW_ID,
+             definition=defn["properties"]["definition"],
+             connectionReferences=conn_refs)
+
+print(result.get("error"))  # None = success
+```
+
+> ⚠️ `update_live_flow` always returns an `error` key.
+> A value of `null` (Python `None`) means success.
+
+---
+
+## Step 8 — Verify the Fix
+
+```python
+# Resubmit the failed run
+resubmit = mcp("resubmit_live_flow_run",
+               environmentName=ENV, flowName=FLOW_ID, runName=RUN_ID)
+print(resubmit)
+
+# Wait ~30 s then check
+import time; time.sleep(30)
+new_runs = mcp("get_live_flow_runs", environmentName=ENV, flowName=FLOW_ID, top=3)
+print(new_runs[0]["status"])  # Succeeded = done
+```
+
+### Testing HTTP-Triggered Flows
+
+For flows with a `Request` (HTTP) trigger, use `trigger_live_flow` instead
+of `resubmit_live_flow_run` to test with custom payloads:
+
+```python
+# First inspect what the trigger expects
+schema = mcp("get_live_flow_http_schema",
+             environmentName=ENV, flowName=FLOW_ID)
+print("Expected body schema:", schema.get("triggerSchema"))
+print("Response schemas:", schema.get("responseSchemas"))
+
+# Trigger with a test payload
+result = mcp("trigger_live_flow",
+             environmentName=ENV,
+             flowName=FLOW_ID,
+             body={"name": "Test User", "value": 42})
+print(f"Status: {result['status']}, Body: {result.get('body')}")
+```
+
+> `trigger_live_flow` handles AAD-authenticated triggers automatically.
+> Only works for flows with a `Request` (HTTP) trigger type.
+ +--- + +## Quick-Reference Diagnostic Decision Tree + +| Symptom | First Tool to Call | What to Look For | +|---|---|---| +| Flow shows as Failed | `get_live_flow_run_error` | `failedActions[-1]["actionName"]` = root cause | +| Expression crash | `get_live_flow_run_action_outputs` on prior action | null / wrong-type fields in output body | +| Flow never starts | `get_live_flow` | check `properties.state` = "Started" | +| Action returns wrong data | `get_live_flow_run_action_outputs` | actual output body vs expected | +| Fix applied but still fails | `get_live_flow_runs` after resubmit | new run `status` field | + +--- + +## Reference Files + +- [common-errors.md](references/common-errors.md) — Error codes, likely causes, and fixes +- [debug-workflow.md](references/debug-workflow.md) — Full decision tree for complex failures + +## Related Skills + +- `power-automate-mcp` — Core connection setup and operation reference +- `power-automate-build` — Build and deploy new flows diff --git a/skills/flowstudio-power-automate-debug/references/common-errors.md b/skills/flowstudio-power-automate-debug/references/common-errors.md new file mode 100644 index 000000000..bd879b4fe --- /dev/null +++ b/skills/flowstudio-power-automate-debug/references/common-errors.md @@ -0,0 +1,188 @@ +# FlowStudio MCP — Common Power Automate Errors + +Reference for error codes, likely causes, and recommended fixes when debugging +Power Automate flows via the FlowStudio MCP server. + +--- + +## Expression / Template Errors + +### `InvalidTemplate` — Function Applied to Null + +**Full message pattern**: `"Unable to process template language expressions... function 'split' expects its first argument 'text' to be of type string"` + +**Root cause**: An expression like `@split(item()?['Name'], ' ')` received a null value. + +**Diagnosis**: +1. Note the action name in the error message +2. Call `get_live_flow_run_action_outputs` on the action that produces the array +3. 
Find items where `Name` (or the referenced field) is `null` + +**Fixes**: +``` +Before: @split(item()?['Name'], ' ') +After: @split(coalesce(item()?['Name'], ''), ' ') + +Or guard the whole foreach body with a condition: + expression: "@not(empty(item()?['Name']))" +``` + +--- + +### `InvalidTemplate` — Wrong Expression Path + +**Full message pattern**: `"Unable to process template language expressions... 'triggerBody()?['FieldName']' is of type 'Null'"` + +**Root cause**: The field name in the expression doesn't match the actual payload schema. + +**Diagnosis**: +```python +# Check trigger output shape +mcp("get_live_flow_run_action_outputs", + environmentName=ENV, flowName=FLOW_ID, runName=RUN_ID, + actionName="") +# Compare actual keys vs expression +``` + +**Fix**: Update expression to use the correct key name. Common mismatches: +- `triggerBody()?['body']` vs `triggerBody()?['Body']` (case-sensitive) +- `triggerBody()?['Subject']` vs `triggerOutputs()?['body/Subject']` + +--- + +### `InvalidTemplate` — Type Mismatch + +**Full message pattern**: `"... expected type 'Array' but got type 'Object'"` + +**Root cause**: Passing an object where the expression expects an array (e.g. a single item HTTP response vs a list response). + +**Fix**: +``` +Before: @outputs('HTTP')?['body'] +After: @outputs('HTTP')?['body/value'] ← for OData list responses + @createArray(outputs('HTTP')?['body']) ← wrap single object in array +``` + +--- + +## Connection / Auth Errors + +### `ConnectionAuthorizationFailed` + +**Full message**: `"The API connection ... is not authorized."` + +**Root cause**: The connection referenced in the flow is owned by a different +user/service account than the one whose JWT is being used. + +**Diagnosis**: Check `properties.connectionReferences` — the `connectionName` GUID +identifies the owner. Cannot be fixed via API. + +**Fix options**: +1. Open flow in Power Automate designer → re-authenticate the connection +2. 
Use a connection owned by the service account whose token you hold +3. Share the connection with the service account in PA admin + +--- + +### `InvalidConnectionCredentials` + +**Root cause**: The underlying OAuth token for the connection has expired or +the user's credentials changed. + +**Fix**: Owner must sign in to Power Automate and refresh the connection. + +--- + +## HTTP Action Errors + +### `ActionFailed` — HTTP 4xx/5xx + +**Full message pattern**: `"An HTTP request to... failed with status code '400'"` + +**Diagnosis**: +```python +actions_out = mcp("get_live_flow_run_action_outputs", ..., actionName="HTTP_My_Call") +item = actions_out[0] # first entry in the returned array +print(item["outputs"]["statusCode"]) # 400, 401, 403, 500... +print(item["outputs"]["body"]) # error details from target API +``` + +**Common causes**: +- 401 — missing or expired auth header +- 403 — permission denied on target resource +- 404 — wrong URL / resource deleted +- 400 — malformed JSON body (check expression that builds the body) + +--- + +### `ActionFailed` — HTTP Timeout + +**Root cause**: Target endpoint did not respond within the connector's timeout +(default 90 s for HTTP action). + +**Fix**: Add retry policy to the HTTP action, or split the payload into smaller +batches to reduce per-request processing time. + +--- + +## Control Flow Errors + +### `ActionSkipped` Instead of Running + +**Root cause**: The `runAfter` condition wasn't met. E.g. an action set to +`runAfter: { "Prev": ["Succeeded"] }` won't run if `Prev` failed or was skipped. + +**Diagnosis**: Check the preceding action's status. Deliberately skipped +(e.g. inside a false branch) is intentional — unexpected skip is a logic gap. + +**Fix**: Add `"Failed"` or `"Skipped"` to the `runAfter` status array if the +action should run on those outcomes too. 
+ +--- + +### Foreach Runs in Wrong Order / Race Condition + +**Root cause**: `Foreach` without `operationOptions: "Sequential"` runs +iterations in parallel, causing write conflicts or undefined ordering. + +**Fix**: Add `"operationOptions": "Sequential"` to the Foreach action. + +--- + +## Update / Deploy Errors + +### `update_live_flow` Returns No-Op + +**Symptom**: `result["updated"]` is empty list or `result["created"]` is empty. + +**Likely cause**: Passing wrong parameter name. The required key is `definition` +(object), not `flowDefinition` or `body`. + +--- + +### `update_live_flow` — `"Supply connectionReferences"` + +**Root cause**: The definition contains `OpenApiConnection` or +`OpenApiConnectionWebhook` actions but `connectionReferences` was not passed. + +**Fix**: Fetch the existing connection references with `get_live_flow` and pass +them as the `connectionReferences` argument. + +--- + +## Data Logic Errors + +### `union()` Overriding Correct Records with Nulls + +**Symptom**: After merging two arrays, some records have null fields that existed +in one of the source arrays. + +**Root cause**: `union(old_data, new_data)` — `union()` first-wins, so old_data +values override new_data for matching records. + +**Fix**: Swap argument order: `union(new_data, old_data)` + +``` +Before: @sort(union(outputs('Old_Array'), body('New_Array')), 'Date') +After: @sort(union(body('New_Array'), outputs('Old_Array')), 'Date') +``` diff --git a/skills/flowstudio-power-automate-debug/references/debug-workflow.md b/skills/flowstudio-power-automate-debug/references/debug-workflow.md new file mode 100644 index 000000000..c28d86d1d --- /dev/null +++ b/skills/flowstudio-power-automate-debug/references/debug-workflow.md @@ -0,0 +1,157 @@ +# FlowStudio MCP — Debug Workflow + +End-to-end decision tree for diagnosing Power Automate flow failures. 


---

## Top-Level Decision Tree

```
Flow is failing
│
├── Flow never starts / no runs appear
│   └── ► Check flow State: get_live_flow → properties.state
│       ├── "Stopped" → flow is disabled; enable in PA designer
│       └── "Started" + no runs → trigger condition not met (check trigger config)
│
├── Flow run shows "Failed"
│   ├── Step A: get_live_flow_run_error → read error.code + error.message
│   │
│   ├── error.code = "InvalidTemplate"
│   │   └── ► Expression error (null value, wrong type, bad path)
│   │       └── See: Expression Error Workflow below
│   │
│   ├── error.code = "ConnectionAuthorizationFailed"
│   │   └── ► Connection owned by different user; fix in PA designer
│   │
│   ├── error.code = "ActionFailed" + message mentions HTTP
│   │   └── ► See: HTTP Action Workflow below
│   │
│   └── Unknown / generic error
│       └── ► Walk actions backwards (see Walk-Back Analysis below)
│
└── Flow Succeeds but output is wrong
    └── ► Inspect intermediate actions with get_live_flow_run_action_outputs
        └── See: Data Quality Workflow below
```

---

## Expression Error Workflow

```
InvalidTemplate error
│
├── 1. Read error.message — identifies the action name and function
│
├── 2. Get flow definition: get_live_flow
│   └── Find that action in definition["actions"][action_name]["inputs"]
│       └── Identify what upstream value the expression reads
│
├── 3. get_live_flow_run_action_outputs for the action BEFORE the failing one
│   └── Look for null / wrong type in that action's output
│       ├── Null string field → wrap with coalesce(): @coalesce(field, '')
│       ├── Null object → add empty check condition before the action
│       └── Wrong field name → correct the key (case-sensitive)
│
└── 4. Apply fix with update_live_flow, then resubmit
```

---

## HTTP Action Workflow

```
ActionFailed on HTTP action
│
├── 1. 
get_live_flow_run_action_outputs on the HTTP action +│ └── Read: outputs.statusCode, outputs.body +│ +├── statusCode = 401 +│ └── ► Auth header missing or expired OAuth token +│ Check: action inputs.authentication block +│ +├── statusCode = 403 +│ └── ► Insufficient permission on target resource +│ Check: service principal / user has access +│ +├── statusCode = 400 +│ └── ► Malformed request body +│ Check: action inputs.body expression; parse errors often in nested JSON +│ +├── statusCode = 404 +│ └── ► Wrong URL or resource deleted/renamed +│ Check: action inputs.uri expression +│ +└── statusCode = 500 / timeout + └── ► Target system error; retry policy may help + Add: "retryPolicy": {"type": "Fixed", "count": 3, "interval": "PT10S"} +``` + +--- + +## Data Quality Workflow + +``` +Flow succeeds but output data is wrong +│ +├── 1. Identify the first "wrong" output — which action produces it? +│ +├── 2. get_live_flow_run_action_outputs on that action +│ └── Compare actual output body vs expected +│ +├── Source array has nulls / unexpected values +│ ├── Check the trigger data — get_live_flow_run_action_outputs on trigger +│ └── Trace forward action by action until the value corrupts +│ +├── Merge/union has wrong values +│ └── Check union argument order: +│ union(NEW, old) = new wins ✓ +│ union(OLD, new) = old wins ← common bug +│ +├── Foreach output missing items +│ ├── Check foreach condition — filter may be too strict +│ └── Check if parallel foreach caused race condition (add Sequential) +│ +└── Date/time values wrong timezone + └── Use convertTimeZone() — utcNow() is always UTC +``` + +--- + +## Walk-Back Analysis (Unknown Failure) + +When the error message doesn't clearly name a root cause: + +```python +# 1. Get all action names from definition +defn = mcp("get_live_flow", environmentName=ENV, flowName=FLOW_ID) +actions = list(defn["properties"]["definition"]["actions"].keys()) + +# 2. 
Check status of each action in the failed run +for action in actions: + actions_out = mcp("get_live_flow_run_action_outputs", + environmentName=ENV, flowName=FLOW_ID, runName=RUN_ID, + actionName=action) + # Returns an array of action objects + item = actions_out[0] if actions_out else {} + status = item.get("status", "unknown") + print(f"{action}: {status}") + +# 3. Find the boundary between Succeeded and Failed/Skipped +# The first Failed action is likely the root cause (unless skipped by design) +``` + +Actions inside Foreach / Condition branches may appear nested — +check the parent action first to confirm the branch ran at all. + +--- + +## Post-Fix Verification Checklist + +1. `update_live_flow` returns `error: null` — definition accepted +2. `resubmit_live_flow_run` confirms new run started +3. Wait for run completion (poll `get_live_flow_runs` every 15 s) +4. Confirm new run `status = "Succeeded"` +5. If flow has downstream consumers (child flows, emails, SharePoint writes), + spot-check those too From 37c246d359a7428e1988ea77784eac7f3d90e15d Mon Sep 17 00:00:00 2001 From: Catherine Han Date: Fri, 6 Mar 2026 16:31:37 +1100 Subject: [PATCH 2/3] =?UTF-8?q?fix:=20address=20all=20review=20comments=20?= =?UTF-8?q?=E2=80=94=20README,=20cross-refs,=20response=20shapes,=20step?= =?UTF-8?q?=20numbering?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Add skills to docs/README.skills.md (fixes validate-readme CI check) - Update cross-skill references to use flowstudio- prefix (#1, #4, #7, #9) - Fix get_live_flow_run_action_outputs: returns array, index [0] (#2, #3) - Renumber Step 6→5, Step 7→6 — remove gap in build workflow (#8) - Fix connectionName note: it's the key, not the GUID (#10) - Remove invalid arrow function from Filter array expression (#11) --- docs/README.skills.md | 2 ++ .../flowstudio-power-automate-build/SKILL.md | 10 +++++----- .../references/action-patterns-core.md | 2 +- 
.../references/action-patterns-data.md | 3 ++- .../flowstudio-power-automate-debug/SKILL.md | 20 ++++++++++++------- 5 files changed, 23 insertions(+), 14 deletions(-) diff --git a/docs/README.skills.md b/docs/README.skills.md index ef0c81246..11918602a 100644 --- a/docs/README.skills.md +++ b/docs/README.skills.md @@ -112,6 +112,8 @@ See [CONTRIBUTING.md](../CONTRIBUTING.md#adding-skills) for guidelines on how to | [finalize-agent-prompt](../skills/finalize-agent-prompt/SKILL.md) | Finalize prompt file using the role of an AI agent to polish the prompt for the end user. | None | | [finnish-humanizer](../skills/finnish-humanizer/SKILL.md) | Detect and remove AI-generated markers from Finnish text, making it sound like a native Finnish speaker wrote it. Use when asked to "humanize", "naturalize", or "remove AI feel" from Finnish text, or when editing .md/.txt files containing Finnish content. Identifies 26 patterns (12 Finnish-specific + 14 universal) and 4 style markers. | `references/patterns.md` | | [first-ask](../skills/first-ask/SKILL.md) | Interactive, input-tool powered, task refinement workflow: interrogates scope, deliverables, constraints before carrying out the task; Requires the Joyride extension. | None | +| [flowstudio-power-automate-build](../skills/flowstudio-power-automate-build/SKILL.md) | Build, scaffold, and deploy Power Automate cloud flows using the FlowStudio MCP server. Load this skill when asked to: create a flow, build a new flow, deploy a flow definition, scaffold a Power Automate workflow, construct a flow JSON, update an existing flow's actions, patch a flow definition, add actions to a flow, wire up connections, or generate a workflow definition from scratch. Requires a FlowStudio MCP subscription — see https://mcp.flowstudio.app | `references/action-patterns-connectors.md`
`references/action-patterns-core.md`
`references/action-patterns-data.md`
`references/build-patterns.md`
`references/flow-schema.md`
`references/trigger-types.md` | +| [flowstudio-power-automate-debug](../skills/flowstudio-power-automate-debug/SKILL.md) | Debug failing Power Automate cloud flows using the FlowStudio MCP server. Load this skill when asked to: debug a flow, investigate a failed run, why is this flow failing, inspect action outputs, find the root cause of a flow error, fix a broken Power Automate flow, diagnose a timeout, trace a DynamicOperationRequestFailure, check connector auth errors, read error details from a run, or troubleshoot expression failures. Requires a FlowStudio MCP subscription — see https://mcp.flowstudio.app | `references/common-errors.md`
`references/debug-workflow.md` | | [flowstudio-power-automate-mcp](../skills/flowstudio-power-automate-mcp/SKILL.md) | Connect to and operate Power Automate cloud flows via a FlowStudio MCP server. Use when asked to: list flows, read a flow definition, check run history, inspect action outputs, resubmit a run, cancel a running flow, view connections, get a trigger URL, validate a definition, monitor flow health, or any task that requires talking to the Power Automate API through an MCP tool. Also use for Power Platform environment discovery and connection management. Requires a FlowStudio MCP subscription or compatible server — see https://mcp.flowstudio.app | `references/MCP-BOOTSTRAP.md`
`references/action-types.md`
`references/connection-references.md`
`references/tool-reference.md` | | [fluentui-blazor](../skills/fluentui-blazor/SKILL.md) | Guide for using the Microsoft Fluent UI Blazor component library (Microsoft.FluentUI.AspNetCore.Components NuGet package) in Blazor applications. Use this when the user is building a Blazor app with Fluent UI components, setting up the library, using FluentUI components like FluentButton, FluentDataGrid, FluentDialog, FluentToast, FluentNavMenu, FluentTextField, FluentSelect, FluentAutocomplete, FluentDesignTheme, or any component prefixed with "Fluent". Also use when troubleshooting missing providers, JS interop issues, or theming. | `references/DATAGRID.md`
`references/LAYOUT-AND-NAVIGATION.md`
`references/SETUP.md`
`references/THEMING.md` | | [folder-structure-blueprint-generator](../skills/folder-structure-blueprint-generator/SKILL.md) | Comprehensive technology-agnostic prompt for analyzing and documenting project folder structures. Auto-detects project types (.NET, Java, React, Angular, Python, Node.js, Flutter), generates detailed blueprints with visualization options, naming conventions, file placement patterns, and extension templates for maintaining consistent code organization across diverse technology stacks. | None | diff --git a/skills/flowstudio-power-automate-build/SKILL.md b/skills/flowstudio-power-automate-build/SKILL.md index 86da0a7ba..251121186 100644 --- a/skills/flowstudio-power-automate-build/SKILL.md +++ b/skills/flowstudio-power-automate-build/SKILL.md @@ -15,7 +15,7 @@ Step-by-step guide for constructing and deploying Power Automate cloud flows programmatically through the FlowStudio MCP server. **Prerequisite**: A FlowStudio MCP server must be reachable with a valid JWT. -See the `power-automate-mcp` skill for connection setup. +See the `flowstudio-power-automate-mcp` skill for connection setup. Subscribe at https://mcp.flowstudio.app --- @@ -271,7 +271,7 @@ else: --- -## Step 6 — Verify the Deployment +## Step 5 — Verify the Deployment ```python check = mcp("get_live_flow", environmentName=ENV, flowName=FLOW_ID) @@ -286,7 +286,7 @@ print("Actions:", list(acts.keys())) --- -## Step 7 — Test the Flow +## Step 6 — Test the Flow > **MANDATORY**: Before triggering any test run, **ask the user for confirmation**. 
> Running a flow has real side effects — it may send emails, post Teams messages, @@ -456,5 +456,5 @@ The `body/recipient` parameter format depends on the `location` value: ## Related Skills -- `power-automate-mcp` — Core connection setup and tool reference -- `power-automate-debug` — Debug failing flows after deployment +- `flowstudio-power-automate-mcp` — Core connection setup and tool reference +- `flowstudio-power-automate-debug` — Debug failing flows after deployment diff --git a/skills/flowstudio-power-automate-build/references/action-patterns-core.md b/skills/flowstudio-power-automate-build/references/action-patterns-core.md index 6f561250b..74221ba8d 100644 --- a/skills/flowstudio-power-automate-build/references/action-patterns-core.md +++ b/skills/flowstudio-power-automate-build/references/action-patterns-core.md @@ -467,7 +467,7 @@ Date today: @utcNow() Formatted date: @formatDateTime(utcNow(), 'dd/MM/yyyy') Add days: @addDays(utcNow(), 7) Array length: @length(variables('myArray')) -Filter array: @array(filter(outputs('Get_Items')?['body/value'], item => equals(item?['Status'], 'Active'))) +Filter array: Use the "Filter array" action (no inline filter expression exists in PA) Union (new wins): @union(body('New_Data'), outputs('Old_Data')) Sort: @sort(variables('myArray'), 'Date') Unix timestamp → date: @formatDateTime(addseconds('1970-1-1', triggerBody()?['created']), 'yyyy-MM-dd') diff --git a/skills/flowstudio-power-automate-build/references/action-patterns-data.md b/skills/flowstudio-power-automate-build/references/action-patterns-data.md index 53972c464..d1c652f2c 100644 --- a/skills/flowstudio-power-automate-build/references/action-patterns-data.md +++ b/skills/flowstudio-power-automate-build/references/action-patterns-data.md @@ -3,7 +3,8 @@ Array operations, HTTP calls, parsing, and data transformation patterns. > All examples assume `"runAfter"` is set appropriately. -> Replace `` with the GUID from `connectionReferences`. 
+> `` is the **key** in `connectionReferences` (e.g. `shared_sharepointonline`), not the GUID. +> The GUID goes in the map value's `connectionName` property. --- diff --git a/skills/flowstudio-power-automate-debug/SKILL.md b/skills/flowstudio-power-automate-debug/SKILL.md index 8a9fdf6d4..964ca3490 100644 --- a/skills/flowstudio-power-automate-debug/SKILL.md +++ b/skills/flowstudio-power-automate-debug/SKILL.md @@ -15,7 +15,7 @@ A step-by-step diagnostic process for investigating failing Power Automate cloud flows through the FlowStudio MCP server. **Prerequisite**: A FlowStudio MCP server must be reachable with a valid JWT. -See the `power-automate-mcp` skill for connection setup. +See the `flowstudio-power-automate-mcp` skill for connection setup. Subscribe at https://mcp.flowstudio.app --- @@ -183,12 +183,13 @@ For each action **leading up to** the failure, inspect its runtime output: ```python for action_name in ["Compose_WeekEnd", "HTTP_Get_Data", "Parse_JSON"]: - out = mcp("get_live_flow_run_action_outputs", + result = mcp("get_live_flow_run_action_outputs", environmentName=ENV, flowName=FLOW_ID, runName=RUN_ID, actionName=action_name) - # Check status + outputs + # Returns an array — single-element when actionName is provided + out = result[0] if result else {} print(action_name, out.get("status")) print(json.dumps(out.get("outputs", {}), indent=2)[:500]) ``` @@ -209,8 +210,13 @@ If the error mentions `InvalidTemplate` or a function name: ```python # Example: action uses split(item()?['Name'], ' ') # → null Name in the source data -outputs = mcp("get_live_flow_run_action_outputs", ..., actionName="Compose_Names") -names = outputs["outputs"]["body"] # check for nulls in the body array +result = mcp("get_live_flow_run_action_outputs", ..., actionName="Compose_Names") +# Returns a single-element array; index [0] to get the action object +if not result: + print("No outputs returned for Compose_Names") + names = [] +else: + names = result[0].get("outputs", 
{}).get("body") or [] nulls = [x for x in names if x.get("Name") is None] print(f"{len(nulls)} records with null Name") ``` @@ -312,5 +318,5 @@ print(f"Status: {result['status']}, Body: {result.get('body')}") ## Related Skills -- `power-automate-mcp` — Core connection setup and operation reference -- `power-automate-build` — Build and deploy new flows +- `flowstudio-power-automate-mcp` — Core connection setup and operation reference +- `flowstudio-power-automate-build` — Build and deploy new flows From b09dab7127feb9d6372367873343c85242d21b7a Mon Sep 17 00:00:00 2001 From: Catherine Han Date: Sat, 7 Mar 2026 14:10:44 +1100 Subject: [PATCH 3/3] feat: add flowstudio-power-automate plugin bundling all 3 skills Plugin bundles: - flowstudio-power-automate-mcp (core connection & CRUD) - flowstudio-power-automate-debug (debug failed runs) - flowstudio-power-automate-build (build & deploy flows) Install: copilot plugin install flowstudio-power-automate@awesome-copilot Per @aaronpowell's suggestion in review. --- .github/plugin/marketplace.json | 6 +++ docs/README.plugins.md | 1 + .../.github/plugin/plugin.json | 24 ++++++++++++ plugins/flowstudio-power-automate/README.md | 37 +++++++++++++++++++ 4 files changed, 68 insertions(+) create mode 100644 plugins/flowstudio-power-automate/.github/plugin/plugin.json create mode 100644 plugins/flowstudio-power-automate/README.md diff --git a/.github/plugin/marketplace.json b/.github/plugin/marketplace.json index c89b38a28..cf4d6a8ed 100644 --- a/.github/plugin/marketplace.json +++ b/.github/plugin/marketplace.json @@ -113,6 +113,12 @@ "description": "Task Researcher and Task Planner for intermediate to expert users and large codebases - Brought to you by microsoft/edge-ai", "version": "1.0.0" }, + { + "name": "flowstudio-power-automate", + "source": "flowstudio-power-automate", + "description": "Complete toolkit for managing Power Automate cloud flows via the FlowStudio MCP server. 
Includes skills for connecting to the MCP server, debugging failed flow runs, and building/deploying flows from natural language.", + "version": "1.0.0" + }, { "name": "frontend-web-dev", "source": "frontend-web-dev", diff --git a/docs/README.plugins.md b/docs/README.plugins.md index dd3afb0be..2962f90e0 100644 --- a/docs/README.plugins.md +++ b/docs/README.plugins.md @@ -33,6 +33,7 @@ See [CONTRIBUTING.md](../CONTRIBUTING.md#adding-plugins) for guidelines on how t | [dataverse-sdk-for-python](../plugins/dataverse-sdk-for-python/README.md) | Comprehensive collection for building production-ready Python integrations with Microsoft Dataverse. Includes official documentation, best practices, advanced features, file operations, and code generation prompts. | 4 items | dataverse, python, integration, sdk | | [devops-oncall](../plugins/devops-oncall/README.md) | A focused set of prompts, instructions, and a chat mode to help triage incidents and respond quickly with DevOps tools and Azure resources. | 3 items | devops, incident-response, oncall, azure | | [edge-ai-tasks](../plugins/edge-ai-tasks/README.md) | Task Researcher and Task Planner for intermediate to expert users and large codebases - Brought to you by microsoft/edge-ai | 2 items | architecture, planning, research, tasks, implementation | +| [flowstudio-power-automate](../plugins/flowstudio-power-automate/README.md) | Complete toolkit for managing Power Automate cloud flows via the FlowStudio MCP server. Includes skills for connecting to the MCP server, debugging failed flow runs, and building/deploying flows from natural language. | 3 items | power-automate, power-platform, flowstudio, mcp, model-context-protocol, cloud-flows, workflow-automation | | [frontend-web-dev](../plugins/frontend-web-dev/README.md) | Essential prompts, instructions, and chat modes for modern frontend web development including React, Angular, Vue, TypeScript, and CSS frameworks. 
| 4 items | frontend, web, react, typescript, javascript, css, html, angular, vue | | [gem-team](../plugins/gem-team/README.md) | A modular multi-agent team for complex project execution with DAG-based planning, parallel execution, TDD verification, and automated testing with energetic team lead. | 8 items | multi-agent, orchestration, dag-planning, parallel-execution, tdd, verification, automation, security, prd | | [go-mcp-development](../plugins/go-mcp-development/README.md) | Complete toolkit for building Model Context Protocol (MCP) servers in Go using the official github.com/modelcontextprotocol/go-sdk. Includes instructions for best practices, a prompt for generating servers, and an expert chat mode for guidance. | 2 items | go, golang, mcp, model-context-protocol, server-development, sdk | diff --git a/plugins/flowstudio-power-automate/.github/plugin/plugin.json b/plugins/flowstudio-power-automate/.github/plugin/plugin.json new file mode 100644 index 000000000..7c025d78d --- /dev/null +++ b/plugins/flowstudio-power-automate/.github/plugin/plugin.json @@ -0,0 +1,24 @@ +{ + "name": "flowstudio-power-automate", + "description": "Complete toolkit for managing Power Automate cloud flows via the FlowStudio MCP server. 
Includes skills for connecting to the MCP server, debugging failed flow runs, and building/deploying flows from natural language.", + "version": "1.0.0", + "author": { + "name": "Awesome Copilot Community" + }, + "repository": "https://github.com/github/awesome-copilot", + "license": "MIT", + "keywords": [ + "power-automate", + "power-platform", + "flowstudio", + "mcp", + "model-context-protocol", + "cloud-flows", + "workflow-automation" + ], + "skills": [ + "./skills/flowstudio-power-automate-mcp/", + "./skills/flowstudio-power-automate-debug/", + "./skills/flowstudio-power-automate-build/" + ] +} diff --git a/plugins/flowstudio-power-automate/README.md b/plugins/flowstudio-power-automate/README.md new file mode 100644 index 000000000..4924c6584 --- /dev/null +++ b/plugins/flowstudio-power-automate/README.md @@ -0,0 +1,37 @@ +# FlowStudio Power Automate Plugin + +Complete toolkit for managing Power Automate cloud flows via the FlowStudio MCP server. Connect, debug, and build/deploy flows using AI agents. + +Requires a FlowStudio MCP subscription — see https://flowstudio.app + +## Installation + +```bash +# Using Copilot CLI +copilot plugin install flowstudio-power-automate@awesome-copilot +``` + +## What's Included + +### Skills + +| Skill | Description | +|-------|-------------| +| `flowstudio-power-automate-mcp` | Core connection setup, tool discovery, and CRUD operations for Power Automate cloud flows via the FlowStudio MCP server. | +| `flowstudio-power-automate-debug` | Step-by-step diagnostic workflow for investigating and fixing failing Power Automate cloud flow runs. | +| `flowstudio-power-automate-build` | Build, scaffold, and deploy Power Automate cloud flows from natural language descriptions with bundled action pattern templates. | + +## Getting Started + +1. Install the plugin +2. Subscribe to FlowStudio MCP at https://flowstudio.app +3. Configure your MCP connection with the JWT from your workspace +4. 
Ask Copilot to list your flows, debug a failure, or build a new flow + +## Source + +This plugin is part of [Awesome Copilot](https://github.com/github/awesome-copilot), a community-driven collection of GitHub Copilot extensions. + +## License + +MIT