diff --git a/.github/AGENTS.md b/.github/AGENTS.md new file mode 100644 index 00000000000..9f701a7e33d --- /dev/null +++ b/.github/AGENTS.md @@ -0,0 +1,24 @@ +# Agent Instructions + +## Package Management + +This project uses **pnpm** exclusively for package management in the frontend (`invokeai/frontend/web/`). + +- ✅ Use `pnpm` commands (e.g., `pnpm install`, `pnpm run`) +- ❌ Never use `npm` or `yarn` commands +- ❌ Never suggest creating or using `package-lock.json` or `yarn.lock` +- ✅ The lock file is `pnpm-lock.yaml` + +Use the following pnpm commands for typical operations: + +- pnpm -C invokeai/frontend/web install +- pnpm -C invokeai/frontend/web build +- pnpm -C invokeai/frontend/web lint:tsc +- pnpm -C invokeai/frontend/web lint:dpdm +- pnpm -C invokeai/frontend/web lint:eslint +- pnpm -C invokeai/frontend/web lint:prettier + +## Project Structure + +- Backend: Python in `invokeai/` +- Frontend: TypeScript/React in `invokeai/frontend/web/` (uses pnpm) diff --git a/.github/workflows/frontend-checks.yml b/.github/workflows/frontend-checks.yml index 43bbfdec0ac..df767676441 100644 --- a/.github/workflows/frontend-checks.yml +++ b/.github/workflows/frontend-checks.yml @@ -41,6 +41,17 @@ jobs: steps: - uses: actions/checkout@v4 + - name: Fail if package-lock.json is added/modified (pnpm only) + shell: bash + working-directory: . + run: | + set -euo pipefail + git fetch --no-tags --prune --depth=1 origin "${{ github.base_ref }}" + if git diff --name-only "origin/${{ github.base_ref }}...HEAD" | grep -E '(^|/)package-lock\.json$'; then + echo "::error::package-lock.json was added or modified. This repo uses pnpm only." 
+ exit 1 + fi + - name: check for changed frontend files if: ${{ inputs.always_run != true }} id: changed-files diff --git a/docs-old/features/gallery.md b/docs-old/features/gallery.md index 1c12f59c7a7..eb246b83b95 100644 --- a/docs-old/features/gallery.md +++ b/docs-old/features/gallery.md @@ -34,6 +34,26 @@ The settings button opens a list of options. Below these two buttons, you'll see the Search Boards text entry area. You use this to search for specific boards by the name of the board. Next to it is the Add Board (+) button which lets you add new boards. Boards can be renamed by clicking on the name of the board under its thumbnail and typing in the new name. +### Virtual Boards by Date + +In addition to the regular user-created boards, the Gallery can show **virtual boards** that group your images automatically by their creation date. Virtual boards are not stored in the database — they are computed on the fly from existing image metadata, so enabling or disabling them never moves or modifies your images. + +#### Enabling Virtual Boards + +Open the boards settings popover (the gear icon next to the boards search field) and toggle **Show Virtual Boards**. A new collapsible **By Date** section then appears in the boards list, with one entry per day on which images were generated (e.g. `2026-03-18`). + +Each virtual board entry shows: + +- a cover thumbnail (the most recent image of that day) +- the number of generated **images** on that date +- the number of uploaded **assets** on that date + +Selecting a virtual board filters the gallery to show only the images from that day. Search, category filters (Images / Assets), starred-first sorting and sort direction all work the same way as on regular boards. + +!!! note "Read-only" + + Virtual boards are a view over your existing images. You cannot rename, delete or auto-assign to them, and images cannot be "moved into" a virtual board — they appear there automatically based on their creation date. 
To organize images permanently, use regular boards. + ### Board Thumbnail Menu Each board has a context menu (ctrl+click / right-click). diff --git a/docs/features/prompt-tools.md b/docs/features/prompt-tools.md new file mode 100644 index 00000000000..5b00bfa4956 --- /dev/null +++ b/docs/features/prompt-tools.md @@ -0,0 +1,50 @@ +# LLM Prompt Tools + +InvokeAI includes two built-in tools that use local language models to help you write better prompts. Both tools appear as small buttons in the top-right corner of the positive prompt area and are only visible when you have a compatible model installed. + +## Expand Prompt + +Takes your short prompt and expands it into a detailed, vivid description suitable for image generation. + +**How to use:** + +1. Type a brief prompt (e.g. "a cat in a garden") +2. Click the sparkle button in the prompt area +3. Select a Text LLM model from the dropdown +4. Click **Expand** +5. Your prompt is replaced with the expanded version + +**Compatible models:** Any HuggingFace model with a `ForCausalLM` architecture. Recommended options: + +| Model | Size | HuggingFace ID | +|-------|------|----------------| +| Qwen2.5 1.5B Instruct | ~3 GB | `Qwen/Qwen2.5-1.5B-Instruct` | +| Phi-3 Mini Instruct | ~7.5 GB | `microsoft/Phi-3-mini-4k-instruct` | +| TinyLlama Chat | ~2 GB | `TinyLlama/TinyLlama-1.1B-Chat-v1.0` | + +Install by pasting the HuggingFace ID into the Model Manager. The model is automatically detected as a **Text LLM** type. + +## Image to Prompt + +Upload an image and generate a descriptive prompt from it using a vision-language model. + +**How to use:** + +1. Click the image button in the prompt area +2. Select a LLaVA OneVision model from the dropdown +3. Click **Upload Image** and select an image +4. Click **Generate Prompt** +5. The generated description is set as your prompt + +**Compatible models:** LLaVA OneVision models (already supported by InvokeAI). + +## Undo + +Both tools overwrite your current prompt. 
You can undo this change: + +- Press **Ctrl+Z** (or **Cmd+Z** on macOS) in the prompt textarea within 30 seconds +- The undo state is cleared when you start typing manually + +## Workflow Node + +A **Text LLM** node is also available in the workflow editor for use in automated pipelines. It accepts a prompt string and model selection as inputs and outputs the expanded text as a string. diff --git a/docs/nodes/creatingNodePack.md b/docs/nodes/creatingNodePack.md new file mode 100644 index 00000000000..7dc7c8824ab --- /dev/null +++ b/docs/nodes/creatingNodePack.md @@ -0,0 +1,154 @@ +# Creating a Node Pack for the Custom Node Manager + +This guide explains how to structure your Git repository so it can be installed via InvokeAI's Custom Node Manager. + +## Repository Structure + +Your repository **is** the node pack. When a user installs it, the entire repo is cloned into the `nodes` directory. + +### Minimum Required Structure + +``` +my-node-pack/ +├── __init__.py # Required: imports your node classes +├── my_node.py # Your node implementation(s) +└── README.md # Recommended: describe your nodes +``` + +The `__init__.py` at the root is **mandatory**. Without it, the pack will not be loaded. + +### Recommended Structure + +``` +my-node-pack/ +├── __init__.py # Imports all node classes +├── requirements.txt # Python dependencies (user-installed) +├── README.md # Description, usage, examples +├── node_one.py # Node implementation +├── node_two.py # Node implementation +├── utils.py # Shared utilities +└── workflows/ # Optional: workflow files + ├── example_workflow.json + └── advanced_workflow.json +``` + +## The `__init__.py` File + +This file must import all invocation classes you want to register. Only classes imported here will be available in InvokeAI. 
+ +```python +from .node_one import MyFirstInvocation +from .node_two import MySecondInvocation +``` + +If you have nodes in subdirectories: + +```python +from .nodes.image_tools import CropInvocation, ResizeInvocation +from .nodes.text_tools import ConcatInvocation +``` + +## Dependencies (`requirements.txt` or `pyproject.toml`) + +If your nodes require additional Python packages, list them in a `requirements.txt` (or `pyproject.toml`) at the repository root: + +``` +numpy>=1.24 +opencv-python>=4.8 +``` + +The Custom Node Manager **does not** install these dependencies automatically — auto-installing into the running InvokeAI environment risks pulling in incompatible versions and breaking the application. After install, the UI shows the user a toast telling them that manual installation is required, and your README should document the exact install command (e.g. `pip install -r requirements.txt` from inside an activated InvokeAI environment). + +**Important:** Avoid pinning versions too tightly. InvokeAI has its own dependencies, and version conflicts can cause issues. Use minimum version constraints (`>=`) where possible. + +## Including Workflows + +If your repository contains workflow `.json` files, they will be **automatically imported** into the user's workflow library during installation. + +### Workflow Detection + +The installer recursively scans your repository for `.json` files. A file is recognized as a workflow if it contains both `nodes` and `edges` keys at the top level. + +### Tagging + +Imported workflows are automatically tagged with `node-pack:` so users can filter for them in the workflow library. When the node pack is uninstalled, these workflows are also removed. 
+ +### Workflow Format + +Workflows should follow the standard InvokeAI workflow format: + +```json +{ + "name": "My Example Workflow", + "author": "Your Name", + "description": "Demonstrates how to use MyFirstInvocation", + "version": "1.0.0", + "contact": "", + "tags": "example, my-node-pack", + "notes": "", + "meta": { + "version": "3.0.0", + "category": "user" + }, + "exposedFields": [], + "nodes": [...], + "edges": [...] +} +``` + +**Tip:** The easiest way to create a workflow file is to build the workflow in InvokeAI's workflow editor, then export it via **Save As** and copy the `.json` file into your repository. + +## Node Implementation + +Each node is a Python class decorated with `@invocation()`. Here's a minimal example: + +```python +from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation +from invokeai.app.invocations.fields import InputField, OutputField +from invokeai.invocation_api import BaseInvocationOutput, invocation_output + + +@invocation_output("my_output") +class MyOutput(BaseInvocationOutput): + result: str = OutputField(description="The result") + + +@invocation( + "my_node", + title="My Node", + tags=["example", "custom"], + category="custom", + version="1.0.0", +) +class MyInvocation(BaseInvocation): + """Does something useful.""" + + input_text: str = InputField(default="", description="Input text") + + def invoke(self, context) -> MyOutput: + return MyOutput(result=f"Processed: {self.input_text}") +``` + +For full details on the invocation API, see the [Invocation API documentation](invocation-api.md). + +## Best Practices + +- **Use a descriptive repository name** — it becomes the pack name shown in the UI +- **Include a README.md** with description, screenshots, and usage instructions +- **Version your nodes** using semver in the `@invocation()` decorator +- **Don't include large binary files** in your repository (models, weights, etc.) 
+- **Test your nodes** by placing the repo in the `nodes` directory before publishing +- **Include example workflows** so users can get started quickly +- **Tag your GitHub repository** with `invokeai-node` for discoverability +- **Avoid name collisions** — choose unique invocation type strings (e.g. `my_pack_resize` instead of just `resize`) + +## Testing Your Pack + +Before publishing, verify your pack works with the Custom Node Manager: + +1. Create a Git repository with your node pack +2. Push it to GitHub (or any Git host) +3. In InvokeAI, go to the Nodes tab and install it via the Git URL +4. Verify your nodes appear in the workflow editor +5. Verify any included workflows are imported +6. Test uninstalling — nodes and workflows should be removed diff --git a/docs/nodes/customNodeManager.md b/docs/nodes/customNodeManager.md new file mode 100644 index 00000000000..76a4b804a95 --- /dev/null +++ b/docs/nodes/customNodeManager.md @@ -0,0 +1,78 @@ +# Custom Node Manager + +The Custom Node Manager allows you to install, manage, and remove community node packs directly from the InvokeAI UI — no manual file copying required. + +## Accessing the Node Manager + +Click the **Nodes** tab (circuit icon) in the left sidebar, between Models and Queue. + +## Installing a Node Pack + +1. Navigate to the **Nodes** tab +2. On the right panel, select the **Git Repository URL** tab +3. Paste the Git URL of the node pack (e.g. `https://github.com/user/my-node-pack.git`) +4. Click **Install** + +The installer will: + +- Clone the repository into your `nodes` directory +- Load the nodes immediately — no restart needed +- Import any workflow `.json` files found in the repository into your workflow library (tagged with `node-pack:` for easy filtering) + +The install progress and results are shown in the **Install Log** at the bottom of the panel. 
+ +### Installing Python Dependencies + +The installer does **not** automatically run `pip install` for `requirements.txt` or `pyproject.toml`. Auto-installing dependencies into the running InvokeAI environment can pull in incompatible package versions and break the application. + +If a node pack ships a `requirements.txt` or `pyproject.toml`, you'll see a warning toast after installation. Install the dependencies yourself by following the instructions in the node pack's documentation (typically `pip install -r requirements.txt` from inside an activated InvokeAI environment, but check the pack's README first). After installing, click the **Reload** button so the new dependencies take effect. + +### Security Warning + +Custom nodes execute arbitrary Python code on your system. **Only install node packs from authors you trust.** Malicious nodes could harm your system or compromise your data. + +## Managing Installed Nodes + +The left panel shows all installed node packs with: + +- **Pack name** +- **Number of nodes** provided +- **Individual node types** as badges +- **File path** on disk + +### Reloading Nodes + +Click the **Reload** button to re-scan the nodes directory. This picks up any node packs that were manually added to the directory without using the installer. + +### Uninstalling a Node Pack + +Click the **Uninstall** button on any node pack. This will: + +- Remove the node pack directory +- Unregister the nodes from the system immediately +- Remove any workflows that were imported from the pack +- Update the workflow editor so the nodes are no longer available + +No restart is required. + +## Scan Folder Tab + +The **Scan Folder** tab shows the location of your nodes directory. Node packs placed there manually (e.g. via `git clone`) are automatically detected at startup. Use the **Reload** button to detect newly added packs without restarting. 
+ +## Troubleshooting + +### Node pack fails to install + +- Verify the Git URL is correct and accessible +- Check that the repository contains an `__init__.py` file at the top level +- Review the Install Log for error details + +### Nodes don't appear after install + +- Click the **Reload** button +- Check that the node pack's `__init__.py` imports its node classes +- Check the server console for error messages + +### Workflows show errors after uninstalling + +If you have user-created workflows that reference nodes from an uninstalled pack, those workflows will show errors for the missing node types. Reinstall the pack or remove the affected nodes from the workflow. diff --git a/docs/src/content/docs/development/Guides/recall-api-advanced.mdx b/docs/src/content/docs/development/Guides/recall-api-advanced.mdx deleted file mode 100644 index e767a4d52af..00000000000 --- a/docs/src/content/docs/development/Guides/recall-api-advanced.mdx +++ /dev/null @@ -1,377 +0,0 @@ ---- -title: Recall Parameters API (Advanced) ---- - -# Recall Parameters API - LoRAs, ControlNets, and IP Adapters with Images - -The Recall Parameters API supports recalling LoRAs, ControlNets (including T2I Adapters and Control LoRAs), and IP Adapters along with their associated weights and settings. Control Layers and IP Adapters can now include image references from the `INVOKEAI_ROOT/outputs/images` directory for fully functional control and image prompt functionality. - -## Key Features - -✅ **LoRAs**: Fully functional - adds to UI, queries model configs, applies weights
-✅ **Control Layers**: Full support with optional images from outputs/images
-✅ **IP Adapters**: Full support with optional reference images from outputs/images
-✅ **Model Name Resolution**: Automatic lookup from human-readable names to internal keys
-✅ **Image Validation**: Backend validates that image files exist before sending - -## Endpoints - -### POST `/api/v1/recall/{queue_id}` - -Updates recallable parameters for the frontend, including LoRAs, control adapters, and IP adapters with optional images. - -**Path Parameters:** -- `queue_id` (string): The queue ID to associate parameters with (typically "default") - -**Request Body:** - -All fields are optional. Include only the parameters you want to update. - -```typescript -{ - // Standard parameters - positive_prompt?: string; - negative_prompt?: string; - model?: string; // Model name or key - steps?: number; - cfg_scale?: number; - width?: number; - height?: number; - seed?: number; - // ... other standard parameters - - // LoRAs - loras?: Array<{ - model_name: string; // LoRA model name - weight?: number; // Default: 0.75, Range: -10 to 10 - is_enabled?: boolean; // Default: true - }>; - - // Control Layers (ControlNet, T2I Adapter, Control LoRA) - control_layers?: Array<{ - model_name: string; // Control adapter model name - image_name?: string; // Optional image filename from outputs/images - weight?: number; // Default: 1.0, Range: -1 to 2 - begin_step_percent?: number; // Default: 0.0, Range: 0 to 1 - end_step_percent?: number; // Default: 1.0, Range: 0 to 1 - control_mode?: "balanced" | "more_prompt" | "more_control"; // ControlNet only - }>; - - // IP Adapters - ip_adapters?: Array<{ - model_name: string; // IP Adapter model name - image_name?: string; // Optional reference image filename from outputs/images - weight?: number; // Default: 1.0, Range: -1 to 2 - begin_step_percent?: number; // Default: 0.0, Range: 0 to 1 - end_step_percent?: number; // Default: 1.0, Range: 0 to 1 - method?: "full" | "style" | "composition"; // Default: "full" - influence?: "Lowest" | "Low" | "Medium" | "High" | "Highest"; // Flux Redux only; default: "highest" - }>; -} -``` - -## Model Name Resolution - -The backend automatically resolves model names to their 
internal keys: - -1. **Main Models**: Resolved from the name to the model key -2. **LoRAs**: Searched in the LoRA model database -3. **Control Adapters**: Tried in order - ControlNet → T2I Adapter → Control LoRA -4. **IP Adapters**: Searched in the IP Adapter model database - -Models that cannot be resolved are skipped with a warning in the logs. - -## Image File Handling - -### Image Path Resolution - -When you specify an `image_name`, the backend: -1. Constructs the full path: `{INVOKEAI_ROOT}/outputs/images/{image_name}` -2. Validates that the file exists -3. Includes the image reference in the event sent to the frontend -4. Logs whether the image was found or not - -### Image Naming - -Images should be referenced by their filename as it appears in the outputs/images directory: -- ✅ Correct: `"image_name": "example.png"` -- ✅ Correct: `"image_name": "my_control_image_20240110.jpg"` -- ❌ Incorrect: `"image_name": "outputs/images/example.png"` (use relative filename only) -- ❌ Incorrect: `"image_name": "/full/path/to/example.png"` (use relative filename only) - -## Frontend Behavior - -### LoRAs -- **Fully Supported**: LoRAs are immediately added to the LoRA list in the UI -- Existing LoRAs are cleared before adding new ones -- Each LoRA's model config is fetched and applied with the specified weight -- LoRAs appear in the LoRA selector panel - -### Control Layers with Images -- **Fully Supported**: Control layers now support images from outputs/images -- Configuration includes model, weights, step percentages, and image reference -- Image availability is logged in frontend console -- Images can be used to create actual control layers through the UI - -### IP Adapters with Images -- **Fully Supported**: IP Adapters now support reference images from outputs/images -- Configuration includes model, weights, step percentages, method, and image reference -- Image availability is logged in frontend console -- Images can be used to create actual reference image layers 
through the UI - -## Examples - -### 1. Add LoRAs Only - -```bash -curl -X POST http://localhost:9090/api/v1/recall/default \ - -H "Content-Type: application/json" \ - -d '{ - "loras": [ - { - "model_name": "add-detail-xl", - "weight": 0.8, - "is_enabled": true - }, - { - "model_name": "sd_xl_offset_example-lora_1.0", - "weight": 0.5, - "is_enabled": true - } - ] - }' -``` - -### 2. Configure Control Layers with Image - -Replace `my_control_image.png` with an actual image filename from your outputs/images directory. - -```bash -curl -X POST http://localhost:9090/api/v1/recall/default \ - -H "Content-Type: application/json" \ - -d '{ - "control_layers": [ - { - "model_name": "controlnet-canny-sdxl-1.0", - "image_name": "my_control_image.png", - "weight": 0.75, - "begin_step_percent": 0.0, - "end_step_percent": 0.8, - "control_mode": "balanced" - } - ] - }' -``` - -### 3. Configure IP Adapters with Reference Image - -Replace `reference_face.png` with an actual image filename from your outputs/images directory. - -```bash -curl -X POST http://localhost:9090/api/v1/recall/default \ - -H "Content-Type: application/json" \ - -d '{ - "ip_adapters": [ - { - "model_name": "ip-adapter-plus-face_sd15", - "image_name": "reference_face.png", - "weight": 0.7, - "begin_step_percent": 0.0, - "end_step_percent": 1.0, - "method": "composition" - } - ] - }' -``` - -### 4. 
Complete Configuration with All Features - -```bash -curl -X POST http://localhost:9090/api/v1/recall/default \ - -H "Content-Type: application/json" \ - -d '{ - "positive_prompt": "masterpiece, detailed photo with specific style", - "negative_prompt": "blurry, low quality", - "model": "FLUX Schnell", - "steps": 25, - "cfg_scale": 8.0, - "width": 1024, - "height": 768, - "seed": 42, - "loras": [ - { - "model_name": "add-detail-xl", - "weight": 0.6, - "is_enabled": true - } - ], - "control_layers": [ - { - "model_name": "controlnet-depth-sdxl-1.0", - "image_name": "depth_map.png", - "weight": 1.0, - "begin_step_percent": 0.0, - "end_step_percent": 0.7 - } - ], - "ip_adapters": [ - { - "model_name": "ip-adapter-plus-face_sd15", - "image_name": "style_reference.png", - "weight": 0.5, - "begin_step_percent": 0.0, - "end_step_percent": 1.0, - "method": "style" - } - ] - }' -``` - -## Response Format - -```json -{ - "status": "success", - "queue_id": "default", - "updated_count": 15, - "parameters": { - "positive_prompt": "...", - "steps": 25, - "loras": [ - { - "model_key": "abc123...", - "weight": 0.6, - "is_enabled": true - } - ], - "control_layers": [ - { - "model_key": "controlnet-xyz...", - "weight": 1.0, - "image": { - "image_name": "depth_map.png" - } - } - ], - "ip_adapters": [ - { - "model_key": "ip-adapter-xyz...", - "weight": 0.5, - "image": { - "image_name": "style_reference.png" - } - } - ] - } -} -``` - -## WebSocket Events - -When parameters are updated, a `recall_parameters_updated` event is emitted via WebSocket to the queue room. The frontend automatically: - -1. Applies standard parameters (prompts, steps, dimensions, etc.) -2. Loads and adds LoRAs to the LoRA list -3. Logs control layer and IP adapter configurations with image information -4. 
Makes image references available for manual canvas/reference image creation - -## Logging - -### Backend Logs - -Backend logs show: -- Model name → key resolution (success/failure) -- Image file validation (found/not found) -- Parameter storage confirmation -- Event emission status - -Example log messages: -``` -INFO: Resolved ControlNet model name 'controlnet-canny-sdxl-1.0' to key 'controlnet-xyz...' -INFO: Found image file: depth_map.png -INFO: Updated 12 recall parameters for queue default -INFO: Resolved 1 LoRA(s) -INFO: Resolved 1 control layer(s) -INFO: Resolved 1 IP adapter(s) -``` - -### Frontend Logs - -Frontend logs (check browser console): -- Set `localStorage.ROARR_FILTER = 'debug'` to see all debug messages -- Look for messages from the `events` namespace -- LoRA loading, model resolution, and parameter application are logged - -Example log messages: -``` -INFO: Applied 5 recall parameters to store -INFO: Received 1 control layer(s) with image support -INFO: Control layer 1: controlnet-xyz... (weight: 0.75, image: depth_map.png) -DEBUG: Control layer 1 image available at: outputs/images/depth_map.png -INFO: Received 1 IP adapter(s) with image support -INFO: IP adapter 1: ip-adapter-xyz... (weight: 0.7, image: style_reference.png) -DEBUG: IP adapter 1 image available at: outputs/images/style_reference.png -``` - -## Limitations - -1. **Canvas Integration**: Control layers and IP adapters with images are currently logged but not automatically added to canvas layers - - Users can view the configuration and manually create canvas layers with the provided images - - Future enhancement: Auto-create canvas layers with stored images - -2. **Model Availability**: Models must be installed in InvokeAI before they can be recalled - -3. **Image Availability**: Images must exist in the outputs/images directory - - Missing images are logged as warnings but don't fail the request - - Other parameters are still applied even if images are missing - -4. 
**Image URLs**: Only local filenames from outputs/images are supported - - Remote image URLs are not currently supported - -## Testing - -Use the provided test script: - -```bash -./test_recall_loras_controlnets.sh -``` - -This will test: -- LoRA addition with multiple models -- Control layer configuration with image references -- IP adapter configuration with image references -- Combined parameter updates with all features - -Note: Update the image names in the test script to match actual images in your outputs/images directory. - -## Troubleshooting - -### Images Not Found - -If you see "Image file not found" in the logs: -1. Verify the image filename matches exactly (case-sensitive) -2. Ensure the image is in `{INVOKEAI_ROOT}/outputs/images/` -3. Check that the filename doesn't include the `outputs/images/` prefix - -### Models Not Found - -If you see "Could not find model" messages: -1. Verify the model name matches exactly (case-sensitive) -2. Ensure the model is installed in InvokeAI -3. Check the model name using the models browser in the UI - -### Event Not Received - -If the frontend doesn't receive the event: -1. Check browser console for connection errors -2. Verify the queue_id matches the frontend's queue (usually "default") -3. Check backend logs for event emission errors - -## Future Enhancements - -Potential improvements: -1. Auto-create canvas layers with provided control layer images -2. Auto-create reference image layers with provided IP adapter images -3. Support for image URLs -4. Batch operations for multiple queue IDs -5. 
Image upload capability (accept base64 or file upload) diff --git a/docs/src/content/docs/development/Guides/recall-api.mdx b/docs/src/content/docs/development/Guides/recall-api.mdx index 7f4f64c9648..f366da79b33 100644 --- a/docs/src/content/docs/development/Guides/recall-api.mdx +++ b/docs/src/content/docs/development/Guides/recall-api.mdx @@ -4,29 +4,54 @@ title: Recall Parameters API ## Overview -A new REST API endpoint has been added to the InvokeAI backend that allows programmatic updates to recallable parameters from another process. This enables external applications or scripts to modify frontend parameters like prompts, models, and step counts via HTTP requests. +The Recall Parameters API is a REST endpoint on the InvokeAI backend that +lets external processes set recallable generation parameters on the +frontend. Supported parameters include: -When parameters are updated via the API, the backend automatically broadcasts a WebSocket event to all connected frontend clients subscribed to that queue, causing them to update immediately. +- Core text and numeric parameters (prompts, model, steps, CFG, dimensions, seed, ...) +- LoRAs +- Control Layers (ControlNet, T2I Adapter, Control LoRA) with optional control images +- IP Adapters and FLUX Redux reference images with optional images +- Model-free reference images (FLUX.2 Klein, FLUX Kontext, Qwen Image Edit) -## How It Works +When parameters are updated via the API, the backend stores them in client +state persistence for the target queue and broadcasts a `recall_parameters_updated` +WebSocket event. Any frontend client subscribed to that queue applies the +new values immediately — no manual reload required. + +Typical use cases: -1. **API Request**: External application sends a POST request with parameters to update -2. **Storage**: Parameters are stored in client state persistence, associated with a queue ID -3. 
**Broadcast**: A WebSocket event (`recall_parameters_updated`) is emitted to all frontend clients listening to that queue -4. **Frontend Update**: Connected frontend clients receive the event and can process the updated parameters -5. **Immediate Display**: The frontend UI updates automatically with the new values +- An external image browser that wants to "recall" or "remix" the + generation parameters saved into a PNG's metadata. +- A script that pre-populates parameters before the user runs generation. +- Automated testing or batch workflows that want to reuse existing model + and adapter configurations. -This means if you have the InvokeAI frontend open in a browser, updating parameters via the API will instantly reflect on the screen without any manual action needed. +## How It Works + +1. **API request** — your client POSTs a JSON body of parameters to + `/api/v1/recall/{queue_id}`. +2. **Storage** — non-null parameters are stored under + `recall_*` keys in the client state persistence service, scoped to the + given `queue_id`. +3. **Resolution** — models are resolved from human-readable names to the + internal model keys used by the frontend, and image filenames are + validated against `{INVOKEAI_ROOT}/outputs/images`. +4. **Broadcast** — a `recall_parameters_updated` event is emitted on the + websocket room for `queue_id`. +5. **Frontend update** — any connected client subscribed to that queue + applies the update to its Redux store, so UI fields, LoRAs, control + layers, IP adapters, and reference images all populate immediately. ## Endpoint -**Base URL**: `http://localhost:9090/api/v1/recall/{queue_id}` +**Base URL:** `http://localhost:9090/api/v1/recall/{queue_id}` -## POST - Update Recall Parameters +The queue id is usually `default`. -Updates recallable parameters for a given queue ID. +### POST — Update Recall Parameters -### Request +Updates recallable parameters for the given `queue_id`. 
```http POST /api/v1/recall/{queue_id} @@ -44,11 +69,25 @@ Content-Type: application/json } ``` -The queue id is usually "default". +All parameters are optional — only send the fields you want to update. + +### GET — Retrieve Recall Parameters + +```http +GET /api/v1/recall/{queue_id} +``` + +```json +{ + "status": "success", + "queue_id": "queue_123", + "note": "Use the frontend to access stored recall parameters, or set specific parameters using POST" +} +``` -### Parameters +## Request Schema -All parameters are optional. Only provide the parameters you want to update: +### Core parameters | Parameter | Type | Description | |-----------|------|-------------| @@ -67,60 +106,130 @@ All parameters are optional. Only provide the parameters you want to update: | `width` | integer | Image width in pixels (≥64) | | `height` | integer | Image height in pixels (≥64) | | `seed` | integer | Random seed (≥0) | -| `denoise_strength` | number | Denoising strength (0-1) | -| `refiner_denoise_start` | number | Refiner denoising start (0-1) | +| `denoise_strength` | number | Denoising strength (0–1) | +| `refiner_denoise_start` | number | Refiner denoising start (0–1) | | `clip_skip` | integer | CLIP skip layers (≥0) | | `seamless_x` | boolean | Enable seamless X tiling | | `seamless_y` | boolean | Enable seamless Y tiling | | `refiner_positive_aesthetic_score` | number | Refiner positive aesthetic score | | `refiner_negative_aesthetic_score` | number | Refiner negative aesthetic score | -### Response +### Collection parameters -```json +```typescript { - "status": "success", - "queue_id": "queue_123", - "updated_count": 7, - "parameters": { - "positive_prompt": "a beautiful landscape", - "negative_prompt": "blurry, low quality", - "model": "sd-1.5", - "steps": 20, - "cfg_scale": 7.5, - "width": 512, - "height": 512, - "seed": 12345 - } + // LoRAs + loras?: Array<{ + model_name: string; // LoRA model name + weight?: number; // Default: 0.75, Range: -10 to 10 + is_enabled?: 
boolean; // Default: true + }>; + + // Control Layers (ControlNet, T2I Adapter, Control LoRA) + control_layers?: Array<{ + model_name: string; // Control adapter model name + image_name?: string; // Optional image filename from outputs/images + weight?: number; // Default: 1.0, Range: -1 to 2 + begin_step_percent?: number; // Default: 0.0, Range: 0 to 1 + end_step_percent?: number; // Default: 1.0, Range: 0 to 1 + control_mode?: "balanced" | "more_prompt" | "more_control"; // ControlNet only + }>; + + // IP Adapters (includes FLUX Redux) + ip_adapters?: Array<{ + model_name: string; // IP Adapter / FLUX Redux model name + image_name?: string; // Optional reference image filename from outputs/images + weight?: number; // Default: 1.0, Range: -1 to 2 + begin_step_percent?: number; // Default: 0.0, Range: 0 to 1 + end_step_percent?: number; // Default: 1.0, Range: 0 to 1 + method?: "full" | "style" | "composition"; // Default: "full" + image_influence?: "lowest" | "low" | "medium" | "high" | "highest"; // FLUX Redux only + }>; + + // Model-free reference images (FLUX.2 Klein, FLUX Kontext, Qwen Image Edit) + reference_images?: Array<{ + image_name: string; // Reference image filename from outputs/images + }>; } ``` -## GET - Retrieve Recall Parameters +## Model Name Resolution -Retrieves metadata about stored recall parameters. +The backend resolves model names to their internal keys: -### Request +1. **Main models** — resolved from the name to the model key. +2. **LoRAs** — searched in the LoRA model database. +3. **Control adapters** — tried in order: ControlNet → T2I Adapter → Control LoRA. +4. **IP Adapters** — searched in the IP Adapter database; falls back to FLUX Redux. -```http -GET /api/v1/recall/{queue_id} -``` +Models that cannot be resolved are skipped with a warning in the logs — +the rest of the parameters are still applied. 
-### Response +## Image File Handling -```json -{ - "status": "success", - "queue_id": "queue_123", - "note": "Use the frontend to access stored recall parameters, or set specific parameters using POST" -} -``` +When an `image_name` is supplied, the backend: + +1. Resolves `{INVOKEAI_ROOT}/outputs/images/{image_name}` via the image + files service (which also validates the path). +2. Opens the image to extract width/height. +3. Includes the image metadata in the event sent to the frontend. +4. Logs whether the image was found. + +Images must be referenced by their filename as it appears in the +outputs/images directory: + +- ✅ `"image_name": "example.png"` +- ✅ `"image_name": "my_control_image_20240110.jpg"` +- ❌ `"image_name": "outputs/images/example.png"` (no prefix) +- ❌ `"image_name": "/full/path/to/example.png"` (no absolute paths) + +Missing images are logged as warnings but **do not** fail the request — +remaining parameters are still applied. + +## Feature Details + +### LoRAs + +- Existing LoRAs are cleared before new ones are added. +- Each LoRA's model config is fetched and applied with the specified weight. +- LoRAs appear in the LoRA selector panel. + +### Control Layers + +- Fully supported with optional images from `outputs/images`. +- Configuration includes model, weights, step percentages, control mode, + and an image reference. +- Image availability is logged in the frontend console. + +### IP Adapters / FLUX Redux + +- Reference images loaded from `outputs/images` are validated and passed + through. +- Configuration includes model, weights, step percentages, method, and an + image reference. +- FLUX Redux uses `image_influence` instead of a numeric weight. + +### Model-free reference images + +Used by architectures that consume a reference image directly, with no +separate adapter model: + +- **FLUX.2 Klein** — built-in reference image support. +- **FLUX Kontext** — reference image associated with the main model. 
+- **Qwen Image Edit** — reference image associated with the main model. + +Because there is no adapter model to resolve, these entries carry only +`image_name`. When the frontend receives them, it picks the appropriate +config flavor (`flux2_reference_image`, `flux_kontext_reference_image`, +or `qwen_image_reference_image`) based on the currently-selected main +model, matching the behavior of a manual drag-and-drop. ## Usage Examples -### Using cURL +### cURL ```bash -# Update prompts and model +# Core parameters curl -X POST http://localhost:9090/api/v1/recall/default \ -H "Content-Type: application/json" \ -d '{ @@ -130,81 +239,285 @@ curl -X POST http://localhost:9090/api/v1/recall/default \ "steps": 30 }' -# Update just the seed +# Just the seed curl -X POST http://localhost:9090/api/v1/recall/default \ -H "Content-Type: application/json" \ -d '{"seed": 99999}' ``` -### Using Python +### LoRAs only + +```bash +curl -X POST http://localhost:9090/api/v1/recall/default \ + -H "Content-Type: application/json" \ + -d '{ + "loras": [ + {"model_name": "add-detail-xl", "weight": 0.8, "is_enabled": true}, + {"model_name": "sd_xl_offset_example-lora_1.0", "weight": 0.5} + ] + }' +``` + +### Control layers with an image + +```bash +curl -X POST http://localhost:9090/api/v1/recall/default \ + -H "Content-Type: application/json" \ + -d '{ + "control_layers": [ + { + "model_name": "controlnet-canny-sdxl-1.0", + "image_name": "my_control_image.png", + "weight": 0.75, + "begin_step_percent": 0.0, + "end_step_percent": 0.8, + "control_mode": "balanced" + } + ] + }' +``` + +### IP adapters with a reference image + +```bash +curl -X POST http://localhost:9090/api/v1/recall/default \ + -H "Content-Type: application/json" \ + -d '{ + "ip_adapters": [ + { + "model_name": "ip-adapter-plus-face_sd15", + "image_name": "reference_face.png", + "weight": 0.7, + "method": "composition" + } + ] + }' +``` + +### Model-free reference images (FLUX.2 Klein / FLUX Kontext / Qwen Image Edit) + 
+```bash +curl -X POST http://localhost:9090/api/v1/recall/default \ + -H "Content-Type: application/json" \ + -d '{ + "model": "FLUX.2 Klein", + "reference_images": [ + {"image_name": "style_reference.png"} + ] + }' +``` + +### Complete configuration + +```bash +curl -X POST http://localhost:9090/api/v1/recall/default \ + -H "Content-Type: application/json" \ + -d '{ + "positive_prompt": "masterpiece, detailed photo with specific style", + "negative_prompt": "blurry, low quality", + "model": "FLUX Schnell", + "steps": 25, + "cfg_scale": 8.0, + "width": 1024, + "height": 768, + "seed": 42, + "loras": [ + {"model_name": "add-detail-xl", "weight": 0.6} + ], + "control_layers": [ + { + "model_name": "controlnet-depth-sdxl-1.0", + "image_name": "depth_map.png", + "weight": 1.0, + "end_step_percent": 0.7 + } + ], + "ip_adapters": [ + { + "model_name": "ip-adapter-plus-face_sd15", + "image_name": "style_reference.png", + "weight": 0.5, + "method": "style" + } + ] + }' +``` + +### Python ```python import requests -import json -# Configuration API_URL = "http://localhost:9090/api/v1/recall/default" -# Update multiple parameters params = { "positive_prompt": "a serene forest", "negative_prompt": "people, buildings", "steps": 25, "cfg_scale": 7.0, - "seed": 42 + "seed": 42, } response = requests.post(API_URL, json=params) result = response.json() - print(f"Status: {result['status']}") print(f"Updated {result['updated_count']} parameters") -print(json.dumps(result['parameters'], indent=2)) ``` -### Using Node.js/JavaScript +### JavaScript ```javascript const API_URL = 'http://localhost:9090/api/v1/recall/default'; -const params = { - positive_prompt: 'a beautiful sunset', - negative_prompt: 'blurry', - steps: 20, - width: 768, - height: 768, - seed: 12345 -}; - fetch(API_URL, { method: 'POST', headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify(params) + body: JSON.stringify({ + positive_prompt: 'a beautiful sunset', + steps: 20, + width: 768, + height: 
768, + seed: 12345, + }), }) - .then(res => res.json()) - .then(data => console.log(data)); + .then((res) => res.json()) + .then((data) => console.log(data)); ``` -## Implementation Details +## Response Format -- Parameters are stored in the client state persistence service, using keys prefixed with `recall_` -- The parameters are associated with a `queue_id`, allowing multiple concurrent sessions to maintain separate parameter sets -- Only non-null parameters are processed and stored -- The endpoint provides validation for numeric ranges (e.g., steps ≥ 1, dimensions ≥ 64) -- All parameter values are JSON-serialized for storage -- When parameter values are changed, the backend generates a web sockets event that the frontend listens to. +```json +{ + "status": "success", + "queue_id": "default", + "updated_count": 15, + "parameters": { + "positive_prompt": "...", + "steps": 25, + "loras": [ + {"model_key": "abc123...", "weight": 0.6, "is_enabled": true} + ], + "control_layers": [ + { + "model_key": "controlnet-xyz...", + "weight": 1.0, + "image": {"image_name": "depth_map.png", "width": 1024, "height": 768} + } + ], + "ip_adapters": [ + { + "model_key": "ip-adapter-xyz...", + "weight": 0.5, + "image": {"image_name": "style_reference.png", "width": 1024, "height": 1024} + } + ], + "reference_images": [ + {"image": {"image_name": "style_reference.png", "width": 1024, "height": 1024}} + ] + } +} +``` -## Integration with Frontend +## WebSocket Events -The stored parameters can be accessed by the frontend through the -existing client state API or by implementing hooks that read from the -recall parameter storage. This allows external applications to -pre-populate generation parameters before the user initiates image -generation. +Parameter updates emit a `recall_parameters_updated` event to the queue +room. Connected frontend clients automatically: + +1. Apply standard parameters (prompts, steps, dimensions, etc.). +2. Load and add LoRAs to the LoRA list. +3. 
Apply control-layer configurations. +4. Apply IP Adapter / FLUX Redux configurations with their images. +5. Append model-free reference images, using the config flavor that + matches the currently-selected main model. ## Error Handling -- **400 Bad Request**: Invalid parameters or parameter values -- **500 Internal Server Error**: Server-side error storing or retrieving parameters +- **400 Bad Request** — invalid parameters or parameter values. +- **500 Internal Server Error** — server-side storage or retrieval failure. + +Errors include detailed messages. Missing images and unresolved model +names are **not** errors — they are logged and the remaining parameters +are still applied. + +## Logging + +### Backend + +``` +INFO: Resolved ControlNet model name 'controlnet-canny-sdxl-1.0' to key 'controlnet-xyz...' +INFO: Found image file: depth_map.png (1024x768) +INFO: Updated 12 recall parameters for queue default +INFO: Resolved 1 LoRA(s) +INFO: Resolved 1 control layer(s) +INFO: Resolved 1 IP adapter(s) +INFO: Resolved 1 reference image(s) +``` + +### Frontend + +Set `localStorage.ROARR_FILTER = 'debug'` in the browser to see all debug +messages under the `events` namespace. + +``` +INFO: Applied 5 recall parameters to store +INFO: Applied 1 IP adapter(s), replacing existing list +INFO: Applied 1 model-free reference image(s) +DEBUG: Built IP adapter ref image state: ip-adapter-xyz... (weight: 0.7) +DEBUG: IP adapter image: outputs/images/depth_map.png (1024x768) +``` + +## Implementation Details + +- Parameters are stored in the client state persistence service under + `recall_*` keys, scoped to the `queue_id`. +- Numeric validation runs at the FastAPI layer (e.g. `steps ≥ 1`, `width ≥ 64`). +- Only non-null parameters are processed, stored, and broadcast. +- Model-key resolution runs **after** the raw parameters are stored, so + an unresolvable model name simply drops out of the broadcast but does + not corrupt the persisted state. 
+- The broadcast payload contains resolved model keys and image metadata + (width/height) so the frontend can populate its store without extra + round-trips. + +## Troubleshooting + +### Image not found + +If you see "Image file not found" in the logs: + +1. Verify the filename matches exactly (case-sensitive). +2. Ensure the image is in `{INVOKEAI_ROOT}/outputs/images/`. +3. Check that the filename does not include the `outputs/images/` prefix. + +### Model not found + +If you see "Could not find model": + +1. Verify the model name matches exactly (case-sensitive). +2. Ensure the model is installed. +3. Check the name via the Models Manager panel. + +### Event not received + +1. Check the browser console for socket connection errors. +2. Verify the `queue_id` matches the frontend's queue (usually `default`). +3. Check backend logs for event emission errors. + +## Limitations + +- **Model availability** — models referenced in the payload must be installed. +- **Image availability** — images must exist in `outputs/images`; remote + URLs are not supported. +- **Canvas auto-layer creation** — control layers and IP adapters with + images populate the recall state, but creating a canvas layer from + them still happens through the UI. + +## Future enhancements + +Potential improvements not yet implemented: -Errors include detailed messages explaining what went wrong. +1. Auto-create canvas layers from control-layer images in the payload. +2. Auto-create reference-image layers from IP Adapter images in the payload. +3. Support remote image URLs in addition to local `outputs/images` filenames. +4. Image upload capability (accept base64 or file upload directly via the API). +5. Batch operations that target multiple `queue_id`s in a single request. 
diff --git a/invokeai/app/api/routers/client_state.py b/invokeai/app/api/routers/client_state.py index 2e34ea9fe6b..cd92263f97c 100644 --- a/invokeai/app/api/routers/client_state.py +++ b/invokeai/app/api/routers/client_state.py @@ -45,6 +45,44 @@ async def set_client_state( raise HTTPException(status_code=500, detail="Error setting client state") +@client_state_router.get( + "/{queue_id}/get_keys_by_prefix", + operation_id="get_client_state_keys_by_prefix", + response_model=list[str], +) +async def get_client_state_keys_by_prefix( + current_user: CurrentUserOrDefault, + queue_id: str = Path(description="The queue id (ignored, kept for backwards compatibility)"), + prefix: str = Query(..., description="Prefix to filter keys by"), +) -> list[str]: + """Gets client state keys matching a prefix for the current user""" + try: + return ApiDependencies.invoker.services.client_state_persistence.get_keys_by_prefix( + current_user.user_id, prefix + ) + except Exception as e: + logging.error(f"Error getting client state keys: {e}") + raise HTTPException(status_code=500, detail="Error getting client state keys") + + +@client_state_router.post( + "/{queue_id}/delete_by_key", + operation_id="delete_client_state_by_key", + responses={204: {"description": "Client state key deleted"}}, +) +async def delete_client_state_by_key( + current_user: CurrentUserOrDefault, + queue_id: str = Path(description="The queue id (ignored, kept for backwards compatibility)"), + key: str = Query(..., description="Key to delete"), +) -> None: + """Deletes a specific client state key for the current user""" + try: + ApiDependencies.invoker.services.client_state_persistence.delete_by_key(current_user.user_id, key) + except Exception as e: + logging.error(f"Error deleting client state key: {e}") + raise HTTPException(status_code=500, detail="Error deleting client state key") + + @client_state_router.post( "/{queue_id}/delete", operation_id="delete_client_state", diff --git 
a/invokeai/app/api/routers/custom_nodes.py b/invokeai/app/api/routers/custom_nodes.py new file mode 100644 index 00000000000..3ee8c0ec99c --- /dev/null +++ b/invokeai/app/api/routers/custom_nodes.py @@ -0,0 +1,504 @@ +"""FastAPI routes for custom node management.""" + +import json +import shutil +import subprocess +import sys +import traceback +from importlib.util import module_from_spec, spec_from_file_location +from pathlib import Path +from typing import Optional + +from fastapi import Body +from fastapi.routing import APIRouter +from pydantic import BaseModel, Field + +from invokeai.app.api.auth_dependencies import AdminUserOrDefault +from invokeai.app.api.dependencies import ApiDependencies +from invokeai.app.invocations.baseinvocation import InvocationRegistry +from invokeai.app.services.config.config_default import get_config +from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutIDValidator +from invokeai.backend.util.logging import InvokeAILogger + +custom_nodes_router = APIRouter(prefix="/v2/custom_nodes", tags=["custom_nodes"]) + +logger = InvokeAILogger.get_logger() + +# Name of the manifest file written inside a pack directory to track which workflows +# were imported by that pack. Used on uninstall to delete only pack-imported workflows +# — deleting by tag alone is unsafe because users can edit tags on their own workflows. 
+PACK_MANIFEST_FILENAME = ".invokeai_pack_manifest.json" + + +class NodePackInfo(BaseModel): + """Information about an installed node pack.""" + + name: str = Field(description="The name of the node pack.") + path: str = Field(description="The path to the node pack directory.") + node_count: int = Field(description="The number of nodes in the pack.") + node_types: list[str] = Field(description="The invocation types provided by this node pack.") + + +class NodePackListResponse(BaseModel): + """Response for listing installed node packs.""" + + node_packs: list[NodePackInfo] = Field(description="List of installed node packs.") + custom_nodes_path: str = Field(description="The configured custom nodes directory path.") + + +class InstallNodePackRequest(BaseModel): + """Request to install a node pack from a git URL.""" + + source: str = Field(description="Git URL of the node pack to install.") + + +class InstallNodePackResponse(BaseModel): + """Response after installing a node pack.""" + + name: str = Field(description="The name of the installed node pack.") + success: bool = Field(description="Whether the installation was successful.") + message: str = Field(description="Status message.") + workflows_imported: int = Field(default=0, description="Number of workflows imported from the pack.") + requires_dependencies: bool = Field( + default=False, + description="Whether the pack ships a dependency manifest (requirements.txt or pyproject.toml) " + "that the user must install manually following the pack's documentation.", + ) + dependency_file: Optional[str] = Field( + default=None, + description="Name of the detected dependency manifest file, if any.", + ) + + +class UninstallNodePackResponse(BaseModel): + """Response after uninstalling a node pack.""" + + name: str = Field(description="The name of the uninstalled node pack.") + success: bool = Field(description="Whether the uninstall was successful.") + message: str = Field(description="Status message.") + + +def 
_get_custom_nodes_path() -> Path: + """Returns the configured custom nodes directory path.""" + config = get_config() + return config.custom_nodes_path + + +def _get_installed_packs() -> list[NodePackInfo]: + """Scans the custom nodes directory and returns info about installed packs.""" + custom_nodes_path = _get_custom_nodes_path() + + if not custom_nodes_path.exists(): + return [] + + packs: list[NodePackInfo] = [] + + # Get all node types grouped by node_pack + node_types_by_pack: dict[str, list[str]] = {} + for inv_class in InvocationRegistry._invocation_classes: + node_pack = inv_class.UIConfig.node_pack + inv_type = inv_class.get_type() + if node_pack not in node_types_by_pack: + node_types_by_pack[node_pack] = [] + node_types_by_pack[node_pack].append(inv_type) + + for d in sorted(custom_nodes_path.iterdir()): + if not d.is_dir(): + continue + if d.name.startswith("_") or d.name.startswith("."): + continue + init = d / "__init__.py" + if not init.exists(): + continue + + pack_name = d.name + node_types = node_types_by_pack.get(pack_name, []) + + packs.append( + NodePackInfo( + name=pack_name, + path=str(d), + node_count=len(node_types), + node_types=node_types, + ) + ) + + return packs + + +@custom_nodes_router.get( + "/", + operation_id="list_custom_node_packs", + response_model=NodePackListResponse, +) +async def list_custom_node_packs(current_admin: AdminUserOrDefault) -> NodePackListResponse: + """Lists all installed custom node packs. + + Admin-only: the response includes absolute filesystem paths, and non-admins have no + legitimate use for pack management data (install/uninstall/reload are also admin-only). 
+ """ + packs = _get_installed_packs() + return NodePackListResponse(node_packs=packs, custom_nodes_path=str(_get_custom_nodes_path())) + + +@custom_nodes_router.post( + "/install", + operation_id="install_custom_node_pack", + response_model=InstallNodePackResponse, +) +async def install_custom_node_pack( + current_admin: AdminUserOrDefault, + request: InstallNodePackRequest = Body(description="The source URL to install from."), +) -> InstallNodePackResponse: + """Installs a custom node pack from a git URL by cloning it into the nodes directory.""" + custom_nodes_path = _get_custom_nodes_path() + custom_nodes_path.mkdir(parents=True, exist_ok=True) + + source = request.source.strip() + + # Extract pack name from URL + pack_name = source.rstrip("/").split("/")[-1] + if pack_name.endswith(".git"): + pack_name = pack_name[:-4] + + target_dir = custom_nodes_path / pack_name + + if target_dir.exists(): + return InstallNodePackResponse( + name=pack_name, + success=False, + message=f"Node pack '{pack_name}' already exists. Uninstall it first to reinstall.", + ) + + try: + # Clone the repository + result = subprocess.run( + ["git", "clone", source, str(target_dir)], + capture_output=True, + text=True, + timeout=120, + ) + + if result.returncode != 0: + # Clean up on failure + if target_dir.exists(): + shutil.rmtree(target_dir) + return InstallNodePackResponse( + name=pack_name, + success=False, + message=f"Git clone failed: {result.stderr.strip()}", + ) + + # Detect dependency manifests but do NOT install them automatically. + # The user is responsible for installing dependencies per the pack's documentation, + # since arbitrary pip installs can break the InvokeAI environment. 
+ dependency_file: Optional[str] = None + for candidate in ("requirements.txt", "pyproject.toml"): + if (target_dir / candidate).exists(): + dependency_file = candidate + logger.info(f"Node pack '{pack_name}' ships a {candidate}; user must install dependencies manually.") + break + + # Check for __init__.py + init_file = target_dir / "__init__.py" + if not init_file.exists(): + shutil.rmtree(target_dir) + return InstallNodePackResponse( + name=pack_name, + success=False, + message=f"Node pack '{pack_name}' does not contain an __init__.py file.", + ) + + # Load the node pack at runtime + _load_node_pack(pack_name, target_dir) + + # Import any workflows found in the pack, owned by the installing admin and shared with all users + imported_workflow_ids = _import_workflows_from_pack(target_dir, pack_name, owner_user_id=current_admin.user_id) + _write_pack_manifest(target_dir, imported_workflow_ids) + workflows_imported = len(imported_workflow_ids) + workflow_msg = f" Imported {workflows_imported} workflow(s)." if workflows_imported > 0 else "" + dependency_msg = ( + f" This pack includes a {dependency_file} — install its dependencies manually following the pack's documentation." 
+ if dependency_file + else "" + ) + + return InstallNodePackResponse( + name=pack_name, + success=True, + message=f"Successfully installed node pack '{pack_name}'.{workflow_msg}{dependency_msg}", + workflows_imported=workflows_imported, + requires_dependencies=dependency_file is not None, + dependency_file=dependency_file, + ) + + except subprocess.TimeoutExpired: + if target_dir.exists(): + shutil.rmtree(target_dir) + return InstallNodePackResponse( + name=pack_name, + success=False, + message="Installation timed out.", + ) + except Exception: + if target_dir.exists(): + shutil.rmtree(target_dir) + error = traceback.format_exc() + logger.error(f"Failed to install node pack {pack_name}: {error}") + return InstallNodePackResponse( + name=pack_name, + success=False, + message=f"Installation failed: {error}", + ) + + +@custom_nodes_router.delete( + "/{pack_name}", + operation_id="uninstall_custom_node_pack", + response_model=UninstallNodePackResponse, +) +async def uninstall_custom_node_pack( + current_admin: AdminUserOrDefault, + pack_name: str, +) -> UninstallNodePackResponse: + """Uninstalls a custom node pack by removing its directory. + + Note: A restart is required for the node removal to take full effect. + Installed nodes from the pack will remain registered until restart. + """ + custom_nodes_path = _get_custom_nodes_path() + target_dir = custom_nodes_path / pack_name + + if not target_dir.exists(): + return UninstallNodePackResponse( + name=pack_name, + success=False, + message=f"Node pack '{pack_name}' not found.", + ) + + try: + # Read the manifest BEFORE removing the directory — it records exactly which + # workflow IDs this pack imported, so uninstall doesn't accidentally delete + # user workflows that happen to share the pack tag. 
+ imported_workflow_ids = _read_pack_manifest(target_dir) + + shutil.rmtree(target_dir) + + # Unregister the nodes from the registry so they disappear immediately + removed_types = InvocationRegistry.unregister_pack(pack_name) + if removed_types: + # Invalidate OpenAPI schema cache so frontend gets updated node definitions + from invokeai.app.api_app import app + + app.openapi_schema = None + logger.info( + f"Unregistered {len(removed_types)} node(s) from pack '{pack_name}': {', '.join(removed_types)}" + ) + + # Remove the pack's module subtree from sys.modules. Only dropping the + # root module would leave submodules cached; on reinstall the cached + # submodules would be reused without re-running their @invocation + # decorators, so the pack would show up with 0 nodes until restart. + _purge_pack_modules(pack_name) + + # Remove only workflows this pack imported, using the manifest-recorded IDs + workflows_removed = _remove_workflows_by_ids(imported_workflow_ids, pack_name) + workflow_msg = f" Removed {workflows_removed} workflow(s)." if workflows_removed > 0 else "" + + return UninstallNodePackResponse( + name=pack_name, + success=True, + message=f"Successfully uninstalled node pack '{pack_name}'.{workflow_msg}", + ) + except Exception: + error = traceback.format_exc() + logger.error(f"Failed to uninstall node pack {pack_name}: {error}") + return UninstallNodePackResponse( + name=pack_name, + success=False, + message=f"Uninstall failed: {error}", + ) + + +@custom_nodes_router.post( + "/reload", + operation_id="reload_custom_nodes", +) +async def reload_custom_nodes(current_admin: AdminUserOrDefault) -> dict[str, str]: + """Triggers a reload of all custom nodes. + + This re-scans the nodes directory and loads any new node packs. + Already loaded packs are skipped. 
+ """ + config = get_config() + custom_nodes_path = config.custom_nodes_path + + if not custom_nodes_path.exists(): + return {"status": "No custom nodes directory found."} + + from invokeai.app.invocations.load_custom_nodes import load_custom_nodes + + load_custom_nodes(custom_nodes_path, logger) + + # Invalidate the OpenAPI schema cache so the frontend gets updated node definitions + from invokeai.app.api_app import app + + app.openapi_schema = None + + return {"status": "Custom nodes reloaded successfully."} + + +def _purge_pack_modules(pack_name: str) -> list[str]: + """Removes the pack's root module and all of its submodules from sys.modules. + + After uninstall, cached submodules (e.g. `pack_name.nodes`, `pack_name.foo.bar`) + must be evicted as well — otherwise a subsequent reinstall reuses the cached + objects, the @invocation decorators never re-run, and the pack ends up loaded + with zero registered nodes until a full process restart. + """ + prefix = f"{pack_name}." + to_remove = [name for name in sys.modules if name == pack_name or name.startswith(prefix)] + for name in to_remove: + del sys.modules[name] + return to_remove + + +def _load_node_pack(pack_name: str, pack_dir: Path) -> None: + """Loads a single node pack at runtime.""" + init = pack_dir / "__init__.py" + if not init.exists(): + return + + if pack_name in sys.modules: + logger.info(f"Node pack {pack_name} already loaded, skipping.") + return + + spec = spec_from_file_location(pack_name, init.absolute()) + if spec is None or spec.loader is None: + logger.warning(f"Could not load {init}") + return + + logger.info(f"Loading node pack {pack_name}") + module = module_from_spec(spec) + sys.modules[spec.name] = module + spec.loader.exec_module(module) + + # Invalidate OpenAPI schema cache + from invokeai.app.api_app import app + + app.openapi_schema = None + + logger.info(f"Successfully loaded node pack {pack_name}") + + +def _import_workflows_from_pack(pack_dir: Path, pack_name: str, owner_user_id: 
str) -> list[str]: + """Scans a node pack directory for workflow JSON files and imports them into the workflow library. + + A JSON file is considered a workflow if it contains 'nodes' and 'edges' keys at the top level. + Workflows are imported as user workflows owned by the installing admin and marked public so all + users can see them — a pack is an admin-installed shared resource, not a private asset. + + Returns the list of workflow IDs successfully created, in import order. + """ + imported_ids: list[str] = [] + + # Search for .json files recursively + for json_file in pack_dir.rglob("*.json"): + # Skip our own manifest file + if json_file.name == PACK_MANIFEST_FILENAME: + continue + try: + with open(json_file, "r", encoding="utf-8") as f: + data = json.load(f) + + # Check if this looks like a workflow (must have nodes and edges) + if not isinstance(data, dict): + continue + if "nodes" not in data or "edges" not in data: + continue + + # Ensure the workflow has a meta section with category set to "user" + if "meta" not in data: + data["meta"] = {"version": "3.0.0", "category": "user"} + else: + data["meta"]["category"] = "user" + + # Add the node pack name to tags for discoverability (display only — uninstall + # does not rely on this tag, since users can edit tags on their own workflows). 
+ existing_tags = data.get("tags", "") + pack_tag = f"node-pack:{pack_name}" + if pack_tag not in existing_tags: + data["tags"] = f"{existing_tags}, {pack_tag}".strip(", ") if existing_tags else pack_tag + + # Remove the 'id' field if present — the system will assign a new one + data.pop("id", None) + + # Validate and import the workflow + workflow = WorkflowWithoutIDValidator.validate_python(data) + created = ApiDependencies.invoker.services.workflow_records.create( + workflow=workflow, user_id=owner_user_id, is_public=True + ) + imported_ids.append(created.workflow_id) + logger.info(f"Imported workflow '{workflow.name}' from node pack '{pack_name}'") + + except Exception: + logger.warning(f"Skipped non-workflow or invalid JSON file: {json_file}") + continue + + if imported_ids: + logger.info(f"Imported {len(imported_ids)} workflow(s) from node pack '{pack_name}'") + + return imported_ids + + +def _write_pack_manifest(pack_dir: Path, workflow_ids: list[str]) -> None: + """Writes the pack manifest recording which workflow IDs were imported from the pack.""" + manifest_path = pack_dir / PACK_MANIFEST_FILENAME + try: + with open(manifest_path, "w", encoding="utf-8") as f: + json.dump({"workflow_ids": workflow_ids}, f) + except Exception: + logger.warning(f"Failed to write pack manifest at {manifest_path}") + + +def _read_pack_manifest(pack_dir: Path) -> list[str]: + """Reads workflow IDs that this pack's install recorded in its manifest. + + Returns an empty list if the manifest is missing or malformed. We deliberately do NOT + fall back to tag-based lookup: workflow tags are user-editable and could collide with + unrelated workflows, so we only delete what we recorded ourselves at install time. 
+ """ + manifest_path = pack_dir / PACK_MANIFEST_FILENAME + if not manifest_path.exists(): + return [] + try: + with open(manifest_path, "r", encoding="utf-8") as f: + data = json.load(f) + ids = data.get("workflow_ids", []) + if not isinstance(ids, list): + return [] + return [str(x) for x in ids if isinstance(x, str)] + except Exception: + logger.warning(f"Failed to read pack manifest at {manifest_path}") + return [] + + +def _remove_workflows_by_ids(workflow_ids: list[str], pack_name: str) -> int: + """Deletes the given workflow IDs. Used during uninstall to remove only the workflows + this pack's install recorded in its manifest. + """ + if not workflow_ids: + return 0 + + removed_count = 0 + for workflow_id in workflow_ids: + try: + ApiDependencies.invoker.services.workflow_records.delete(workflow_id) + removed_count += 1 + except Exception: + logger.warning(f"Failed to remove workflow '{workflow_id}' (from node pack '{pack_name}')") + + if removed_count > 0: + logger.info(f"Removed {removed_count} workflow(s) from node pack '{pack_name}'") + + return removed_count diff --git a/invokeai/app/api/routers/recall_parameters.py b/invokeai/app/api/routers/recall_parameters.py index ec08adba2e8..cbc986ab491 100644 --- a/invokeai/app/api/routers/recall_parameters.py +++ b/invokeai/app/api/routers/recall_parameters.py @@ -3,7 +3,7 @@ import json from typing import Any, Literal, Optional -from fastapi import Body, HTTPException, Path +from fastapi import Body, HTTPException, Path, Query from fastapi.routing import APIRouter from pydantic import BaseModel, ConfigDict, Field @@ -58,6 +58,20 @@ class IPAdapterRecallParameter(BaseModel): ) +class ReferenceImageRecallParameter(BaseModel): + """Global reference-image configuration for recall. + + Used for reference images that feed directly into the main model rather + than through a separate IP-Adapter / ControlNet model — for example + FLUX.2 Klein, FLUX Kontext, and Qwen Image Edit. 
The receiving frontend + picks the correct config type (``flux2_reference_image`` / + ``qwen_image_reference_image`` / ``flux_kontext_reference_image``) based + on the currently-selected main model. + """ + + image_name: str = Field(description="The filename of the reference image in outputs/images") + + class RecallParameter(BaseModel): """Request model for updating recallable parameters.""" @@ -105,6 +119,14 @@ class RecallParameter(BaseModel): ip_adapters: Optional[list[IPAdapterRecallParameter]] = Field( None, description="List of IP Adapters with their settings" ) + reference_images: Optional[list[ReferenceImageRecallParameter]] = Field( + None, + description=( + "List of model-free reference images for architectures that consume reference " + "images directly (FLUX.2 Klein, FLUX Kontext, Qwen Image Edit). The frontend " + "picks the correct config type based on the currently-selected main model." + ), + ) def resolve_model_name_to_key(model_name: str, model_type: ModelType = ModelType.Main) -> Optional[str]: @@ -292,11 +314,43 @@ def resolve_ip_adapter_models(ip_adapters: list[IPAdapterRecallParameter]) -> li return resolved_adapters +def resolve_reference_images( + reference_images: list[ReferenceImageRecallParameter], +) -> list[dict[str, Any]]: + """ + Validate model-free reference images and build the configuration list. + + Unlike IP Adapters and ControlNets, these reference images are consumed + directly by the main model (FLUX.2 Klein, FLUX Kontext, Qwen Image Edit), + so there is no adapter-model name to resolve. We simply verify that each + referenced file exists in ``outputs/images`` and pass the image metadata + through to the frontend. + + Args: + reference_images: List of reference-image recall parameters + + Returns: + List of reference-image configurations with resolved image metadata. + Entries whose image file cannot be loaded are dropped with a warning. 
+ """ + logger = ApiDependencies.invoker.services.logger + resolved: list[dict[str, Any]] = [] + + for ref in reference_images: + image_data = load_image_file(ref.image_name) + if image_data is None: + logger.warning(f"Skipping reference image '{ref.image_name}' - file not found") + continue + resolved.append({"image": image_data}) + + return resolved + + def _assert_recall_image_access(parameters: "RecallParameter", current_user: CurrentUserOrDefault) -> None: """Validate that the caller can read every image referenced in the recall parameters. - Control layers and IP adapters may reference image_name fields. Without this - check an attacker who knows another user's image UUID could use the recall + Control layers, IP adapters, and reference images may reference image_name fields. + Without this check an attacker who knows another user's image UUID could use the recall endpoint to extract image dimensions and — for ControlNet preprocessors — mint a derived processed image they can then fetch. """ @@ -311,6 +365,10 @@ def _assert_recall_image_access(parameters: "RecallParameter", current_user: Cur for adapter in parameters.ip_adapters: if adapter.image_name is not None: image_names.append(adapter.image_name) + if parameters.reference_images: + for ref in parameters.reference_images: + if ref.image_name is not None: + image_names.append(ref.image_name) if not image_names: return @@ -346,6 +404,10 @@ async def update_recall_parameters( current_user: CurrentUserOrDefault, queue_id: str = Path(..., description="The queue id to perform this operation on"), parameters: RecallParameter = Body(..., description="Recall parameters to update"), + strict: bool = Query( + default=False, + description="When true, parameters not included in the request are reset to their defaults (cleared).", + ), ) -> dict[str, Any]: """ Update recallable parameters that can be recalled on the frontend. 
@@ -357,21 +419,23 @@ async def update_recall_parameters( Args: queue_id: The queue ID to associate these parameters with parameters: The RecallParameter object containing the parameters to update + strict: When true, parameters not included in the request body are reset + to their defaults (cleared on the frontend). Defaults to false, + which preserves the existing behaviour of only updating the + parameters that are explicitly provided. Returns: A dictionary containing the updated parameters and status Example: - POST /api/v1/recall/{queue_id} + POST /api/v1/recall/{queue_id}?strict=true { "positive_prompt": "a beautiful landscape", "model": "sd-1.5", - "steps": 20, - "cfg_scale": 7.5, - "width": 512, - "height": 512, - "seed": 12345 + "steps": 20 } + # In strict mode, all other parameters (reference_images, loras, etc.) + # are cleared. In non-strict mode (default) they would be left as-is. """ logger = ApiDependencies.invoker.services.logger @@ -380,8 +444,18 @@ async def update_recall_parameters( _assert_recall_image_access(parameters, current_user) try: - # Get only the parameters that were actually provided (non-None values) - provided_params = {k: v for k, v in parameters.model_dump().items() if v is not None} + # In strict mode, include all parameters so the frontend clears anything + # not explicitly provided. List-typed fields use [] instead of None so + # the frontend sees an empty collection rather than a null it might skip. 
+ if strict: + _list_fields = { + name for name, field in RecallParameter.model_fields.items() if "list" in str(field.annotation).lower() + } + provided_params = { + k: ([] if v is None and k in _list_fields else v) for k, v in parameters.model_dump().items() + } + else: + provided_params = {k: v for k, v in parameters.model_dump().items() if v is not None} if not provided_params: return {"status": "no_parameters_provided", "updated_count": 0} @@ -442,6 +516,14 @@ async def update_recall_parameters( provided_params["ip_adapters"] = resolved_adapters logger.info(f"Resolved {len(resolved_adapters)} IP adapter(s)") + # Process model-free reference images if provided + if "reference_images" in provided_params: + reference_images_param = parameters.reference_images + if reference_images_param is not None: + resolved_refs = resolve_reference_images(reference_images_param) + provided_params["reference_images"] = resolved_refs + logger.info(f"Resolved {len(resolved_refs)} reference image(s)") + # Emit event to notify frontend of parameter updates try: logger.info( diff --git a/invokeai/app/api/routers/utilities.py b/invokeai/app/api/routers/utilities.py index 921645b1d86..f77f77a8534 100644 --- a/invokeai/app/api/routers/utilities.py +++ b/invokeai/app/api/routers/utilities.py @@ -1,13 +1,32 @@ +import asyncio +import logging +import threading +from pathlib import Path from typing import Optional, Union +import torch from dynamicprompts.generators import CombinatorialPromptGenerator, RandomPromptGenerator -from fastapi import Body +from fastapi import Body, HTTPException from fastapi.routing import APIRouter -from pydantic import BaseModel +from pydantic import BaseModel, Field from pyparsing import ParseException +from transformers import AutoProcessor, AutoTokenizer, LlavaOnevisionForConditionalGeneration, LlavaOnevisionProcessor + +from invokeai.app.api.dependencies import ApiDependencies +from invokeai.app.services.image_files.image_files_common import 
ImageFileNotFoundException +from invokeai.app.services.model_records.model_records_base import UnknownModelException +from invokeai.backend.llava_onevision_pipeline import LlavaOnevisionPipeline +from invokeai.backend.model_manager.taxonomy import ModelType +from invokeai.backend.text_llm_pipeline import DEFAULT_SYSTEM_PROMPT, TextLLMPipeline +from invokeai.backend.util.devices import TorchDevice + +logger = logging.getLogger(__name__) utilities_router = APIRouter(prefix="/v1/utilities", tags=["utilities"]) +# The underlying model loader is not thread-safe, so we serialize load_model calls. +_model_load_lock = threading.Lock() + class DynamicPromptsResponse(BaseModel): prompts: list[str] @@ -42,3 +61,160 @@ async def parse_dynamicprompts( prompts = [prompt] error = str(e) return DynamicPromptsResponse(prompts=prompts if prompts else [""], error=error) + + +# --- Expand Prompt --- + + +class ExpandPromptRequest(BaseModel): + prompt: str + model_key: str + max_tokens: int = Field(default=300, ge=1, le=2048) + system_prompt: str | None = None + + +class ExpandPromptResponse(BaseModel): + expanded_prompt: str + error: str | None = None + + +def _resolve_model_path(model_config_path: str) -> Path: + """Resolve a model config path to an absolute path.""" + model_path = Path(model_config_path) + if model_path.is_absolute(): + return model_path.resolve() + base_models_path = ApiDependencies.invoker.services.configuration.models_path + return (base_models_path / model_path).resolve() + + +def _run_expand_prompt(prompt: str, model_key: str, max_tokens: int, system_prompt: str | None) -> str: + """Run text LLM inference synchronously (called from thread).""" + model_manager = ApiDependencies.invoker.services.model_manager + model_config = model_manager.store.get_model(model_key) + + if model_config.type != ModelType.TextLLM: + raise ValueError(f"Model '{model_key}' is not a TextLLM model (got {model_config.type})") + + with _model_load_lock: + loaded_model = 
model_manager.load.load_model(model_config) + + with torch.no_grad(), loaded_model.model_on_device() as (_, model): + model_abs_path = _resolve_model_path(model_config.path) + tokenizer = AutoTokenizer.from_pretrained(model_abs_path, local_files_only=True) + + pipeline = TextLLMPipeline(model, tokenizer) + model_device = next(model.parameters()).device + output = pipeline.run( + prompt=prompt, + system_prompt=system_prompt or DEFAULT_SYSTEM_PROMPT, + max_new_tokens=max_tokens, + device=model_device, + dtype=TorchDevice.choose_torch_dtype(), + ) + + return output + + +@utilities_router.post( + "/expand-prompt", + operation_id="expand_prompt", + responses={ + 200: {"model": ExpandPromptResponse}, + }, +) +async def expand_prompt(body: ExpandPromptRequest) -> ExpandPromptResponse: + """Expand a brief prompt into a detailed image generation prompt using a text LLM.""" + try: + expanded = await asyncio.to_thread( + _run_expand_prompt, + body.prompt, + body.model_key, + body.max_tokens, + body.system_prompt, + ) + return ExpandPromptResponse(expanded_prompt=expanded) + except UnknownModelException: + raise HTTPException(status_code=404, detail=f"Model '{body.model_key}' not found") + except ValueError as e: + raise HTTPException(status_code=422, detail=str(e)) + except Exception as e: + logger.error(f"Error expanding prompt: {e}") + raise HTTPException(status_code=500, detail=str(e)) + + +# --- Image to Prompt --- + + +class ImageToPromptRequest(BaseModel): + image_name: str + model_key: str + instruction: str = "Describe this image in detail for use as an AI image generation prompt." 
+ + +class ImageToPromptResponse(BaseModel): + prompt: str + error: str | None = None + + +def _run_image_to_prompt(image_name: str, model_key: str, instruction: str) -> str: + """Run LLaVA OneVision inference synchronously (called from thread).""" + model_manager = ApiDependencies.invoker.services.model_manager + model_config = model_manager.store.get_model(model_key) + + if model_config.type != ModelType.LlavaOnevision: + raise ValueError(f"Model '{model_key}' is not a LLaVA OneVision model (got {model_config.type})") + + with _model_load_lock: + loaded_model = model_manager.load.load_model(model_config) + + # Load the image from InvokeAI's image store + image = ApiDependencies.invoker.services.images.get_pil_image(image_name) + image = image.convert("RGB") + + with torch.no_grad(), loaded_model.model_on_device() as (_, model): + if not isinstance(model, LlavaOnevisionForConditionalGeneration): + raise TypeError(f"Expected LlavaOnevisionForConditionalGeneration, got {type(model).__name__}") + + model_abs_path = _resolve_model_path(model_config.path) + processor = AutoProcessor.from_pretrained(model_abs_path, local_files_only=True) + if not isinstance(processor, LlavaOnevisionProcessor): + raise TypeError(f"Expected LlavaOnevisionProcessor, got {type(processor).__name__}") + + pipeline = LlavaOnevisionPipeline(model, processor) + model_device = next(model.parameters()).device + output = pipeline.run( + prompt=instruction, + images=[image], + device=model_device, + dtype=TorchDevice.choose_torch_dtype(), + ) + + return output + + +@utilities_router.post( + "/image-to-prompt", + operation_id="image_to_prompt", + responses={ + 200: {"model": ImageToPromptResponse}, + }, +) +async def image_to_prompt(body: ImageToPromptRequest) -> ImageToPromptResponse: + """Generate a descriptive prompt from an image using a vision-language model.""" + try: + prompt = await asyncio.to_thread( + _run_image_to_prompt, + body.image_name, + body.model_key, + body.instruction, + ) + 
from fastapi import HTTPException, Path, Query
from fastapi.routing import APIRouter

from invokeai.app.api.auth_dependencies import CurrentUserOrDefault
from invokeai.app.api.dependencies import ApiDependencies
from invokeai.app.services.image_records.image_records_common import ImageCategory, ImageNamesResult
from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
from invokeai.app.services.virtual_boards.virtual_boards_common import VirtualSubBoardDTO

virtual_boards_router = APIRouter(prefix="/v1/virtual_boards", tags=["virtual_boards"])


@virtual_boards_router.get(
    "/by_date",
    operation_id="list_virtual_boards_by_date",
    response_model=list[VirtualSubBoardDTO],
)
async def list_virtual_boards_by_date(
    current_user: CurrentUserOrDefault,
) -> list[VirtualSubBoardDTO]:
    """Gets a list of virtual sub-boards grouped by date.

    Virtual boards are computed on the fly from image records; non-admin users
    only see dates derived from their own images.
    """
    try:
        return ApiDependencies.invoker.services.image_records.get_image_dates(
            user_id=current_user.user_id,
            is_admin=current_user.is_admin,
        )
    except Exception as e:
        # Chain the original exception so the root cause survives into server
        # logs instead of being replaced by an opaque, causeless 500.
        raise HTTPException(status_code=500, detail="Failed to get virtual boards by date") from e


@virtual_boards_router.get(
    "/by_date/{date}/image_names",
    operation_id="list_virtual_board_image_names_by_date",
    response_model=ImageNamesResult,
)
async def list_virtual_board_image_names_by_date(
    current_user: CurrentUserOrDefault,
    date: str = Path(description="The ISO date string, e.g. '2026-03-18'"),
    starred_first: bool = Query(default=True, description="Whether to sort starred images first"),
    order_dir: SQLiteDirection = Query(default=SQLiteDirection.Descending, description="The sort direction"),
    categories: list[ImageCategory] | None = Query(default=None, description="The categories of images to include"),
    search_term: str | None = Query(default=None, description="Search term to filter images"),
) -> ImageNamesResult:
    """Gets ordered image names for a specific date.

    Supports the same filters as regular boards: category, search term,
    starred-first sorting, and sort direction. User isolation is enforced in
    the record store for non-admin users.
    """
    try:
        return ApiDependencies.invoker.services.image_records.get_image_names_by_date(
            date=date,
            starred_first=starred_first,
            order_dir=order_dir,
            categories=categories,
            search_term=search_term,
            user_id=current_user.user_id,
            is_admin=current_user.is_admin,
        )
    except Exception as e:
        # Preserve the cause for diagnostics (see note above in the sibling
        # endpoint) while still returning a generic 500 to the client.
        raise HTTPException(status_code=500, detail="Failed to get image names for date") from e
app.include_router(app_info.app_router, prefix="/api") app.include_router(session_queue.session_queue_router, prefix="/api") @@ -184,6 +187,7 @@ async def dispatch(self, request: Request, call_next: RequestResponseEndpoint): app.include_router(style_presets.style_presets_router, prefix="/api") app.include_router(client_state.client_state_router, prefix="/api") app.include_router(recall_parameters.recall_parameters_router, prefix="/api") +app.include_router(custom_nodes.custom_nodes_router, prefix="/api") app.openapi = get_openapi_func(app) diff --git a/invokeai/app/invocations/baseinvocation.py b/invokeai/app/invocations/baseinvocation.py index 9141995e460..0546dabebb5 100644 --- a/invokeai/app/invocations/baseinvocation.py +++ b/invokeai/app/invocations/baseinvocation.py @@ -338,6 +338,32 @@ def invalidate_invocation_typeadapter(cls) -> None: """Invalidates the cached invocation type adapter.""" cls.get_invocation_typeadapter.cache_clear() + @classmethod + def unregister_pack(cls, node_pack: str) -> list[str]: + """Unregisters all invocations and outputs belonging to a node pack. + + Returns a list of the invocation types that were removed. 
+ """ + removed_types: list[str] = [] + + invocations_to_remove = {inv for inv in cls._invocation_classes if inv.UIConfig.node_pack == node_pack} + for inv in invocations_to_remove: + removed_types.append(inv.get_type()) + cls._invocation_classes.discard(inv) + + if invocations_to_remove: + cls.invalidate_invocation_typeadapter() + + # Also remove any output classes from this pack's modules + outputs_to_remove = {out for out in cls._output_classes if out.__module__.split(".")[0] == node_pack} + for out in outputs_to_remove: + cls._output_classes.discard(out) + + if outputs_to_remove: + cls.invalidate_output_typeadapter() + + return removed_types + @classmethod def get_invocation_classes(cls) -> Iterable[type[BaseInvocation]]: """Gets all invocations, respecting the allowlist and denylist.""" diff --git a/invokeai/app/invocations/fields.py b/invokeai/app/invocations/fields.py index 2fc5fd5a3c0..e53aeb417b2 100644 --- a/invokeai/app/invocations/fields.py +++ b/invokeai/app/invocations/fields.py @@ -229,6 +229,7 @@ class FieldDescriptions: instantx_control_mode = "The control mode for InstantX ControlNet union models. Ignored for other ControlNet models. The standard mapping is: canny (0), tile (1), depth (2), blur (3), pose (4), gray (5), low quality (6). Negative values will be treated as 'None'." 
flux_redux_conditioning = "FLUX Redux conditioning tensor" vllm_model = "The VLLM model to use" + text_llm_model = "The text language model to use for text generation" flux_fill_conditioning = "FLUX Fill conditioning tensor" flux_kontext_conditioning = "FLUX Kontext conditioning (reference image)" diff --git a/invokeai/app/invocations/flux2_denoise.py b/invokeai/app/invocations/flux2_denoise.py index 1b5ea372d68..d4239e41420 100644 --- a/invokeai/app/invocations/flux2_denoise.py +++ b/invokeai/app/invocations/flux2_denoise.py @@ -53,8 +53,8 @@ "flux2_denoise", title="FLUX2 Denoise", tags=["image", "flux", "flux2", "klein", "denoise"], - category="latents", - version="1.4.0", + category="image", + version="1.5.0", classification=Classification.Prototype, ) class Flux2DenoiseInvocation(BaseInvocation): @@ -101,6 +101,14 @@ class Flux2DenoiseInvocation(BaseInvocation): description="Negative conditioning tensor. Can be None if cfg_scale is 1.0.", input=Input.Connection, ) + guidance: float = InputField( + default=4.0, + ge=0, + le=20, + description="Guidance strength for distilled guidance-embedding models. 
" + "Inert for all current FLUX.2 Klein variants (their guidance_embeds weights are absent/zero); " + "kept for node-graph compatibility and future guidance-embedded models.", + ) cfg_scale: float = InputField( default=1.0, description=FieldDescriptions.cfg_scale, @@ -467,6 +475,7 @@ def _run_diffusion(self, context: InvocationContext) -> torch.Tensor: txt_ids=txt_ids, timesteps=timesteps, step_callback=self._build_step_callback(context), + guidance=self.guidance, cfg_scale=cfg_scale_list, neg_txt=neg_txt, neg_txt_ids=neg_txt_ids, diff --git a/invokeai/app/invocations/flux2_klein_model_loader.py b/invokeai/app/invocations/flux2_klein_model_loader.py index f39e7688f3e..2091fd380d7 100644 --- a/invokeai/app/invocations/flux2_klein_model_loader.py +++ b/invokeai/app/invocations/flux2_klein_model_loader.py @@ -207,9 +207,9 @@ def _validate_qwen3_encoder_variant(self, context: InvocationContext, main_confi flux2_variant = main_config.variant # Validate the variants match - # Klein4B requires Qwen3_4B, Klein9B/Klein9BBase requires Qwen3_8B + # Klein4B/Klein4BBase requires Qwen3_4B, Klein9B/Klein9BBase requires Qwen3_8B expected_qwen3_variant = None - if flux2_variant == Flux2VariantType.Klein4B: + if flux2_variant in (Flux2VariantType.Klein4B, Flux2VariantType.Klein4BBase): expected_qwen3_variant = Qwen3VariantType.Qwen3_4B elif flux2_variant in (Flux2VariantType.Klein9B, Flux2VariantType.Klein9BBase): expected_qwen3_variant = Qwen3VariantType.Qwen3_8B diff --git a/invokeai/app/invocations/flux_denoise.py b/invokeai/app/invocations/flux_denoise.py index 84f0a030c51..1ad0cc559ed 100644 --- a/invokeai/app/invocations/flux_denoise.py +++ b/invokeai/app/invocations/flux_denoise.py @@ -477,7 +477,7 @@ def _run_diffusion( ) context.logger.info( f"DyPE enabled: resolution={self.width}x{self.height}, preset={self.dype_preset}, " - f"method={dype_config.method}, scale={dype_config.dype_scale:.2f}, " + f"scale={dype_config.dype_scale:.2f}, " 
@invocation(
    "text_llm",
    title="Text LLM",
    tags=["llm", "text", "prompt"],
    category="llm",
    version="1.0.0",
    classification=Classification.Beta,
)
class TextLLMInvocation(BaseInvocation):
    """Run a text language model to generate or expand text (e.g. for prompt expansion)."""

    # Input text to feed the model.
    prompt: str = InputField(
        default="",
        description="Input text prompt.",
        ui_component=UIComponent.Textarea,
    )
    # System prompt steering the model's behavior; defaults to the shared
    # prompt-expansion system prompt.
    system_prompt: str = InputField(
        default=DEFAULT_SYSTEM_PROMPT,
        description="System prompt that guides the model's behavior.",
        ui_component=UIComponent.Textarea,
    )
    # Identifier of the TextLLM model to load via the model manager.
    text_llm_model: ModelIdentifierField = InputField(
        title="Text LLM Model",
        description=FieldDescriptions.text_llm_model,
        ui_model_type=ModelType.TextLLM,
    )
    # Generation budget; bounded to keep node execution time reasonable.
    max_tokens: int = InputField(
        default=300,
        ge=1,
        le=2048,
        description="Maximum number of tokens to generate.",
    )

    @torch.no_grad()
    def invoke(self, context: InvocationContext) -> StringOutput:
        config = context.models.get_config(self.text_llm_model)

        with context.models.load(self.text_llm_model).model_on_device() as (_, model):
            # The tokenizer files live alongside the model weights on disk;
            # local_files_only prevents any network fetch at invoke time.
            tokenizer = AutoTokenizer.from_pretrained(
                context.models.get_absolute_path(config), local_files_only=True
            )

            # Run generation on whichever device the model was placed on.
            generated = TextLLMPipeline(model, tokenizer).run(
                prompt=self.prompt,
                system_prompt=self.system_prompt,
                max_new_tokens=self.max_tokens,
                device=next(model.parameters()).device,
                dtype=TorchDevice.choose_torch_dtype(),
            )

        return StringOutput(value=generated)
+ prefix (str): The prefix to filter keys by. + + Returns: + list[str]: A list of keys matching the prefix. + """ + pass + + @abstractmethod + def delete_by_key(self, user_id: str, key: str) -> None: + """ + Delete a specific key-value pair for a user. + + Args: + user_id (str): The user ID to delete state for. + key (str): The key to delete. + """ + pass + @abstractmethod def delete(self, user_id: str) -> None: """ diff --git a/invokeai/app/services/client_state_persistence/client_state_persistence_sqlite.py b/invokeai/app/services/client_state_persistence/client_state_persistence_sqlite.py index 643db306857..7605de829d9 100644 --- a/invokeai/app/services/client_state_persistence/client_state_persistence_sqlite.py +++ b/invokeai/app/services/client_state_persistence/client_state_persistence_sqlite.py @@ -44,6 +44,31 @@ def get_by_key(self, user_id: str, key: str) -> str | None: return None return row[0] + def get_keys_by_prefix(self, user_id: str, prefix: str) -> list[str]: + # Escape LIKE wildcards (%, _) and the escape char itself so callers can pass + # arbitrary strings as a literal prefix without accidental pattern matching. + escaped_prefix = prefix.replace("\\", "\\\\").replace("%", "\\%").replace("_", "\\_") + with self._db.transaction() as cursor: + cursor.execute( + """ + SELECT key FROM client_state + WHERE user_id = ? AND key LIKE ? ESCAPE '\\' + ORDER BY updated_at DESC + """, + (user_id, f"{escaped_prefix}%"), + ) + return [row[0] for row in cursor.fetchall()] + + def delete_by_key(self, user_id: str, key: str) -> None: + with self._db.transaction() as cursor: + cursor.execute( + """ + DELETE FROM client_state + WHERE user_id = ? AND key = ? 
+ """, + (user_id, key), + ) + def delete(self, user_id: str) -> None: with self._db.transaction() as cursor: cursor.execute( diff --git a/invokeai/app/services/image_records/image_records_base.py b/invokeai/app/services/image_records/image_records_base.py index 457cf2f4686..dd1e9fd4f37 100644 --- a/invokeai/app/services/image_records/image_records_base.py +++ b/invokeai/app/services/image_records/image_records_base.py @@ -12,6 +12,7 @@ ) from invokeai.app.services.shared.pagination import OffsetPaginatedResults from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection +from invokeai.app.services.virtual_boards.virtual_boards_common import VirtualSubBoardDTO class ImageRecordStorageBase(ABC): @@ -122,3 +123,26 @@ def get_image_names( ) -> ImageNamesResult: """Gets ordered list of image names with metadata for optimistic updates.""" pass + + @abstractmethod + def get_image_dates( + self, + user_id: Optional[str] = None, + is_admin: bool = False, + ) -> list[VirtualSubBoardDTO]: + """Gets a list of dates with image counts, grouped by DATE(created_at).""" + pass + + @abstractmethod + def get_image_names_by_date( + self, + date: str, + starred_first: bool = True, + order_dir: SQLiteDirection = SQLiteDirection.Descending, + categories: Optional[list[ImageCategory]] = None, + search_term: Optional[str] = None, + user_id: Optional[str] = None, + is_admin: bool = False, + ) -> ImageNamesResult: + """Gets ordered list of image names for a specific date.""" + pass diff --git a/invokeai/app/services/image_records/image_records_sqlite.py b/invokeai/app/services/image_records/image_records_sqlite.py index 07126d53a9f..e88b49c56d3 100644 --- a/invokeai/app/services/image_records/image_records_sqlite.py +++ b/invokeai/app/services/image_records/image_records_sqlite.py @@ -19,6 +19,7 @@ from invokeai.app.services.shared.pagination import OffsetPaginatedResults from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection from 
invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase +from invokeai.app.services.virtual_boards.virtual_boards_common import VirtualSubBoardDTO class SqliteImageRecordStorage(ImageRecordStorageBase): @@ -503,3 +504,141 @@ def get_image_names( image_names = [row[0] for row in result] return ImageNamesResult(image_names=image_names, starred_count=starred_count, total_count=len(image_names)) + + def get_image_dates( + self, + user_id: Optional[str] = None, + is_admin: bool = False, + ) -> list[VirtualSubBoardDTO]: + with self._db.transaction() as cursor: + query_conditions = "" + query_params: list[Union[int, str, bool]] = [] + + # Only non-intermediate images + query_conditions += """--sql + AND images.is_intermediate = 0 + """ + + # User isolation for non-admin users + if user_id is not None and not is_admin: + query_conditions += """--sql + AND images.user_id = ? + """ + query_params.append(user_id) + + query = f"""--sql + SELECT + DATE(images.created_at) as date, + SUM(CASE WHEN images.image_category = 'general' THEN 1 ELSE 0 END) as image_count, + SUM(CASE WHEN images.image_category != 'general' THEN 1 ELSE 0 END) as asset_count, + ( + SELECT i2.image_name FROM images i2 + WHERE DATE(i2.created_at) = DATE(images.created_at) + AND i2.is_intermediate = 0 + ORDER BY i2.created_at DESC LIMIT 1 + ) as cover_image_name + FROM images + WHERE 1=1 + {query_conditions} + GROUP BY DATE(images.created_at) + ORDER BY date DESC; + """ + + cursor.execute(query, query_params) + result = cast(list[sqlite3.Row], cursor.fetchall()) + + return [ + VirtualSubBoardDTO( + virtual_board_id=f"by_date:{dict(row)['date']}", + board_name=dict(row)["date"], + date=dict(row)["date"], + image_count=dict(row)["image_count"], + asset_count=dict(row)["asset_count"], + cover_image_name=dict(row)["cover_image_name"], + ) + for row in result + ] + + def get_image_names_by_date( + self, + date: str, + starred_first: bool = True, + order_dir: SQLiteDirection = 
SQLiteDirection.Descending, + categories: Optional[list[ImageCategory]] = None, + search_term: Optional[str] = None, + user_id: Optional[str] = None, + is_admin: bool = False, + ) -> ImageNamesResult: + with self._db.transaction() as cursor: + query_conditions = "" + query_params: list[Union[int, str, bool]] = [] + + # Filter by date + query_conditions += """--sql + AND DATE(images.created_at) = ? + """ + query_params.append(date) + + # Only non-intermediate images + query_conditions += """--sql + AND images.is_intermediate = 0 + """ + + if categories is not None: + category_strings = [c.value for c in set(categories)] + placeholders = ",".join("?" * len(category_strings)) + query_conditions += f"""--sql + AND images.image_category IN ( {placeholders} ) + """ + for c in category_strings: + query_params.append(c) + + # User isolation for non-admin users + if user_id is not None and not is_admin: + query_conditions += """--sql + AND images.user_id = ? + """ + query_params.append(user_id) + + if search_term: + query_conditions += """--sql + AND ( + images.metadata LIKE ? + OR images.created_at LIKE ? 
+ ) + """ + query_params.append(f"%{search_term.lower()}%") + query_params.append(f"%{search_term.lower()}%") + + # Get starred count if starred_first is enabled + starred_count = 0 + if starred_first: + starred_count_query = f"""--sql + SELECT COUNT(*) + FROM images + WHERE images.starred = TRUE AND (1=1{query_conditions}) + """ + cursor.execute(starred_count_query, query_params) + starred_count = cast(int, cursor.fetchone()[0]) + + # Get all image names with proper ordering + if starred_first: + names_query = f"""--sql + SELECT images.image_name + FROM images + WHERE 1=1{query_conditions} + ORDER BY images.starred DESC, images.created_at {order_dir.value} + """ + else: + names_query = f"""--sql + SELECT images.image_name + FROM images + WHERE 1=1{query_conditions} + ORDER BY images.created_at {order_dir.value} + """ + + cursor.execute(names_query, query_params) + result = cast(list[sqlite3.Row], cursor.fetchall()) + image_names = [row[0] for row in result] + + return ImageNamesResult(image_names=image_names, starred_count=starred_count, total_count=len(image_names)) diff --git a/invokeai/app/services/model_records/model_records_base.py b/invokeai/app/services/model_records/model_records_base.py index 31fbadb3cbe..e06f8f2df91 100644 --- a/invokeai/app/services/model_records/model_records_base.py +++ b/invokeai/app/services/model_records/model_records_base.py @@ -6,9 +6,9 @@ from abc import ABC, abstractmethod from enum import Enum from pathlib import Path -from typing import List, Optional, Set, Union +from typing import Any, List, Optional, Set, Union -from pydantic import BaseModel, Field +from pydantic import BaseModel, Field, field_validator from invokeai.app.services.shared.pagination import PaginatedResults from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection @@ -86,6 +86,19 @@ class ModelRecordChanges(BaseModelExcludeNull): source: Optional[str] = Field(description="original source of the model", default=None) source_type: 
Optional[ModelSourceType] = Field(description="type of model source", default=None) source_api_response: Optional[str] = Field(description="metadata from remote source", default=None) + source_url: Optional[str] = Field(description="Optional URL for the model (e.g. download page)", default=None) + + @field_validator("source_url", mode="before") + @classmethod + def validate_source_url(cls, v: Any) -> Optional[str]: + if v is None or v == "": + return None + if not isinstance(v, str): + raise ValueError("source_url must be a string") + if not v.startswith(("https://", "http://")): + raise ValueError("source_url must be an http or https URL") + return v + name: Optional[str] = Field(description="Name of the model.", default=None) path: Optional[str] = Field(description="Path to the model.", default=None) description: Optional[str] = Field(description="Model description", default=None) diff --git a/invokeai/app/services/session_queue/session_queue_base.py b/invokeai/app/services/session_queue/session_queue_base.py index 14b93d97fc7..73acf9c31aa 100644 --- a/invokeai/app/services/session_queue/session_queue_base.py +++ b/invokeai/app/services/session_queue/session_queue_base.py @@ -73,8 +73,20 @@ def is_full(self, queue_id: str) -> IsFullResult: pass @abstractmethod - def get_queue_status(self, queue_id: str, user_id: Optional[str] = None) -> SessionQueueStatus: - """Gets the status of the queue. If user_id is provided, also includes user-specific counts.""" + def get_queue_status( + self, + queue_id: str, + user_id: Optional[str] = None, + acting_user_id: Optional[str] = None, + ) -> SessionQueueStatus: + """Gets the status of the queue. If user_id is provided, also includes user-specific counts. + + acting_user_id is independent of user_id and controls only current-item redaction: + when set, the returned status omits item_id/session_id/batch_id unless the + currently-running item belongs to acting_user_id. 
The redaction is decided from the + same get_current() snapshot used to embed those identifiers, so it cannot race against + a concurrent state change. + """ pass @abstractmethod diff --git a/invokeai/app/services/session_queue/session_queue_sqlite.py b/invokeai/app/services/session_queue/session_queue_sqlite.py index 95fb16fcbed..a05ed468857 100644 --- a/invokeai/app/services/session_queue/session_queue_sqlite.py +++ b/invokeai/app/services/session_queue/session_queue_sqlite.py @@ -316,7 +316,14 @@ def _set_queue_item_status( queue_item = self.get_queue_item(item_id) batch_status = self.get_batch_status(queue_id=queue_item.queue_id, batch_id=queue_item.batch_id) - queue_status = self.get_queue_status(queue_id=queue_item.queue_id) + # The QueueItemStatusChangedEvent ships to user:{queue_item.user_id} and admin rooms. + # acting_user_id ensures the embedded current-item identifiers are redacted when the + # in-progress item belongs to someone else, while leaving aggregate counts global. + # Doing this inside get_queue_status guarantees the redaction decision and the + # embedded identifiers come from the same get_current() snapshot — eliminating the + # race where a second read could find None and skip scrubbing stale identifiers. 
+ queue_status = self.get_queue_status(queue_id=queue_item.queue_id, acting_user_id=queue_item.user_id) + self.__invoker.services.events.emit_queue_item_status_changed(queue_item, batch_status, queue_status) return queue_item @@ -846,7 +853,12 @@ def get_queue_item_ids( return ItemIdsResult(item_ids=item_ids, total_count=len(item_ids)) - def get_queue_status(self, queue_id: str, user_id: Optional[str] = None) -> SessionQueueStatus: + def get_queue_status( + self, + queue_id: str, + user_id: Optional[str] = None, + acting_user_id: Optional[str] = None, + ) -> SessionQueueStatus: with self._db.transaction() as cursor: # When user_id is provided (non-admin), only count that user's items if user_id is not None: @@ -875,8 +887,16 @@ def get_queue_status(self, queue_id: str, user_id: Optional[str] = None) -> Sess total = sum(row[1] or 0 for row in counts_result) counts: dict[str, int] = {row[0]: row[1] for row in counts_result} - # For non-admin users, hide current item details if they don't own it - show_current_item = current_item is not None and (user_id is None or current_item.user_id == user_id) + # Redaction is decided from the same current_item snapshot used to embed identifiers, + # so a concurrent transition (e.g. B finishing while A's status changes) cannot leave + # stale identifiers in the result. user_id (count filter) and acting_user_id + # (redaction) are independent: callers that need global counts but per-user redaction + # pass only acting_user_id; non-admin API callers pass user_id and inherit the same + # redaction by default. 
+ owner_user_id = user_id if acting_user_id is None else acting_user_id + show_current_item = current_item is not None and ( + owner_user_id is None or current_item.user_id == owner_user_id + ) return SessionQueueStatus( queue_id=queue_id, diff --git a/invokeai/app/services/shared/README.md b/invokeai/app/services/shared/README.md index 113b7a41e54..f92b1f1ea2e 100644 --- a/invokeai/app/services/shared/README.md +++ b/invokeai/app/services/shared/README.md @@ -96,7 +96,10 @@ mutation helpers. Those helpers reject changes once the affected nodes have alre ### 4.1 Data - `graph: Graph` - source graph for the run; treated as stable during normal execution. -- `execution_graph: Graph` - materialized runtime nodes/edges. +- `execution_graph: Graph` - materialized runtime nodes/edges. This is mutable runtime state, not an immutable audit + log. Lazy `If` pruning may remove unselected input edges during execution, so persisted failed/completed session + snapshots can contain a structurally pruned execution graph. Retry paths rebuild from `graph`, not from a previously + persisted `execution_graph`. - `executed: set[str]`, `executed_history: list[str]`. - `results: dict[str, AnyInvocationOutput]`, `errors: dict[str, str]`. - `prepared_source_mapping: dict[str, str]` - exec id -> source id. @@ -123,7 +126,8 @@ mutation helpers. Those helpers reject changes once the affected nodes have alre - `_PreparedExecRegistry` Owns the relationship between source graph nodes and prepared execution graph nodes, plus cached metadata such as iteration path and runtime state. - `_ExecutionMaterializer` Expands source graph nodes into concrete execution graph nodes when the scheduler runs out of - ready work. + ready work. When matching prepared parents for a downstream exec node, skipped prepared exec nodes are ignored and + cannot be selected as live inputs. - `_ExecutionScheduler` Owns indegree transitions, ready queues, class batching, and downstream release on completion. 
- `_ExecutionRuntime` Owns iteration-path lookup and input hydration for prepared exec nodes. - `_IfBranchScheduler` Applies lazy `If` semantics by deferring branch-local work until the condition is known, then @@ -178,7 +182,9 @@ Run `C` -> `D:0` -> enqueue `D`. Run `D` -> done. - For **CollectInvocation**: gather all incoming `item` values into `collection`, sorting inputs by iteration path so collected results are stable across expanded iterations. Incoming `collection` values are merged first, then incoming `item` values are appended. -- For **IfInvocation**: hydrate only `condition` and the selected branch input. +- For **IfInvocation**: hydrate only `condition` and the selected branch input. As a defensive guard against + inconsistent runtime or deserialized session state, the runtime raises if the selected input edge points at an exec + node with no stored runtime output. In normal scheduling this path should be unreachable. - For all others: deep-copy each incoming edge's value into the destination field. This prevents cross-node mutation through shared references. @@ -191,7 +197,11 @@ Run `C` -> `D:0` -> enqueue `D`. Run `D` -> done. - Once the prepared `If` node resolves its condition: - the selected branch is released - the unselected branch is marked skipped + - unselected input edges on the prepared `If` exec node are pruned from the execution graph so they no longer + participate in downstream indegree accounting - branch-exclusive ancestors of the unselected branch are never executed +- Skipped branch-local exec nodes may still be treated as executed for scheduling purposes, but they do not create + entries in `results`. - Shared ancestors still execute if they are required by the selected branch or by any other live path in the graph. This behavior is implemented in the runtime scheduler, not in the invocation body itself. 
diff --git a/invokeai/app/services/shared/graph.py b/invokeai/app/services/shared/graph.py index 992a0c6dcf3..aa47c3b4bb5 100644 --- a/invokeai/app/services/shared/graph.py +++ b/invokeai/app/services/shared/graph.py @@ -184,11 +184,11 @@ def _get_selected_branch_fields(self, node: IfInvocation) -> tuple[str, str]: def _prune_unselected_if_inputs(self, exec_node_id: str, unselected_field: str) -> None: for edge in self._state.execution_graph._get_input_edges(exec_node_id, unselected_field): - if edge.source.node_id in self._state.executed: - continue - if self._state.indegree[exec_node_id] == 0: - raise RuntimeError(f"indegree underflow for {exec_node_id} when pruning {unselected_field}") - self._state.indegree[exec_node_id] -= 1 + if edge.source.node_id not in self._state.executed: + if self._state.indegree[exec_node_id] == 0: + raise RuntimeError(f"indegree underflow for {exec_node_id} when pruning {unselected_field}") + self._state.indegree[exec_node_id] -= 1 + self._state.execution_graph.delete_edge(edge) def _apply_branch_resolution( self, @@ -243,6 +243,10 @@ def is_deferred_by_unresolved_if(self, exec_node_id: str) -> bool: return False def mark_exec_node_skipped(self, exec_node_id: str) -> None: + state = self._state._get_prepared_exec_metadata(exec_node_id).state + if state in ("executed", "skipped"): + return + self._state._remove_from_ready_queues(exec_node_id) self._state._set_prepared_exec_state(exec_node_id, "skipped") self._state.executed.add(exec_node_id) @@ -356,7 +360,7 @@ def _initialize_execution_node(self, exec_node_id: str) -> None: def _get_collect_iteration_mappings(self, parent_node_ids: list[str]) -> list[tuple[str, str]]: all_iteration_mappings: list[tuple[str, str]] = [] for source_node_id in parent_node_ids: - prepared_nodes = self._state.source_prepared_mapping[source_node_id] + prepared_nodes = self._get_prepared_nodes_for_source(source_node_id) all_iteration_mappings.extend((source_node_id, prepared_id) for prepared_id in 
prepared_nodes) return all_iteration_mappings @@ -414,7 +418,11 @@ def get_node_iterators(self, node_id: str, it_graph: Optional[nx.DiGraph] = None return [n for n in nx.ancestors(g, node_id) if isinstance(self._state.graph.get_node(n), IterateInvocation)] def _get_prepared_nodes_for_source(self, source_node_id: str) -> set[str]: - return self._state.source_prepared_mapping[source_node_id] + return { + exec_node_id + for exec_node_id in self._state.source_prepared_mapping[source_node_id] + if self._state._get_prepared_exec_metadata(exec_node_id).state != "skipped" + } def _get_parent_iterator_exec_nodes( self, source_node_id: str, graph: nx.DiGraph, prepared_iterator_nodes: list[str] @@ -471,10 +479,15 @@ def get_iteration_node( prepared_iterator_nodes: list[str], ) -> Optional[str]: prepared_nodes = self._get_prepared_nodes_for_source(source_node_id) - if len(prepared_nodes) == 1: + if len(prepared_nodes) == 1 and not prepared_iterator_nodes: return next(iter(prepared_nodes)) parent_iterators = self._get_parent_iterator_exec_nodes(source_node_id, graph, prepared_iterator_nodes) + if len(prepared_nodes) == 1: + prepared_node_id = next(iter(prepared_nodes)) + if self._matches_parent_iterators(prepared_node_id, parent_iterators, execution_graph): + return prepared_node_id + return None direct_iterator_match = self._get_direct_prepared_iterator_match( prepared_nodes, prepared_iterator_nodes, parent_iterators, execution_graph @@ -733,6 +746,12 @@ def _sort_collect_input_edges(self, input_edges: list[Edge], field_name: str) -> def _get_copied_result_value(self, edge: Edge) -> Any: return copydeep(getattr(self._state.results[edge.source.node_id], edge.source.field)) + def _try_get_copied_result_value(self, edge: Edge) -> tuple[bool, Any]: + source_output = self._state.results.get(edge.source.node_id) + if source_output is None: + return False, None + return True, copydeep(getattr(source_output, edge.source.field)) + def _build_collect_collection(self, input_edges: 
list[Edge]) -> list[Any]: item_edges = self._sort_collect_input_edges(input_edges, ITEM_FIELD) collection_edges = self._sort_collect_input_edges(input_edges, COLLECTION_FIELD) @@ -761,7 +780,20 @@ def _prepare_collect_inputs(self, node: "CollectInvocation", input_edges: list[E def _prepare_if_inputs(self, node: IfInvocation, input_edges: list[Edge]) -> None: selected_field = self._state._resolved_if_exec_branches.get(node.id) allowed_fields = {"condition", selected_field} if selected_field is not None else {"condition"} - self._set_node_inputs(node, input_edges, allowed_fields) + + for edge in input_edges: + if edge.destination.field not in allowed_fields: + continue + + found_value, copied_value = self._try_get_copied_result_value(edge) + if not found_value: + iteration_path = self._state._get_iteration_path(node.id) + raise RuntimeError( + "IfInvocation selected input edge points at an exec node with no stored result output: " + f"if_exec_id={node.id}, source_exec_id={edge.source.node_id}, iteration_path={iteration_path}" + ) + + setattr(node, edge.destination.field, copied_value) def _prepare_default_inputs(self, node: BaseInvocation, input_edges: list[Edge]) -> None: self._set_node_inputs(node, input_edges) diff --git a/invokeai/app/services/virtual_boards/__init__.py b/invokeai/app/services/virtual_boards/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/invokeai/app/services/virtual_boards/virtual_boards_common.py b/invokeai/app/services/virtual_boards/virtual_boards_common.py new file mode 100644 index 00000000000..e1df5a81ca5 --- /dev/null +++ b/invokeai/app/services/virtual_boards/virtual_boards_common.py @@ -0,0 +1,14 @@ +from typing import Optional + +from pydantic import BaseModel, Field + + +class VirtualSubBoardDTO(BaseModel): + """A virtual sub-board computed from image metadata, not stored in the database.""" + + virtual_board_id: str = Field(description="The virtual board ID, e.g. 
'by_date:2026-03-18'.") + board_name: str = Field(description="The display name of the virtual sub-board, e.g. '2026-03-18'.") + date: str = Field(description="The ISO date string, e.g. '2026-03-18'.") + image_count: int = Field(description="The number of general images for this date.") + asset_count: int = Field(description="The number of asset images for this date.") + cover_image_name: Optional[str] = Field(default=None, description="The most recent image name for this date.") diff --git a/invokeai/backend/flux/denoise.py b/invokeai/backend/flux/denoise.py index 30d075a5270..a0ae4cfb3f3 100644 --- a/invokeai/backend/flux/denoise.py +++ b/invokeai/backend/flux/denoise.py @@ -96,15 +96,18 @@ def denoise( timestep = scheduler.timesteps[step_index] # Convert scheduler timestep (0-1000) to normalized (0-1) for the model t_curr = timestep.item() / scheduler.config.num_train_timesteps + dype_sigma = DyPEExtension.resolve_step_sigma( + fallback_sigma=t_curr, + step_index=step_index, + scheduler_sigmas=getattr(scheduler, "sigmas", None), + ) t_vec = torch.full((img.shape[0],), t_curr, dtype=img.dtype, device=img.device) # DyPE: Update step state for timestep-dependent scaling if dype_extension is not None and dype_embedder is not None: dype_extension.update_step_state( embedder=dype_embedder, - timestep=t_curr, - timestep_index=user_step, - total_steps=total_steps, + sigma=dype_sigma, ) # For Heun scheduler, track if we're in first or second order step @@ -264,9 +267,7 @@ def denoise( if dype_extension is not None and dype_embedder is not None: dype_extension.update_step_state( embedder=dype_embedder, - timestep=t_curr, - timestep_index=step_index, - total_steps=total_steps, + sigma=t_curr, ) t_vec = torch.full((img.shape[0],), t_curr, dtype=img.dtype, device=img.device) diff --git a/invokeai/backend/flux/dype/__init__.py b/invokeai/backend/flux/dype/__init__.py index eebcfc45df1..7af50625dd7 100644 --- a/invokeai/backend/flux/dype/__init__.py +++ 
b/invokeai/backend/flux/dype/__init__.py @@ -1,9 +1,9 @@ """Dynamic Position Extrapolation (DyPE) for FLUX models. -DyPE enables high-resolution image generation (4K+) with pretrained FLUX models -by dynamically scaling RoPE position embeddings during the denoising process. +DyPE enables high-resolution image generation with pretrained FLUX models by +dynamically modulating RoPE extrapolation during denoising. -Based on: https://github.com/wildminder/ComfyUI-DyPE +Based on the official DyPE project: https://github.com/guyyariv/DyPE """ from invokeai.backend.flux.dype.base import DyPEConfig diff --git a/invokeai/backend/flux/dype/base.py b/invokeai/backend/flux/dype/base.py index 7b25a7f71f3..6c3fc42fa2c 100644 --- a/invokeai/backend/flux/dype/base.py +++ b/invokeai/backend/flux/dype/base.py @@ -1,8 +1,6 @@ -"""DyPE base configuration and utilities.""" +"""DyPE base configuration and utilities for FLUX vision_yarn RoPE.""" -import math from dataclasses import dataclass -from typing import Literal import torch from torch import Tensor @@ -14,72 +12,39 @@ class DyPEConfig: enable_dype: bool = True base_resolution: int = 1024 # Native training resolution - method: Literal["vision_yarn", "yarn", "ntk", "base"] = "vision_yarn" dype_scale: float = 2.0 # Magnitude λs (0.0-8.0) dype_exponent: float = 2.0 # Decay speed λt (0.0-1000.0) dype_start_sigma: float = 1.0 # When DyPE decay starts -def get_mscale(scale: float, mscale_factor: float = 1.0) -> float: - """Calculate magnitude scaling factor. - - Args: - scale: The resolution scaling factor - mscale_factor: Adjustment factor for the scaling - - Returns: - The magnitude scaling factor - """ - if scale <= 1.0: - return 1.0 - return mscale_factor * math.log(scale) + 1.0 - - -def get_timestep_mscale( - scale: float, +def get_timestep_kappa( current_sigma: float, dype_scale: float, dype_exponent: float, dype_start_sigma: float, ) -> float: - """Calculate timestep-dependent magnitude scaling. 
+ """Calculate the paper-style DyPE scheduler value κ(t). The key insight of DyPE: early steps focus on low frequencies (global structure), - late steps on high frequencies (details). This function modulates the scaling - based on the current timestep/sigma. + late steps on high frequencies (details). DyPE expresses this as a direct + timestep scheduler over the positional extrapolation strength: + + κ(t) = λs * t^λt Args: - scale: Resolution scaling factor current_sigma: Current noise level (1.0 = full noise, 0.0 = clean) dype_scale: DyPE magnitude (λs) dype_exponent: DyPE decay speed (λt) dype_start_sigma: Sigma threshold to start decay Returns: - Timestep-modulated scaling factor + Timestep scheduler value κ(t) """ - if scale <= 1.0: - return 1.0 - - # Normalize sigma to [0, 1] range relative to start_sigma - if current_sigma >= dype_start_sigma: - t_normalized = 1.0 - else: - t_normalized = current_sigma / dype_start_sigma - - # Apply exponential decay: stronger extrapolation early, weaker late - # decay = exp(-λt * (1 - t)) where t=1 is early (high sigma), t=0 is late - decay = math.exp(-dype_exponent * (1.0 - t_normalized)) - - # Base mscale from resolution - base_mscale = get_mscale(scale) + if dype_scale <= 0.0 or dype_start_sigma <= 0.0: + return 0.0 - # Interpolate between base_mscale and 1.0 based on decay and dype_scale - # When decay=1 (early): use scaled value - # When decay=0 (late): use base value - scaled_mscale = 1.0 + (base_mscale - 1.0) * dype_scale * decay - - return scaled_mscale + t_normalized = max(0.0, min(current_sigma / dype_start_sigma, 1.0)) + return dype_scale * (t_normalized**dype_exponent) def compute_vision_yarn_freqs( @@ -117,35 +82,23 @@ def compute_vision_yarn_freqs( """ assert dim % 2 == 0 - # Use the larger scale for NTK calculation scale = max(scale_h, scale_w) device = pos.device dtype = torch.float64 if device.type != "mps" else torch.float32 - # NTK-aware theta scaling: extends position coverage for high-res - # Formula: 
theta_scaled = theta * scale^(dim/(dim-2)) - # This increases the wavelength of position encodings proportionally + # DyPE applies a direct timestep scheduler to the NTK extrapolation exponent. + # Early steps keep strong extrapolation; late steps relax smoothly back + # toward the training-time RoPE. if scale > 1.0: - ntk_alpha = scale ** (dim / (dim - 2)) - - # Apply timestep-dependent DyPE modulation - # mscale controls how strongly we apply the NTK extrapolation - # Early steps (high sigma): stronger extrapolation for global structure - # Late steps (low sigma): weaker extrapolation for fine details - mscale = get_timestep_mscale( - scale=scale, + ntk_exponent = dim / (dim - 2) + kappa = get_timestep_kappa( current_sigma=current_sigma, dype_scale=dype_config.dype_scale, dype_exponent=dype_config.dype_exponent, dype_start_sigma=dype_config.dype_start_sigma, ) - - # Modulate NTK alpha by mscale - # When mscale > 1: interpolate towards stronger extrapolation - # When mscale = 1: use base NTK alpha - modulated_alpha = 1.0 + (ntk_alpha - 1.0) * mscale - scaled_theta = theta * modulated_alpha + scaled_theta = theta * (scale ** (ntk_exponent * kappa)) else: scaled_theta = theta @@ -160,101 +113,3 @@ def compute_vision_yarn_freqs( sin = torch.sin(angles) return cos.to(pos.dtype), sin.to(pos.dtype) - - -def compute_yarn_freqs( - pos: Tensor, - dim: int, - theta: int, - scale: float, - current_sigma: float, - dype_config: DyPEConfig, -) -> tuple[Tensor, Tensor]: - """Compute RoPE frequencies using YARN/NTK method. - - Uses NTK-aware theta scaling for high-resolution support with - timestep-dependent DyPE modulation. 
- - Args: - pos: Position tensor - dim: Embedding dimension - theta: RoPE base frequency - scale: Uniform scaling factor - current_sigma: Current noise level (1.0 = full noise, 0.0 = clean) - dype_config: DyPE configuration - - Returns: - Tuple of (cos, sin) frequency tensors - """ - assert dim % 2 == 0 - - device = pos.device - dtype = torch.float64 if device.type != "mps" else torch.float32 - - # NTK-aware theta scaling with DyPE modulation - if scale > 1.0: - ntk_alpha = scale ** (dim / (dim - 2)) - - # Apply timestep-dependent DyPE modulation - mscale = get_timestep_mscale( - scale=scale, - current_sigma=current_sigma, - dype_scale=dype_config.dype_scale, - dype_exponent=dype_config.dype_exponent, - dype_start_sigma=dype_config.dype_start_sigma, - ) - - # Modulate NTK alpha by mscale - modulated_alpha = 1.0 + (ntk_alpha - 1.0) * mscale - scaled_theta = theta * modulated_alpha - else: - scaled_theta = theta - - freq_seq = torch.arange(0, dim, 2, dtype=dtype, device=device) / dim - freqs = 1.0 / (scaled_theta**freq_seq) - - angles = torch.einsum("...n,d->...nd", pos.to(dtype), freqs) - - cos = torch.cos(angles) - sin = torch.sin(angles) - - return cos.to(pos.dtype), sin.to(pos.dtype) - - -def compute_ntk_freqs( - pos: Tensor, - dim: int, - theta: int, - scale: float, -) -> tuple[Tensor, Tensor]: - """Compute RoPE frequencies using NTK method. - - Neural Tangent Kernel approach - continuous frequency scaling without - timestep dependency. 
- - Args: - pos: Position tensor - dim: Embedding dimension - theta: RoPE base frequency - scale: Scaling factor - - Returns: - Tuple of (cos, sin) frequency tensors - """ - assert dim % 2 == 0 - - device = pos.device - dtype = torch.float64 if device.type != "mps" else torch.float32 - - # NTK scaling - scaled_theta = theta * (scale ** (dim / (dim - 2))) - - freq_seq = torch.arange(0, dim, 2, dtype=dtype, device=device) / dim - freqs = 1.0 / (scaled_theta**freq_seq) - - angles = torch.einsum("...n,d->...nd", pos.to(dtype), freqs) - - cos = torch.cos(angles) - sin = torch.sin(angles) - - return cos.to(pos.dtype), sin.to(pos.dtype) diff --git a/invokeai/backend/flux/dype/presets.py b/invokeai/backend/flux/dype/presets.py index 7805f4364d4..48a714b007a 100644 --- a/invokeai/backend/flux/dype/presets.py +++ b/invokeai/backend/flux/dype/presets.py @@ -31,7 +31,6 @@ class DyPEPresetConfig: """Preset configuration values.""" base_resolution: int - method: str dype_scale: float dype_exponent: float dype_start_sigma: float @@ -41,7 +40,6 @@ class DyPEPresetConfig: DYPE_PRESETS: dict[DyPEPreset, DyPEPresetConfig] = { DYPE_PRESET_4K: DyPEPresetConfig( base_resolution=1024, - method="vision_yarn", dype_scale=2.0, dype_exponent=2.0, dype_start_sigma=1.0, @@ -84,7 +82,6 @@ def get_dype_config_for_resolution( return DyPEConfig( enable_dype=True, base_resolution=base_resolution, - method="vision_yarn", dype_scale=dynamic_dype_scale, dype_exponent=2.0, dype_start_sigma=1.0, @@ -111,24 +108,24 @@ def get_dype_config_for_area( return None area_ratio = area / base_area - effective_side_ratio = math.sqrt(area_ratio) # 1.0 at base, 2.0 at 2K (if base is 1K) - - # Strength: 0 at base area, 8 at sat_area, clamped thereafter. 
- sat_area = 2027520 # Determined by experimentation where a vertical line appears - sat_side_ratio = math.sqrt(sat_area / base_area) - dynamic_dype_scale = 8.0 * (effective_side_ratio - 1.0) / (sat_side_ratio - 1.0) + effective_side_ratio = math.sqrt(area_ratio) + aspect_ratio = max(width, height) / min(width, height) + aspect_attenuation = 1.0 if aspect_ratio <= 2.0 else 2.0 / aspect_ratio + + # Retune area mode to be "auto, but area-aware" instead of dramatically + # stronger than auto. This keeps it closer to the paper-style core DyPE. + dynamic_dype_scale = 2.4 * effective_side_ratio + dynamic_dype_scale *= aspect_attenuation dynamic_dype_scale = max(0.0, min(dynamic_dype_scale, 8.0)) - # Continuous exponent schedule: - # r=1 -> 0.5, r=2 -> 1.0, r=4 -> 2.0 (exact), smoothly varying in between. - x = math.log2(effective_side_ratio) - dype_exponent = 0.25 * (x**2) + 0.25 * x + 0.5 - dype_exponent = max(0.5, min(dype_exponent, 2.0)) + # Use a narrower, higher exponent range than the old area heuristic so the + # paper-style scheduler decays more conservatively and artifacts are reduced. 
+ exponent_progress = max(0.0, min(effective_side_ratio - 1.0, 1.0)) + dype_exponent = 1.25 + 0.75 * exponent_progress return DyPEConfig( enable_dype=True, base_resolution=base_resolution, - method="vision_yarn", dype_scale=dynamic_dype_scale, dype_exponent=dype_exponent, dype_start_sigma=1.0, @@ -165,7 +162,6 @@ def get_dype_config_from_preset( return DyPEConfig( enable_dype=True, base_resolution=1024, - method="vision_yarn", dype_scale=custom_scale if custom_scale is not None else dynamic_dype_scale, dype_exponent=custom_exponent if custom_exponent is not None else 2.0, dype_start_sigma=1.0, @@ -196,7 +192,6 @@ def get_dype_config_from_preset( return DyPEConfig( enable_dype=True, base_resolution=preset_config.base_resolution, - method=preset_config.method, dype_scale=preset_config.dype_scale, dype_exponent=preset_config.dype_exponent, dype_start_sigma=preset_config.dype_start_sigma, diff --git a/invokeai/backend/flux/dype/rope.py b/invokeai/backend/flux/dype/rope.py index f6a1594f6be..980b768cbc0 100644 --- a/invokeai/backend/flux/dype/rope.py +++ b/invokeai/backend/flux/dype/rope.py @@ -6,9 +6,7 @@ from invokeai.backend.flux.dype.base import ( DyPEConfig, - compute_ntk_freqs, compute_vision_yarn_freqs, - compute_yarn_freqs, ) @@ -50,37 +48,15 @@ def rope_dype( if not dype_config.enable_dype or scale <= 1.0: return _rope_base(pos, dim, theta) - # Select method and compute frequencies - method = dype_config.method - - if method == "vision_yarn": - cos, sin = compute_vision_yarn_freqs( - pos=pos, - dim=dim, - theta=theta, - scale_h=scale_h, - scale_w=scale_w, - current_sigma=current_sigma, - dype_config=dype_config, - ) - elif method == "yarn": - cos, sin = compute_yarn_freqs( - pos=pos, - dim=dim, - theta=theta, - scale=scale, - current_sigma=current_sigma, - dype_config=dype_config, - ) - elif method == "ntk": - cos, sin = compute_ntk_freqs( - pos=pos, - dim=dim, - theta=theta, - scale=scale, - ) - else: # "base" - return _rope_base(pos, dim, theta) + cos, sin = 
compute_vision_yarn_freqs( + pos=pos, + dim=dim, + theta=theta, + scale_h=scale_h, + scale_w=scale_w, + current_sigma=current_sigma, + dype_config=dype_config, + ) # Construct rotation matrix from cos/sin # Output shape: (batch, seq_len, dim/2, 2, 2) diff --git a/invokeai/backend/flux/extensions/dype_extension.py b/invokeai/backend/flux/extensions/dype_extension.py index db27c053dd3..af01a305b7b 100644 --- a/invokeai/backend/flux/extensions/dype_extension.py +++ b/invokeai/backend/flux/extensions/dype_extension.py @@ -1,7 +1,9 @@ """DyPE extension for FLUX denoising pipeline.""" from dataclasses import dataclass -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Sequence + +import torch from invokeai.backend.flux.dype.base import DyPEConfig from invokeai.backend.flux.dype.embed import DyPEEmbedND @@ -59,9 +61,7 @@ def patch_model(self, model: "Flux") -> tuple[DyPEEmbedND, object]: def update_step_state( self, embedder: DyPEEmbedND, - timestep: float, - timestep_index: int, - total_steps: int, + sigma: float, ) -> None: """Update the step state in the DyPE embedder. @@ -70,16 +70,38 @@ def update_step_state( Args: embedder: The DyPE embedder to update - timestep: Current timestep value (sigma/noise level) - timestep_index: Current step index (0-based) - total_steps: Total number of denoising steps + sigma: Current noise level for the active denoising step """ embedder.set_step_state( - sigma=timestep, + sigma=sigma, height=self.target_height, width=self.target_width, ) + @staticmethod + def resolve_step_sigma( + fallback_sigma: float, + step_index: int, + scheduler_sigmas: Sequence[float] | torch.Tensor | None, + ) -> float: + """Resolve the actual sigma for the current denoising step. + + Diffusers schedulers may expose both normalized timesteps and the underlying + sigma sequence. DyPE should follow the noise schedule, so prefer + ``scheduler.sigmas`` when available and fall back to the provided value + otherwise. 
+ """ + if scheduler_sigmas is None: + return fallback_sigma + + if step_index >= len(scheduler_sigmas): + return fallback_sigma + + sigma = scheduler_sigmas[step_index] + if isinstance(sigma, torch.Tensor): + return float(sigma.item()) + return float(sigma) + @staticmethod def restore_model(model: "Flux", original_embedder: object) -> None: """Restore the original position embedder. diff --git a/invokeai/backend/flux/util.py b/invokeai/backend/flux/util.py index da6590c7573..81b10a913ac 100644 --- a/invokeai/backend/flux/util.py +++ b/invokeai/backend/flux/util.py @@ -133,7 +133,24 @@ def get_flux_ae_params() -> AutoEncoderParams: axes_dim=[16, 56, 56], theta=10_000, qkv_bias=True, - guidance_embed=True, + guidance_embed=False, + ), + # Flux2 Klein 4B Base is the undistilled foundation model. It shares the same + # architecture as Klein 4B (distilled) and reports guidance_embeds=False in its + # HF transformer config - classical CFG (external negative pass) is the guidance mechanism. + Flux2VariantType.Klein4BBase: FluxParams( + in_channels=64, + vec_in_dim=2560, # Qwen3-4B hidden size (used for pooled output) + context_in_dim=7680, # 3 layers * 2560 = 7680 for Qwen3-4B + hidden_size=3072, + mlp_ratio=4.0, + num_heads=24, + depth=19, + depth_single_blocks=38, + axes_dim=[16, 56, 56], + theta=10_000, + qkv_bias=True, + guidance_embed=False, ), # Flux2 Klein 9B uses Qwen3 8B text encoder with stacked embeddings from layers [9, 18, 27] # The context_in_dim is 3 * hidden_size of Qwen3 (3 * 4096 = 12288) @@ -149,7 +166,24 @@ def get_flux_ae_params() -> AutoEncoderParams: axes_dim=[16, 56, 56], theta=10_000, qkv_bias=True, - guidance_embed=True, + guidance_embed=False, + ), + # Flux2 Klein 9B Base is the undistilled foundation model. It shares the same + # architecture as Klein 9B (distilled) and reports guidance_embeds=False in its + # HF transformer config - the guidance scalar is inert for all Klein variants. 
+ Flux2VariantType.Klein9BBase: FluxParams( + in_channels=64, + vec_in_dim=4096, # Qwen3-8B hidden size (used for pooled output) + context_in_dim=12288, # 3 layers * 4096 = 12288 for Qwen3-8B + hidden_size=3072, + mlp_ratio=4.0, + num_heads=24, + depth=19, + depth_single_blocks=38, + axes_dim=[16, 56, 56], + theta=10_000, + qkv_bias=True, + guidance_embed=False, ), } diff --git a/invokeai/backend/flux2/denoise.py b/invokeai/backend/flux2/denoise.py index 7b5bd6194e0..b4438094f7b 100644 --- a/invokeai/backend/flux2/denoise.py +++ b/invokeai/backend/flux2/denoise.py @@ -26,6 +26,7 @@ def denoise( # sampling parameters timesteps: list[float], step_callback: Callable[[PipelineIntermediateState], None], + guidance: float, cfg_scale: list[float], # Negative conditioning for CFG neg_txt: torch.Tensor | None = None, @@ -45,7 +46,10 @@ def denoise( This is a simplified denoise function for FLUX.2 Klein models that uses the diffusers Flux2Transformer2DModel interface. - Note: FLUX.2 Klein has guidance_embeds=False, so no guidance parameter is used. + All current FLUX.2 Klein variants (4B, 4B Base, 9B, 9B Base) have guidance_embeds=False + in their HF transformer config (or absent/zeroed projection weights), so the guidance + value is passed but effectively ignored by the model. The argument is retained for + node-graph compatibility and future variants that may ship trained guidance projections. CFG is applied externally using negative conditioning when cfg_scale != 1.0. Args: @@ -56,6 +60,8 @@ def denoise( txt_ids: Text position IDs tensor. timesteps: List of timesteps for denoising schedule (linear sigmas from 1.0 to 1/n). step_callback: Callback function for progress updates. + guidance: Guidance strength. Inert for all current FLUX.2 Klein variants + (their guidance_embeds projection weights are absent/zero). cfg_scale: List of CFG scale values per step. neg_txt: Negative text embeddings for CFG (optional). neg_txt_ids: Negative text position IDs (optional). 
@@ -76,9 +82,10 @@ def denoise( img = torch.cat([img, img_cond_seq], dim=1) img_ids = torch.cat([img_ids, img_cond_seq_ids], dim=1) - # Klein has guidance_embeds=False, but the transformer forward() still requires a guidance tensor - # We pass a dummy value (1.0) since it won't affect the output when guidance_embeds=False - guidance = torch.full((img.shape[0],), 1.0, device=img.device, dtype=img.dtype) + # The transformer forward() requires a guidance tensor even when guidance_embeds=False, + # because the Flux2TimestepGuidanceEmbeddings forward signature takes it unconditionally. + # All current Klein variants have guidance_embeds=False, so the value is ignored internally. + guidance_vec = torch.full((img.shape[0],), guidance, device=img.device, dtype=img.dtype) # Use scheduler if provided use_scheduler = scheduler is not None @@ -121,7 +128,7 @@ def denoise( timestep=t_vec, img_ids=img_ids, txt_ids=txt_ids, - guidance=guidance, + guidance=guidance_vec, return_dict=False, ) @@ -141,7 +148,7 @@ def denoise( timestep=t_vec, img_ids=img_ids, txt_ids=neg_txt_ids if neg_txt_ids is not None else txt_ids, - guidance=guidance, + guidance=guidance_vec, return_dict=False, ) @@ -222,7 +229,7 @@ def denoise( timestep=t_vec, img_ids=img_ids, txt_ids=txt_ids, - guidance=guidance, + guidance=guidance_vec, return_dict=False, ) @@ -242,7 +249,7 @@ def denoise( timestep=t_vec, img_ids=img_ids, txt_ids=neg_txt_ids if neg_txt_ids is not None else txt_ids, - guidance=guidance, + guidance=guidance_vec, return_dict=False, ) diff --git a/invokeai/backend/model_manager/configs/base.py b/invokeai/backend/model_manager/configs/base.py index 8c12e7594be..cc6637233ac 100644 --- a/invokeai/backend/model_manager/configs/base.py +++ b/invokeai/backend/model_manager/configs/base.py @@ -10,7 +10,7 @@ Type, ) -from pydantic import BaseModel, ConfigDict, Field, Tag +from pydantic import BaseModel, ConfigDict, Field, Tag, field_validator from pydantic_core import PydanticUndefined from 
invokeai.app.util.misc import uuid_string @@ -77,6 +77,22 @@ class Config_Base(ABC, BaseModel): default=None, description="The original API response from the source, as stringified JSON.", ) + source_url: str | None = Field( + default=None, + description="Optional URL for the model (e.g. download page or model page).", + ) + + @field_validator("source_url", mode="before") + @classmethod + def validate_source_url(cls, v: Any) -> str | None: + if v is None or v == "": + return None + if not isinstance(v, str): + raise ValueError("source_url must be a string") + if not v.startswith(("https://", "http://")): + raise ValueError("source_url must be an http or https URL") + return v + cover_image: str | None = Field( default=None, description="Url for image to preview model", diff --git a/invokeai/backend/model_manager/configs/factory.py b/invokeai/backend/model_manager/configs/factory.py index 4d26b4c3347..9059aecebd9 100644 --- a/invokeai/backend/model_manager/configs/factory.py +++ b/invokeai/backend/model_manager/configs/factory.py @@ -97,6 +97,7 @@ T2IAdapter_Diffusers_SDXL_Config, ) from invokeai.backend.model_manager.configs.t5_encoder import T5Encoder_BnBLLMint8_Config, T5Encoder_T5Encoder_Config +from invokeai.backend.model_manager.configs.text_llm import TextLLM_Diffusers_Config from invokeai.backend.model_manager.configs.textual_inversion import ( TI_File_SD1_Config, TI_File_SD2_Config, @@ -269,6 +270,7 @@ Annotated[SigLIP_Diffusers_Config, SigLIP_Diffusers_Config.get_tag()], Annotated[FLUXRedux_Checkpoint_Config, FLUXRedux_Checkpoint_Config.get_tag()], Annotated[LlavaOnevision_Diffusers_Config, LlavaOnevision_Diffusers_Config.get_tag()], + Annotated[TextLLM_Diffusers_Config, TextLLM_Diffusers_Config.get_tag()], Annotated[ExternalApiModelConfig, ExternalApiModelConfig.get_tag()], # Unknown model (fallback) Annotated[Unknown_Config, Unknown_Config.get_tag()], diff --git a/invokeai/backend/model_manager/configs/main.py 
b/invokeai/backend/model_manager/configs/main.py index a2f008f41ed..da5bc5eed36 100644 --- a/invokeai/backend/model_manager/configs/main.py +++ b/invokeai/backend/model_manager/configs/main.py @@ -81,8 +81,8 @@ def from_base( return cls(steps=35, cfg_scale=4.5, width=1024, height=1024) case BaseModelType.Flux2: # Different defaults based on variant - if variant == Flux2VariantType.Klein9BBase: - # Undistilled base model needs more steps + if variant in (Flux2VariantType.Klein4BBase, Flux2VariantType.Klein9BBase): + # Undistilled base models need more steps return cls(steps=28, cfg_scale=1.0, width=1024, height=1024) else: # Distilled models (Klein 4B, Klein 9B) use fewer steps @@ -389,6 +389,7 @@ def _get_flux2_variant(state_dict: dict[str | int, Any]) -> Flux2VariantType | N # Default to Klein9B - callers use filename heuristics to detect Klein9BBase return Flux2VariantType.Klein9B elif context_in_dim == KLEIN_4B_CONTEXT_DIM: + # Default to Klein4B - callers use filename heuristics to detect Klein4BBase return Flux2VariantType.Klein4B elif context_in_dim > 4096: # Unknown FLUX.2 variant, default to 4B @@ -573,10 +574,12 @@ def _get_variant_or_raise(cls, mod: ModelOnDisk) -> Flux2VariantType: if variant is None: raise NotAMatchError("unable to determine FLUX.2 model variant from state dict") - # Klein 9B Base and Klein 9B have identical architectures. - # Use filename heuristic to detect the Base (undistilled) variant. + # Base (undistilled) and distilled variants share identical architectures. + # Use filename heuristic to detect the Base variant. 
if variant == Flux2VariantType.Klein9B and _filename_suggests_base(mod.name): return Flux2VariantType.Klein9BBase + if variant == Flux2VariantType.Klein4B and _filename_suggests_base(mod.name): + return Flux2VariantType.Klein4BBase return variant @@ -745,10 +748,12 @@ def _get_variant_or_raise(cls, mod: ModelOnDisk) -> Flux2VariantType: if variant is None: raise NotAMatchError("unable to determine FLUX.2 model variant from state dict") - # Klein 9B Base and Klein 9B have identical architectures. - # Use filename heuristic to detect the Base (undistilled) variant. + # Base (undistilled) and distilled variants share identical architectures. + # Use filename heuristic to detect the Base variant. if variant == Flux2VariantType.Klein9B and _filename_suggests_base(mod.name): return Flux2VariantType.Klein9BBase + if variant == Flux2VariantType.Klein4B and _filename_suggests_base(mod.name): + return Flux2VariantType.Klein4BBase return variant @@ -856,11 +861,10 @@ def _get_variant_or_raise(cls, mod: ModelOnDisk) -> Flux2VariantType: """Determine the FLUX.2 variant from the transformer config. FLUX.2 Klein uses Qwen3 text encoder with larger joint_attention_dim: - - Klein 4B: joint_attention_dim = 7680 (3×Qwen3-4B hidden size) + - Klein 4B/4B Base: joint_attention_dim = 7680 (3×Qwen3-4B hidden size) - Klein 9B/9B Base: joint_attention_dim = 12288 (3×Qwen3-8B hidden size) - Klein 9B (distilled) and Klein 9B Base (undistilled) have identical architectures - and both have guidance_embeds=False. We use a filename heuristic to detect Base models. + Distilled and Base variants share identical architectures. We use a filename heuristic to detect Base models. 
""" KLEIN_4B_CONTEXT_DIM = 7680 # 3 × 2560 KLEIN_9B_CONTEXT_DIM = 12288 # 3 × 4096 @@ -875,6 +879,8 @@ def _get_variant_or_raise(cls, mod: ModelOnDisk) -> Flux2VariantType: return Flux2VariantType.Klein9BBase return Flux2VariantType.Klein9B elif joint_attention_dim == KLEIN_4B_CONTEXT_DIM: + if _filename_suggests_base(mod.name): + return Flux2VariantType.Klein4BBase return Flux2VariantType.Klein4B elif joint_attention_dim > 4096: # Unknown FLUX.2 variant, default to 4B diff --git a/invokeai/backend/model_manager/configs/text_llm.py b/invokeai/backend/model_manager/configs/text_llm.py new file mode 100644 index 00000000000..a0fb3e009f9 --- /dev/null +++ b/invokeai/backend/model_manager/configs/text_llm.py @@ -0,0 +1,52 @@ +from typing import ( + Literal, + Self, +) + +from pydantic import Field +from typing_extensions import Any + +from invokeai.backend.model_manager.configs.base import Config_Base, Diffusers_Config_Base +from invokeai.backend.model_manager.configs.identification_utils import ( + NotAMatchError, + common_config_paths, + get_class_name_from_config_dict_or_raise, + raise_for_override_fields, + raise_if_not_dir, +) +from invokeai.backend.model_manager.model_on_disk import ModelOnDisk +from invokeai.backend.model_manager.taxonomy import ( + BaseModelType, + ModelType, +) + + +class TextLLM_Diffusers_Config(Diffusers_Config_Base, Config_Base): + """Model config for text-only causal language models (e.g. Llama, Phi, Qwen, Mistral).""" + + type: Literal[ModelType.TextLLM] = Field(default=ModelType.TextLLM) + base: Literal[BaseModelType.Any] = Field(default=BaseModelType.Any) + cpu_only: bool | None = Field(default=None, description="Whether this model should run on CPU only") + + @classmethod + def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self: + raise_if_not_dir(mod) + + raise_for_override_fields(cls, override_fields) + + # Check that the model's architecture is a causal language model. 
+ # This covers LlamaForCausalLM, PhiForCausalLM, Phi3ForCausalLM, Qwen2ForCausalLM, + # MistralForCausalLM, GemmaForCausalLM, GPTNeoXForCausalLM, etc. + class_name = get_class_name_from_config_dict_or_raise(common_config_paths(mod.path)) + if not class_name.endswith("ForCausalLM"): + raise NotAMatchError(f"model architecture '{class_name}' is not a causal language model") + + # Verify tokenizer files exist to avoid runtime failures + tokenizer_files = {"tokenizer.json", "tokenizer.model", "tokenizer_config.json"} + if not any((mod.path / f).exists() for f in tokenizer_files): + raise NotAMatchError( + f"no tokenizer files found in '{mod.path}' " + f"(expected at least one of: {', '.join(sorted(tokenizer_files))})" + ) + + return cls(**override_fields) diff --git a/invokeai/backend/model_manager/load/model_loaders/text_llm.py b/invokeai/backend/model_manager/load/model_loaders/text_llm.py new file mode 100644 index 00000000000..0ebfe3cc453 --- /dev/null +++ b/invokeai/backend/model_manager/load/model_loaders/text_llm.py @@ -0,0 +1,32 @@ +from pathlib import Path +from typing import Optional + +import torch +from transformers import AutoModelForCausalLM + +from invokeai.backend.model_manager.configs.factory import AnyModelConfig +from invokeai.backend.model_manager.load.load_default import ModelLoader +from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry +from invokeai.backend.model_manager.taxonomy import AnyModel, BaseModelType, ModelFormat, ModelType, SubModelType + + +@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.TextLLM, format=ModelFormat.Diffusers) +class TextLLMModelLoader(ModelLoader): + """Class for loading text causal language models (Llama, Phi, Qwen, Mistral, etc.).""" + + def _load_model( + self, + config: AnyModelConfig, + submodel_type: Optional[SubModelType] = None, + ) -> AnyModel: + if submodel_type is not None: + raise ValueError("Unexpected submodel requested for TextLLM model.") + + # 
Use float32 for CPU-only models since CPU fp16 is emulated and slow. + dtype = self._torch_dtype + if getattr(config, "cpu_only", False) is True: + dtype = torch.float32 + + model_path = Path(config.path) + model = AutoModelForCausalLM.from_pretrained(model_path, local_files_only=True, torch_dtype=dtype) + return model diff --git a/invokeai/backend/model_manager/taxonomy.py b/invokeai/backend/model_manager/taxonomy.py index b2b55ebd3fc..8f1fb00b5b7 100644 --- a/invokeai/backend/model_manager/taxonomy.py +++ b/invokeai/backend/model_manager/taxonomy.py @@ -82,6 +82,7 @@ class ModelType(str, Enum): SigLIP = "siglip" FluxRedux = "flux_redux" LlavaOnevision = "llava_onevision" + TextLLM = "text_llm" ExternalImageGenerator = "external_image_generator" Unknown = "unknown" @@ -131,7 +132,10 @@ class Flux2VariantType(str, Enum): """FLUX.2 model variants.""" Klein4B = "klein_4b" - """Flux2 Klein 4B variant using Qwen3 4B text encoder.""" + """Flux2 Klein 4B variant using Qwen3 4B text encoder (distilled).""" + + Klein4BBase = "klein_4b_base" + """Flux2 Klein 4B Base variant - undistilled foundation model using Qwen3 4B text encoder.""" Klein9B = "klein_9b" """Flux2 Klein 9B variant using Qwen3 8B text encoder (distilled).""" diff --git a/invokeai/backend/text_llm_pipeline.py b/invokeai/backend/text_llm_pipeline.py new file mode 100644 index 00000000000..69815c1a7f7 --- /dev/null +++ b/invokeai/backend/text_llm_pipeline.py @@ -0,0 +1,56 @@ +import torch +from transformers import PreTrainedModel, PreTrainedTokenizerBase + +DEFAULT_SYSTEM_PROMPT = ( + "You are an expert prompt writer for AI image generation. " + "Given a brief description, expand it into a detailed, vivid prompt suitable for generating high-quality images. " + "Only output the expanded prompt, nothing else." 
+) + + +class TextLLMPipeline: + """A wrapper for a causal language model + tokenizer for text generation.""" + + def __init__(self, model: PreTrainedModel, tokenizer: PreTrainedTokenizerBase): + self._model = model + self._tokenizer = tokenizer + + def run( + self, + prompt: str, + system_prompt: str = DEFAULT_SYSTEM_PROMPT, + max_new_tokens: int = 300, + device: torch.device = torch.device("cpu"), + dtype: torch.dtype = torch.float16, + ) -> str: + # Build messages for chat template if supported, otherwise use raw prompt. + if hasattr(self._tokenizer, "apply_chat_template") and self._tokenizer.chat_template is not None: + messages = [] + if system_prompt: + messages.append({"role": "system", "content": system_prompt}) + messages.append({"role": "user", "content": prompt}) + formatted_prompt: str = self._tokenizer.apply_chat_template( + messages, tokenize=False, add_generation_prompt=True + ) + else: + # Fallback for models without chat template + if system_prompt: + formatted_prompt = f"{system_prompt}\n\nUser: {prompt}\nAssistant:" + else: + formatted_prompt = prompt + + inputs = self._tokenizer(formatted_prompt, return_tensors="pt").to(device=device) + output = self._model.generate( + **inputs, + max_new_tokens=max_new_tokens, + do_sample=True, + temperature=0.7, + top_p=0.9, + ) + + # Decode only the newly generated tokens (exclude the input prompt tokens). 
+ input_length = inputs["input_ids"].shape[1] + generated_tokens = output[0][input_length:] + response = self._tokenizer.decode(generated_tokens, skip_special_tokens=True).strip() + + return response diff --git a/invokeai/frontend/web/openapi.json b/invokeai/frontend/web/openapi.json index 19e5a3a68e9..0b2aa90e272 100644 --- a/invokeai/frontend/web/openapi.json +++ b/invokeai/frontend/web/openapi.json @@ -6,6 +6,444 @@ "version": "1.0.0" }, "paths": { + "/api/v1/auth/status": { + "get": { + "tags": ["authentication"], + "summary": "Get Setup Status", + "description": "Check if initial administrator setup is required.\n\nReturns:\n SetupStatusResponse indicating whether setup is needed and multiuser mode status", + "operationId": "get_setup_status_api_v1_auth_status_get", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SetupStatusResponse" + } + } + } + } + } + } + }, + "/api/v1/auth/login": { + "post": { + "tags": ["authentication"], + "summary": "Login", + "description": "Authenticate user and return access token.\n\nArgs:\n request: Login credentials (email and password)\n\nReturns:\n LoginResponse containing JWT token and user information\n\nRaises:\n HTTPException: 401 if credentials are invalid or user is inactive\n HTTPException: 403 if multiuser mode is disabled", + "operationId": "login_api_v1_auth_login_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LoginRequest", + "description": "Login credentials" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LoginResponse" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + 
} + }, + "/api/v1/auth/logout": { + "post": { + "tags": ["authentication"], + "summary": "Logout", + "description": "Logout current user.\n\nCurrently a no-op since we use stateless JWT tokens. For token invalidation in\nfuture implementations, consider:\n- Token blacklist: Store invalidated tokens in Redis/database with expiration\n- Token versioning: Add version field to user record, increment on logout\n- Short-lived tokens: Use refresh token pattern with token rotation\n- Session storage: Track active sessions server-side for revocation\n\nArgs:\n current_user: The authenticated user (validates token)\n\nReturns:\n LogoutResponse indicating success", + "operationId": "logout_api_v1_auth_logout_post", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LogoutResponse" + } + } + } + } + }, + "security": [ + { + "HTTPBearer": [] + } + ] + } + }, + "/api/v1/auth/me": { + "get": { + "tags": ["authentication"], + "summary": "Get Current User Info", + "description": "Get current authenticated user's information.\n\nArgs:\n current_user: The authenticated user's token data\n\nReturns:\n UserDTO containing user information\n\nRaises:\n HTTPException: 404 if user is not found (should not happen normally)", + "operationId": "get_current_user_info_api_v1_auth_me_get", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UserDTO" + } + } + } + } + }, + "security": [ + { + "HTTPBearer": [] + } + ] + }, + "patch": { + "tags": ["authentication"], + "summary": "Update Current User", + "description": "Update the current user's own profile.\n\nTo change the password, both ``current_password`` and ``new_password`` must\nbe provided. 
The current password is verified before the change is applied.\n\nArgs:\n request: Profile fields to update\n current_user: The authenticated user\n\nReturns:\n The updated user\n\nRaises:\n HTTPException: 400 if current password is incorrect or new password is weak\n HTTPException: 404 if user not found", + "operationId": "update_current_user_api_v1_auth_me_patch", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UserProfileUpdateRequest", + "description": "Profile fields to update" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UserDTO" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + }, + "security": [ + { + "HTTPBearer": [] + } + ] + } + }, + "/api/v1/auth/setup": { + "post": { + "tags": ["authentication"], + "summary": "Setup Admin", + "description": "Set up initial administrator account.\n\nThis endpoint can only be called once, when no admin user exists. 
It creates\nthe first admin user for the system.\n\nArgs:\n request: Admin account details (email, display_name, password)\n\nReturns:\n SetupResponse containing the created admin user\n\nRaises:\n HTTPException: 400 if admin already exists or password is weak\n HTTPException: 403 if multiuser mode is disabled", + "operationId": "setup_admin_api_v1_auth_setup_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SetupRequest", + "description": "Admin account details" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SetupResponse" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/v1/auth/generate-password": { + "get": { + "tags": ["authentication"], + "summary": "Generate Password", + "description": "Generate a strong random password.\n\nReturns a cryptographically secure random password of 16 characters\ncontaining uppercase, lowercase, digits, and punctuation.", + "operationId": "generate_password_api_v1_auth_generate_password_get", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GeneratePasswordResponse" + } + } + } + } + }, + "security": [ + { + "HTTPBearer": [] + } + ] + } + }, + "/api/v1/auth/users": { + "get": { + "tags": ["authentication"], + "summary": "List Users", + "description": "List all users. 
Requires admin privileges.\n\nThe internal 'system' user (created for backward compatibility) is excluded\nfrom the results since it cannot be managed through this interface.\n\nReturns:\n List of all real users (system user excluded)", + "operationId": "list_users_api_v1_auth_users_get", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "items": { + "$ref": "#/components/schemas/UserDTO" + }, + "type": "array", + "title": "Response List Users Api V1 Auth Users Get" + } + } + } + } + }, + "security": [ + { + "HTTPBearer": [] + } + ] + }, + "post": { + "tags": ["authentication"], + "summary": "Create User", + "description": "Create a new user. Requires admin privileges.\n\nArgs:\n request: New user details\n\nReturns:\n The created user\n\nRaises:\n HTTPException: 400 if email already exists or password is weak", + "operationId": "create_user_api_v1_auth_users_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AdminUserCreateRequest", + "description": "New user details" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UserDTO" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + }, + "security": [ + { + "HTTPBearer": [] + } + ] + } + }, + "/api/v1/auth/users/{user_id}": { + "get": { + "tags": ["authentication"], + "summary": "Get User", + "description": "Get a user by ID. 
Requires admin privileges.\n\nArgs:\n user_id: The user ID\n\nReturns:\n The user\n\nRaises:\n HTTPException: 404 if user not found", + "operationId": "get_user_api_v1_auth_users__user_id__get", + "security": [ + { + "HTTPBearer": [] + } + ], + "parameters": [ + { + "name": "user_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "User ID", + "title": "User Id" + }, + "description": "User ID" + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UserDTO" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "patch": { + "tags": ["authentication"], + "summary": "Update User", + "description": "Update a user. Requires admin privileges.\n\nArgs:\n user_id: The user ID\n request: Fields to update\n\nReturns:\n The updated user\n\nRaises:\n HTTPException: 400 if password is weak\n HTTPException: 404 if user not found", + "operationId": "update_user_api_v1_auth_users__user_id__patch", + "security": [ + { + "HTTPBearer": [] + } + ], + "parameters": [ + { + "name": "user_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "User ID", + "title": "User Id" + }, + "description": "User ID" + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AdminUserUpdateRequest", + "description": "User fields to update" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UserDTO" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + 
}, + "delete": { + "tags": ["authentication"], + "summary": "Delete User", + "description": "Delete a user. Requires admin privileges.\n\nAdmins can delete any user including other admins, but cannot delete the last\nremaining admin.\n\nArgs:\n user_id: The user ID\n\nRaises:\n HTTPException: 400 if attempting to delete the last admin\n HTTPException: 404 if user not found", + "operationId": "delete_user_api_v1_auth_users__user_id__delete", + "security": [ + { + "HTTPBearer": [] + } + ], + "parameters": [ + { + "name": "user_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "User ID", + "title": "User Id" + }, + "description": "User ID" + } + ], + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, "/api/v1/utilities/dynamicprompts": { "post": { "tags": ["utilities"], @@ -243,6 +681,9 @@ { "$ref": "#/components/schemas/Main_Diffusers_CogView4_Config" }, + { + "$ref": "#/components/schemas/Main_Diffusers_QwenImage_Config" + }, { "$ref": "#/components/schemas/Main_Diffusers_ZImage_Config" }, @@ -267,6 +708,9 @@ { "$ref": "#/components/schemas/Main_Checkpoint_ZImage_Config" }, + { + "$ref": "#/components/schemas/Main_Checkpoint_Anima_Config" + }, { "$ref": "#/components/schemas/Main_BnBNF4_FLUX_Config" }, @@ -276,6 +720,9 @@ { "$ref": "#/components/schemas/Main_GGUF_FLUX_Config" }, + { + "$ref": "#/components/schemas/Main_GGUF_QwenImage_Config" + }, { "$ref": "#/components/schemas/Main_GGUF_ZImage_Config" }, @@ -294,6 +741,9 @@ { "$ref": "#/components/schemas/VAE_Checkpoint_Flux2_Config" }, + { + "$ref": "#/components/schemas/VAE_Checkpoint_Anima_Config" + }, { "$ref": "#/components/schemas/VAE_Diffusers_SD1_Config" }, @@ -339,12 +789,21 @@ { "$ref": "#/components/schemas/LoRA_LyCORIS_SDXL_Config" }, + { + "$ref": 
"#/components/schemas/LoRA_LyCORIS_Flux2_Config" + }, { "$ref": "#/components/schemas/LoRA_LyCORIS_FLUX_Config" }, { "$ref": "#/components/schemas/LoRA_LyCORIS_ZImage_Config" }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_QwenImage_Config" + }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_Anima_Config" + }, { "$ref": "#/components/schemas/LoRA_OMI_SDXL_Config" }, @@ -360,6 +819,9 @@ { "$ref": "#/components/schemas/LoRA_Diffusers_SDXL_Config" }, + { + "$ref": "#/components/schemas/LoRA_Diffusers_Flux2_Config" + }, { "$ref": "#/components/schemas/LoRA_Diffusers_FLUX_Config" }, @@ -472,28 +934,28 @@ } } }, - "/api/v2/models/i/{key}": { + "/api/v2/models/get_by_hash": { "get": { "tags": ["model_manager"], - "summary": "Get Model Record", - "description": "Get a model record", - "operationId": "get_model_record", + "summary": "Get Model Records By Hash", + "description": "Gets a model by its hash. This is useful for recalling models that were deleted and reinstalled,\nas the hash remains stable across reinstallations while the key (UUID) changes.", + "operationId": "get_model_records_by_hash", "parameters": [ { - "name": "key", - "in": "path", + "name": "hash", + "in": "query", "required": true, "schema": { "type": "string", - "description": "Key of the model record to fetch.", - "title": "Key" + "description": "The hash of the model", + "title": "Hash" }, - "description": "Key of the model record to fetch." 
+ "description": "The hash of the model" } ], "responses": { "200": { - "description": "The model configuration was retrieved successfully", + "description": "Successful Response", "content": { "application/json": { "schema": { @@ -522,6 +984,9 @@ { "$ref": "#/components/schemas/Main_Diffusers_CogView4_Config" }, + { + "$ref": "#/components/schemas/Main_Diffusers_QwenImage_Config" + }, { "$ref": "#/components/schemas/Main_Diffusers_ZImage_Config" }, @@ -546,6 +1011,9 @@ { "$ref": "#/components/schemas/Main_Checkpoint_ZImage_Config" }, + { + "$ref": "#/components/schemas/Main_Checkpoint_Anima_Config" + }, { "$ref": "#/components/schemas/Main_BnBNF4_FLUX_Config" }, @@ -555,6 +1023,9 @@ { "$ref": "#/components/schemas/Main_GGUF_FLUX_Config" }, + { + "$ref": "#/components/schemas/Main_GGUF_QwenImage_Config" + }, { "$ref": "#/components/schemas/Main_GGUF_ZImage_Config" }, @@ -573,6 +1044,9 @@ { "$ref": "#/components/schemas/VAE_Checkpoint_Flux2_Config" }, + { + "$ref": "#/components/schemas/VAE_Checkpoint_Anima_Config" + }, { "$ref": "#/components/schemas/VAE_Diffusers_SD1_Config" }, @@ -618,12 +1092,21 @@ { "$ref": "#/components/schemas/LoRA_LyCORIS_SDXL_Config" }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_Flux2_Config" + }, { "$ref": "#/components/schemas/LoRA_LyCORIS_FLUX_Config" }, { "$ref": "#/components/schemas/LoRA_LyCORIS_ZImage_Config" }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_QwenImage_Config" + }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_Anima_Config" + }, { "$ref": "#/components/schemas/LoRA_OMI_SDXL_Config" }, @@ -639,6 +1122,9 @@ { "$ref": "#/components/schemas/LoRA_Diffusers_SDXL_Config" }, + { + "$ref": "#/components/schemas/LoRA_Diffusers_Flux2_Config" + }, { "$ref": "#/components/schemas/LoRA_Diffusers_FLUX_Config" }, @@ -733,35 +1219,11 @@ "$ref": "#/components/schemas/Unknown_Config" } ], - "title": "Response Get Model Record" - }, - "example": { - "path": "string", - "name": "string", - "base": "sd-1", - "type": "main", - 
"format": "checkpoint", - "config_path": "string", - "key": "string", - "hash": "string", - "file_size": 1, - "description": "string", - "source": "string", - "converted_at": 0, - "variant": "normal", - "prediction_type": "epsilon", - "repo_variant": "fp16", - "upcast_attention": false + "title": "Response Get Model Records By Hash" } } } }, - "400": { - "description": "Bad request" - }, - "404": { - "description": "The model could not be found" - }, "422": { "description": "Validation Error", "content": { @@ -773,12 +1235,14 @@ } } } - }, - "patch": { + } + }, + "/api/v2/models/i/{key}": { + "get": { "tags": ["model_manager"], - "summary": "Update Model Record", - "description": "Update a model's config.", - "operationId": "update_model_record", + "summary": "Get Model Record", + "description": "Get a model record", + "operationId": "get_model_record", "parameters": [ { "name": "key", @@ -786,38 +1250,15 @@ "required": true, "schema": { "type": "string", - "description": "Unique key of model", + "description": "Key of the model record to fetch.", "title": "Key" }, - "description": "Unique key of model" + "description": "Key of the model record to fetch." 
} ], - "requestBody": { - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ModelRecordChanges", - "description": "Model config", - "examples": [ - { - "path": "/path/to/model", - "name": "model_name", - "base": "sd-1", - "type": "main", - "format": "checkpoint", - "config_path": "configs/stable-diffusion/v1-inference.yaml", - "description": "Model description", - "variant": "normal" - } - ] - } - } - } - }, "responses": { "200": { - "description": "The model was updated successfully", + "description": "The model configuration was retrieved successfully", "content": { "application/json": { "schema": { @@ -846,6 +1287,9 @@ { "$ref": "#/components/schemas/Main_Diffusers_CogView4_Config" }, + { + "$ref": "#/components/schemas/Main_Diffusers_QwenImage_Config" + }, { "$ref": "#/components/schemas/Main_Diffusers_ZImage_Config" }, @@ -870,6 +1314,9 @@ { "$ref": "#/components/schemas/Main_Checkpoint_ZImage_Config" }, + { + "$ref": "#/components/schemas/Main_Checkpoint_Anima_Config" + }, { "$ref": "#/components/schemas/Main_BnBNF4_FLUX_Config" }, @@ -879,6 +1326,9 @@ { "$ref": "#/components/schemas/Main_GGUF_FLUX_Config" }, + { + "$ref": "#/components/schemas/Main_GGUF_QwenImage_Config" + }, { "$ref": "#/components/schemas/Main_GGUF_ZImage_Config" }, @@ -897,6 +1347,9 @@ { "$ref": "#/components/schemas/VAE_Checkpoint_Flux2_Config" }, + { + "$ref": "#/components/schemas/VAE_Checkpoint_Anima_Config" + }, { "$ref": "#/components/schemas/VAE_Diffusers_SD1_Config" }, @@ -942,12 +1395,21 @@ { "$ref": "#/components/schemas/LoRA_LyCORIS_SDXL_Config" }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_Flux2_Config" + }, { "$ref": "#/components/schemas/LoRA_LyCORIS_FLUX_Config" }, { "$ref": "#/components/schemas/LoRA_LyCORIS_ZImage_Config" }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_QwenImage_Config" + }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_Anima_Config" + }, { "$ref": 
"#/components/schemas/LoRA_OMI_SDXL_Config" }, @@ -963,6 +1425,362 @@ { "$ref": "#/components/schemas/LoRA_Diffusers_SDXL_Config" }, + { + "$ref": "#/components/schemas/LoRA_Diffusers_Flux2_Config" + }, + { + "$ref": "#/components/schemas/LoRA_Diffusers_FLUX_Config" + }, + { + "$ref": "#/components/schemas/LoRA_Diffusers_ZImage_Config" + }, + { + "$ref": "#/components/schemas/ControlLoRA_LyCORIS_FLUX_Config" + }, + { + "$ref": "#/components/schemas/T5Encoder_T5Encoder_Config" + }, + { + "$ref": "#/components/schemas/T5Encoder_BnBLLMint8_Config" + }, + { + "$ref": "#/components/schemas/Qwen3Encoder_Qwen3Encoder_Config" + }, + { + "$ref": "#/components/schemas/Qwen3Encoder_Checkpoint_Config" + }, + { + "$ref": "#/components/schemas/Qwen3Encoder_GGUF_Config" + }, + { + "$ref": "#/components/schemas/TI_File_SD1_Config" + }, + { + "$ref": "#/components/schemas/TI_File_SD2_Config" + }, + { + "$ref": "#/components/schemas/TI_File_SDXL_Config" + }, + { + "$ref": "#/components/schemas/TI_Folder_SD1_Config" + }, + { + "$ref": "#/components/schemas/TI_Folder_SD2_Config" + }, + { + "$ref": "#/components/schemas/TI_Folder_SDXL_Config" + }, + { + "$ref": "#/components/schemas/IPAdapter_InvokeAI_SD1_Config" + }, + { + "$ref": "#/components/schemas/IPAdapter_InvokeAI_SD2_Config" + }, + { + "$ref": "#/components/schemas/IPAdapter_InvokeAI_SDXL_Config" + }, + { + "$ref": "#/components/schemas/IPAdapter_Checkpoint_SD1_Config" + }, + { + "$ref": "#/components/schemas/IPAdapter_Checkpoint_SD2_Config" + }, + { + "$ref": "#/components/schemas/IPAdapter_Checkpoint_SDXL_Config" + }, + { + "$ref": "#/components/schemas/IPAdapter_Checkpoint_FLUX_Config" + }, + { + "$ref": "#/components/schemas/T2IAdapter_Diffusers_SD1_Config" + }, + { + "$ref": "#/components/schemas/T2IAdapter_Diffusers_SDXL_Config" + }, + { + "$ref": "#/components/schemas/Spandrel_Checkpoint_Config" + }, + { + "$ref": "#/components/schemas/CLIPEmbed_Diffusers_G_Config" + }, + { + "$ref": 
"#/components/schemas/CLIPEmbed_Diffusers_L_Config" + }, + { + "$ref": "#/components/schemas/CLIPVision_Diffusers_Config" + }, + { + "$ref": "#/components/schemas/SigLIP_Diffusers_Config" + }, + { + "$ref": "#/components/schemas/FLUXRedux_Checkpoint_Config" + }, + { + "$ref": "#/components/schemas/LlavaOnevision_Diffusers_Config" + }, + { + "$ref": "#/components/schemas/Unknown_Config" + } + ], + "title": "Response Get Model Record" + }, + "example": { + "path": "string", + "name": "string", + "base": "sd-1", + "type": "main", + "format": "checkpoint", + "config_path": "string", + "key": "string", + "hash": "string", + "file_size": 1, + "description": "string", + "source": "string", + "converted_at": 0, + "variant": "normal", + "prediction_type": "epsilon", + "repo_variant": "fp16", + "upcast_attention": false + } + } + } + }, + "400": { + "description": "Bad request" + }, + "404": { + "description": "The model could not be found" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "patch": { + "tags": ["model_manager"], + "summary": "Update Model Record", + "description": "Update a model's config.", + "operationId": "update_model_record", + "security": [ + { + "HTTPBearer": [] + } + ], + "parameters": [ + { + "name": "key", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Unique key of model", + "title": "Key" + }, + "description": "Unique key of model" + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ModelRecordChanges", + "description": "Model config", + "examples": [ + { + "path": "/path/to/model", + "name": "model_name", + "base": "sd-1", + "type": "main", + "format": "checkpoint", + "config_path": "configs/stable-diffusion/v1-inference.yaml", + "description": "Model description", + "variant": "normal" + } + 
] + } + } + } + }, + "responses": { + "200": { + "description": "The model was updated successfully", + "content": { + "application/json": { + "schema": { + "oneOf": [ + { + "$ref": "#/components/schemas/Main_Diffusers_SD1_Config" + }, + { + "$ref": "#/components/schemas/Main_Diffusers_SD2_Config" + }, + { + "$ref": "#/components/schemas/Main_Diffusers_SDXL_Config" + }, + { + "$ref": "#/components/schemas/Main_Diffusers_SDXLRefiner_Config" + }, + { + "$ref": "#/components/schemas/Main_Diffusers_SD3_Config" + }, + { + "$ref": "#/components/schemas/Main_Diffusers_FLUX_Config" + }, + { + "$ref": "#/components/schemas/Main_Diffusers_Flux2_Config" + }, + { + "$ref": "#/components/schemas/Main_Diffusers_CogView4_Config" + }, + { + "$ref": "#/components/schemas/Main_Diffusers_QwenImage_Config" + }, + { + "$ref": "#/components/schemas/Main_Diffusers_ZImage_Config" + }, + { + "$ref": "#/components/schemas/Main_Checkpoint_SD1_Config" + }, + { + "$ref": "#/components/schemas/Main_Checkpoint_SD2_Config" + }, + { + "$ref": "#/components/schemas/Main_Checkpoint_SDXL_Config" + }, + { + "$ref": "#/components/schemas/Main_Checkpoint_SDXLRefiner_Config" + }, + { + "$ref": "#/components/schemas/Main_Checkpoint_Flux2_Config" + }, + { + "$ref": "#/components/schemas/Main_Checkpoint_FLUX_Config" + }, + { + "$ref": "#/components/schemas/Main_Checkpoint_ZImage_Config" + }, + { + "$ref": "#/components/schemas/Main_Checkpoint_Anima_Config" + }, + { + "$ref": "#/components/schemas/Main_BnBNF4_FLUX_Config" + }, + { + "$ref": "#/components/schemas/Main_GGUF_Flux2_Config" + }, + { + "$ref": "#/components/schemas/Main_GGUF_FLUX_Config" + }, + { + "$ref": "#/components/schemas/Main_GGUF_QwenImage_Config" + }, + { + "$ref": "#/components/schemas/Main_GGUF_ZImage_Config" + }, + { + "$ref": "#/components/schemas/VAE_Checkpoint_SD1_Config" + }, + { + "$ref": "#/components/schemas/VAE_Checkpoint_SD2_Config" + }, + { + "$ref": "#/components/schemas/VAE_Checkpoint_SDXL_Config" + }, + { + "$ref": 
"#/components/schemas/VAE_Checkpoint_FLUX_Config" + }, + { + "$ref": "#/components/schemas/VAE_Checkpoint_Flux2_Config" + }, + { + "$ref": "#/components/schemas/VAE_Checkpoint_Anima_Config" + }, + { + "$ref": "#/components/schemas/VAE_Diffusers_SD1_Config" + }, + { + "$ref": "#/components/schemas/VAE_Diffusers_SDXL_Config" + }, + { + "$ref": "#/components/schemas/VAE_Diffusers_Flux2_Config" + }, + { + "$ref": "#/components/schemas/ControlNet_Checkpoint_SD1_Config" + }, + { + "$ref": "#/components/schemas/ControlNet_Checkpoint_SD2_Config" + }, + { + "$ref": "#/components/schemas/ControlNet_Checkpoint_SDXL_Config" + }, + { + "$ref": "#/components/schemas/ControlNet_Checkpoint_FLUX_Config" + }, + { + "$ref": "#/components/schemas/ControlNet_Checkpoint_ZImage_Config" + }, + { + "$ref": "#/components/schemas/ControlNet_Diffusers_SD1_Config" + }, + { + "$ref": "#/components/schemas/ControlNet_Diffusers_SD2_Config" + }, + { + "$ref": "#/components/schemas/ControlNet_Diffusers_SDXL_Config" + }, + { + "$ref": "#/components/schemas/ControlNet_Diffusers_FLUX_Config" + }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_SD1_Config" + }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_SD2_Config" + }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_SDXL_Config" + }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_Flux2_Config" + }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_FLUX_Config" + }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_ZImage_Config" + }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_QwenImage_Config" + }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_Anima_Config" + }, + { + "$ref": "#/components/schemas/LoRA_OMI_SDXL_Config" + }, + { + "$ref": "#/components/schemas/LoRA_OMI_FLUX_Config" + }, + { + "$ref": "#/components/schemas/LoRA_Diffusers_SD1_Config" + }, + { + "$ref": "#/components/schemas/LoRA_Diffusers_SD2_Config" + }, + { + "$ref": "#/components/schemas/LoRA_Diffusers_SDXL_Config" + }, + { + "$ref": 
"#/components/schemas/LoRA_Diffusers_Flux2_Config" + }, { "$ref": "#/components/schemas/LoRA_Diffusers_FLUX_Config" }, @@ -1106,6 +1924,11 @@ "summary": "Delete Model", "description": "Delete model record from database.\n\nThe configuration record will be removed. The corresponding weights files will be\ndeleted as well if they reside within the InvokeAI \"models\" directory.", "operationId": "delete_model", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "key", @@ -1145,6 +1968,11 @@ "summary": "Reidentify Model", "description": "Attempt to reidentify a model by re-probing its weights file.", "operationId": "reidentify_model", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "key", @@ -1189,6 +2017,9 @@ { "$ref": "#/components/schemas/Main_Diffusers_CogView4_Config" }, + { + "$ref": "#/components/schemas/Main_Diffusers_QwenImage_Config" + }, { "$ref": "#/components/schemas/Main_Diffusers_ZImage_Config" }, @@ -1213,6 +2044,9 @@ { "$ref": "#/components/schemas/Main_Checkpoint_ZImage_Config" }, + { + "$ref": "#/components/schemas/Main_Checkpoint_Anima_Config" + }, { "$ref": "#/components/schemas/Main_BnBNF4_FLUX_Config" }, @@ -1222,6 +2056,9 @@ { "$ref": "#/components/schemas/Main_GGUF_FLUX_Config" }, + { + "$ref": "#/components/schemas/Main_GGUF_QwenImage_Config" + }, { "$ref": "#/components/schemas/Main_GGUF_ZImage_Config" }, @@ -1240,6 +2077,9 @@ { "$ref": "#/components/schemas/VAE_Checkpoint_Flux2_Config" }, + { + "$ref": "#/components/schemas/VAE_Checkpoint_Anima_Config" + }, { "$ref": "#/components/schemas/VAE_Diffusers_SD1_Config" }, @@ -1285,12 +2125,21 @@ { "$ref": "#/components/schemas/LoRA_LyCORIS_SDXL_Config" }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_Flux2_Config" + }, { "$ref": "#/components/schemas/LoRA_LyCORIS_FLUX_Config" }, { "$ref": "#/components/schemas/LoRA_LyCORIS_ZImage_Config" }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_QwenImage_Config" + }, + { + "$ref": 
"#/components/schemas/LoRA_LyCORIS_Anima_Config" + }, { "$ref": "#/components/schemas/LoRA_OMI_SDXL_Config" }, @@ -1306,6 +2155,9 @@ { "$ref": "#/components/schemas/LoRA_Diffusers_SDXL_Config" }, + { + "$ref": "#/components/schemas/LoRA_Diffusers_Flux2_Config" + }, { "$ref": "#/components/schemas/LoRA_Diffusers_FLUX_Config" }, @@ -1586,6 +2438,11 @@ "tags": ["model_manager"], "summary": "Update Model Image", "operationId": "update_model_image", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "key", @@ -1637,6 +2494,11 @@ "tags": ["model_manager"], "summary": "Delete Model Image", "operationId": "delete_model_image", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "key", @@ -1708,7 +2570,58 @@ } } } - } + }, + "security": [ + { + "HTTPBearer": [] + } + ] + } + }, + "/api/v2/models/i/bulk_reidentify": { + "post": { + "tags": ["model_manager"], + "summary": "Bulk Reidentify Models", + "description": "Reidentify multiple models by re-probing their weights files.\n\nReturns a list of successfully reidentified keys and failed reidentifications with error messages.", + "operationId": "bulk_reidentify_models", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BulkReidentifyModelsRequest", + "description": "List of model keys to reidentify" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Models reidentified (possibly with some failures)", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BulkReidentifyModelsResponse" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + }, + "security": [ + { + "HTTPBearer": [] + } + ] } }, "/api/v2/models/install": { @@ -1717,6 +2630,11 @@ "summary": "Install Model", "description": "Install a model using a string 
identifier.\n\n`source` can be any of the following.\n\n1. A path on the local filesystem ('C:\\users\\fred\\model.safetensors')\n2. A Url pointing to a single downloadable model file\n3. A HuggingFace repo_id with any of the following formats:\n - model/name\n - model/name:fp16:vae\n - model/name::vae -- use default precision\n - model/name:fp16:path/to/model.safetensors\n - model/name::path/to/model.safetensors\n\n`config` is a ModelRecordChanges object. Fields in this object will override\nthe ones that are probed automatically. Pass an empty object to accept\nall the defaults.\n\n`access_token` is an optional access token for use with Urls that require\nauthentication.\n\nModels will be downloaded, probed, configured and installed in a\nseries of background threads. The return object has `status` attribute\nthat can be used to monitor progress.\n\nSee the documentation for `import_model_record` for more information on\ninterpreting the job information returned by this route.", "operationId": "install_model", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "source", @@ -1821,6 +2739,11 @@ "summary": "List Model Installs", "description": "Return the list of model install jobs.\n\nInstall jobs have a numeric `id`, a `status`, and other fields that provide information on\nthe nature of the job and its progress. The `status` is one of:\n\n* \"waiting\" -- Job is waiting in the queue to run\n* \"downloading\" -- Model file(s) are downloading\n* \"running\" -- Model has downloaded and the model probing and registration process is running\n* \"paused\" -- Job is paused and can be resumed\n* \"completed\" -- Installation completed successfully\n* \"error\" -- An error occurred. Details will be in the \"error_type\" and \"error\" fields.\n* \"cancelled\" -- Job was cancelled before completion.\n\nOnce completed, information about the model such as its size, base\nmodel and type can be retrieved from the `config_out` field. 
For multi-file models such as diffusers,\ninformation on individual files can be retrieved from `download_parts`.\n\nSee the example and schema below for more information.", "operationId": "list_model_installs", + "security": [ + { + "HTTPBearer": [] + } + ], "responses": { "200": { "description": "Successful Response", @@ -1843,6 +2766,11 @@ "summary": "Prune Model Install Jobs", "description": "Prune all completed and errored jobs from the install job list.", "operationId": "prune_model_install_jobs", + "security": [ + { + "HTTPBearer": [] + } + ], "responses": { "200": { "description": "Successful Response", @@ -1867,6 +2795,11 @@ "summary": "Install Hugging Face Model", "description": "Install a Hugging Face model using a string identifier.", "operationId": "install_hugging_face_model", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "source", @@ -1916,6 +2849,11 @@ "summary": "Get Model Install Job", "description": "Return model install job corresponding to the given source. 
See the documentation for 'List Model Install Jobs'\nfor information on the format of the return value.", "operationId": "get_model_install_job", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "id", @@ -1960,6 +2898,11 @@ "summary": "Cancel Model Install Job", "description": "Cancel the model install job(s) corresponding to the given job ID.", "operationId": "cancel_model_install_job", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "id", @@ -2004,6 +2947,11 @@ "summary": "Pause Model Install Job", "description": "Pause the model install job corresponding to the given job ID.", "operationId": "pause_model_install_job", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "id", @@ -2050,6 +2998,11 @@ "summary": "Resume Model Install Job", "description": "Resume a paused model install job corresponding to the given job ID.", "operationId": "resume_model_install_job", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "id", @@ -2096,6 +3049,11 @@ "summary": "Restart Failed Model Install Job", "description": "Restart failed or non-resumable file downloads for the given job.", "operationId": "restart_failed_model_install_job", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "id", @@ -2142,6 +3100,11 @@ "summary": "Restart Model Install File", "description": "Restart a specific file download for the given job.", "operationId": "restart_model_install_file", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "id", @@ -2202,6 +3165,11 @@ "summary": "Convert Model", "description": "Permanently convert a model into diffusers format, replacing the safetensors version.\nNote that during the conversion process the key and model hash will change.\nThe return value is the model configuration for the converted model.", "operationId": "convert_model", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "key", @@ 
-2246,6 +3214,9 @@ { "$ref": "#/components/schemas/Main_Diffusers_CogView4_Config" }, + { + "$ref": "#/components/schemas/Main_Diffusers_QwenImage_Config" + }, { "$ref": "#/components/schemas/Main_Diffusers_ZImage_Config" }, @@ -2270,6 +3241,9 @@ { "$ref": "#/components/schemas/Main_Checkpoint_ZImage_Config" }, + { + "$ref": "#/components/schemas/Main_Checkpoint_Anima_Config" + }, { "$ref": "#/components/schemas/Main_BnBNF4_FLUX_Config" }, @@ -2279,6 +3253,9 @@ { "$ref": "#/components/schemas/Main_GGUF_FLUX_Config" }, + { + "$ref": "#/components/schemas/Main_GGUF_QwenImage_Config" + }, { "$ref": "#/components/schemas/Main_GGUF_ZImage_Config" }, @@ -2297,6 +3274,9 @@ { "$ref": "#/components/schemas/VAE_Checkpoint_Flux2_Config" }, + { + "$ref": "#/components/schemas/VAE_Checkpoint_Anima_Config" + }, { "$ref": "#/components/schemas/VAE_Diffusers_SD1_Config" }, @@ -2342,12 +3322,21 @@ { "$ref": "#/components/schemas/LoRA_LyCORIS_SDXL_Config" }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_Flux2_Config" + }, { "$ref": "#/components/schemas/LoRA_LyCORIS_FLUX_Config" }, { "$ref": "#/components/schemas/LoRA_LyCORIS_ZImage_Config" }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_QwenImage_Config" + }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_Anima_Config" + }, { "$ref": "#/components/schemas/LoRA_OMI_SDXL_Config" }, @@ -2363,6 +3352,9 @@ { "$ref": "#/components/schemas/LoRA_Diffusers_SDXL_Config" }, + { + "$ref": "#/components/schemas/LoRA_Diffusers_Flux2_Config" + }, { "$ref": "#/components/schemas/LoRA_Diffusers_FLUX_Config" }, @@ -2564,7 +3556,12 @@ } } } - } + }, + "security": [ + { + "HTTPBearer": [] + } + ] } }, "/api/v2/models/hf_login": { @@ -2620,7 +3617,12 @@ } } } - } + }, + "security": [ + { + "HTTPBearer": [] + } + ] }, "delete": { "tags": ["model_manager"], @@ -2637,7 +3639,12 @@ } } } - } + }, + "security": [ + { + "HTTPBearer": [] + } + ] } }, "/api/v2/models/sync/orphaned": { @@ -2661,7 +3668,12 @@ } } } - } + }, + "security": [ + { + 
"HTTPBearer": [] + } + ] }, "delete": { "tags": ["model_manager"], @@ -2699,7 +3711,12 @@ } } } - } + }, + "security": [ + { + "HTTPBearer": [] + } + ] } }, "/api/v1/download_queue/": { @@ -2904,8 +3921,13 @@ "post": { "tags": ["images"], "summary": "Upload Image", - "description": "Uploads an image", + "description": "Uploads an image for the current user", "operationId": "upload_image", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "image_category", @@ -3063,8 +4085,13 @@ "get": { "tags": ["images"], "summary": "List Image Dtos", - "description": "Gets a list of image DTOs", + "description": "Gets a list of image DTOs for the current user", "operationId": "list_image_dtos", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "image_origin", @@ -3237,6 +4264,11 @@ "summary": "Delete Image", "description": "Deletes an image", "operationId": "delete_image", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "image_name", @@ -3278,6 +4310,11 @@ "summary": "Update Image", "description": "Updates an image", "operationId": "update_image", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "image_name", @@ -3330,6 +4367,11 @@ "summary": "Get Image Dto", "description": "Gets an image's DTO", "operationId": "get_image_dto", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "image_name", @@ -3371,7 +4413,7 @@ "get": { "tags": ["images"], "summary": "Get Intermediates Count", - "description": "Gets the count of intermediate images", + "description": "Gets the count of intermediate images. Non-admin users only see their own intermediates.", "operationId": "get_intermediates_count", "responses": { "200": { @@ -3385,12 +4427,17 @@ } } } - } + }, + "security": [ + { + "HTTPBearer": [] + } + ] }, "delete": { "tags": ["images"], "summary": "Clear Intermediates", - "description": "Clears all intermediates", + "description": "Clears all intermediates. 
Requires admin.", "operationId": "clear_intermediates", "responses": { "200": { @@ -3404,7 +4451,12 @@ } } } - } + }, + "security": [ + { + "HTTPBearer": [] + } + ] } }, "/api/v1/images/i/{image_name}/metadata": { @@ -3413,6 +4465,11 @@ "summary": "Get Image Metadata", "description": "Gets an image's metadata", "operationId": "get_image_metadata", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "image_name", @@ -3463,6 +4520,11 @@ "tags": ["images"], "summary": "Get Image Workflow", "operationId": "get_image_workflow", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "image_name", @@ -3504,7 +4566,7 @@ "head": { "tags": ["images"], "summary": "Get Image Full", - "description": "Gets a full-resolution image file", + "description": "Gets a full-resolution image file.\n\nThis endpoint is intentionally unauthenticated because browsers load images\nvia tags which cannot send Bearer tokens. Image names are UUIDs,\nproviding security through unguessability.", "operationId": "get_image_full_head", "parameters": [ { @@ -3544,7 +4606,7 @@ "get": { "tags": ["images"], "summary": "Get Image Full", - "description": "Gets a full-resolution image file", + "description": "Gets a full-resolution image file.\n\nThis endpoint is intentionally unauthenticated because browsers load images\nvia tags which cannot send Bearer tokens. Image names are UUIDs,\nproviding security through unguessability.", "operationId": "get_image_full", "parameters": [ { @@ -3586,7 +4648,7 @@ "get": { "tags": ["images"], "summary": "Get Image Thumbnail", - "description": "Gets a thumbnail image file", + "description": "Gets a thumbnail image file.\n\nThis endpoint is intentionally unauthenticated because browsers load images\nvia tags which cannot send Bearer tokens. 
Image names are UUIDs,\nproviding security through unguessability.", "operationId": "get_image_thumbnail", "parameters": [ { @@ -3630,6 +4692,11 @@ "summary": "Get Image Urls", "description": "Gets an image and thumbnail URL", "operationId": "get_image_urls", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "image_name", @@ -3703,14 +4770,19 @@ } } } - } + }, + "security": [ + { + "HTTPBearer": [] + } + ] } }, "/api/v1/images/uncategorized": { "delete": { "tags": ["images"], "summary": "Delete Uncategorized Images", - "description": "Deletes all images that are uncategorized", + "description": "Deletes all uncategorized images owned by the current user (or all if admin)", "operationId": "delete_uncategorized_images", "responses": { "200": { @@ -3723,7 +4795,12 @@ } } } - } + }, + "security": [ + { + "HTTPBearer": [] + } + ] } }, "/api/v1/images/star": { @@ -3762,7 +4839,12 @@ } } } - } + }, + "security": [ + { + "HTTPBearer": [] + } + ] } }, "/api/v1/images/unstar": { @@ -3801,7 +4883,12 @@ } } } - } + }, + "security": [ + { + "HTTPBearer": [] + } + ] } }, "/api/v1/images/download": { @@ -3839,15 +4926,25 @@ } } } - } + }, + "security": [ + { + "HTTPBearer": [] + } + ] } }, "/api/v1/images/download/{bulk_download_item_name}": { "get": { "tags": ["images"], "summary": "Get Bulk Download Item", - "description": "Gets a bulk download zip file", + "description": "Gets a bulk download zip file.\n\nRequires authentication. 
The caller must be the user who initiated the\ndownload (tracked by the bulk download service) or an admin.", "operationId": "get_bulk_download_item", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "bulk_download_item_name", @@ -3890,6 +4987,11 @@ "summary": "Get Image Names", "description": "Gets ordered list of image names with metadata for optimistic updates", "operationId": "get_image_names", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "image_origin", @@ -4073,15 +5175,25 @@ } } } - } + }, + "security": [ + { + "HTTPBearer": [] + } + ] } }, "/api/v1/boards/": { "post": { "tags": ["boards"], "summary": "Create Board", - "description": "Creates a board", + "description": "Creates a board for the current user", "operationId": "create_board", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "board_name", @@ -4122,8 +5234,13 @@ "get": { "tags": ["boards"], "summary": "List Boards", - "description": "Gets a list of boards", + "description": "Gets a list of boards for the current user, including shared boards. 
Admin users see all boards.", "operationId": "list_boards", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "order_by", @@ -4253,8 +5370,13 @@ "get": { "tags": ["boards"], "summary": "Get Board", - "description": "Gets a board", + "description": "Gets a board (user must have access to it)", "operationId": "get_board", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "board_id", @@ -4294,8 +5416,13 @@ "patch": { "tags": ["boards"], "summary": "Update Board", - "description": "Updates a board", + "description": "Updates a board (user must have access to it)", "operationId": "update_board", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "board_id", @@ -4346,8 +5473,13 @@ "delete": { "tags": ["boards"], "summary": "Delete Board", - "description": "Deletes a board", + "description": "Deletes a board (user must have access to it)", "operationId": "delete_board", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "board_id", @@ -4410,6 +5542,11 @@ "summary": "List All Board Image Names", "description": "Gets a list of images for a board", "operationId": "list_all_board_image_names", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "board_id", @@ -4527,7 +5664,12 @@ } } } - } + }, + "security": [ + { + "HTTPBearer": [] + } + ] }, "delete": { "tags": ["boards"], @@ -4565,7 +5707,12 @@ } } } - } + }, + "security": [ + { + "HTTPBearer": [] + } + ] } }, "/api/v1/board_images/batch": { @@ -4605,7 +5752,12 @@ } } } - } + }, + "security": [ + { + "HTTPBearer": [] + } + ] } }, "/api/v1/board_images/batch/delete": { @@ -4645,7 +5797,12 @@ } } } - } + }, + "security": [ + { + "HTTPBearer": [] + } + ] } }, "/api/v1/model_relationships/i/{model_key}": { @@ -5034,8 +6191,13 @@ "post": { "tags": ["queue"], "summary": "Enqueue Batch", - "description": "Processes a batch and enqueues the output graphs for execution.", + "description": "Processes a batch and 
enqueues the output graphs for execution for the current user.", "operationId": "enqueue_batch", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "queue_id", @@ -5099,6 +6261,11 @@ "summary": "List All Queue Items", "description": "Gets all queue items", "operationId": "list_all_queue_items", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "queue_id", @@ -5162,8 +6329,13 @@ "get": { "tags": ["queue"], "summary": "Get Queue Item Ids", - "description": "Gets all queue item ids that match the given parameters", + "description": "Gets all queue item ids that match the given parameters. Non-admin users only see their own items.", "operationId": "get_queue_item_ids", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "queue_id", @@ -5218,6 +6390,11 @@ "summary": "Get Queue Items By Item Ids", "description": "Gets queue items for the specified queue item ids. Maintains order of item ids.", "operationId": "get_queue_items_by_item_ids", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "queue_id", @@ -5273,8 +6450,13 @@ "put": { "tags": ["queue"], "summary": "Resume", - "description": "Resumes session processor", + "description": "Resumes session processor. Admin only.", "operationId": "resume", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "queue_id", @@ -5316,8 +6498,13 @@ "put": { "tags": ["queue"], "summary": "Pause", - "description": "Pauses session processor", + "description": "Pauses session processor. Admin only.", "operationId": "pause", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "queue_id", @@ -5359,8 +6546,13 @@ "put": { "tags": ["queue"], "summary": "Cancel All Except Current", - "description": "Immediately cancels all queue items except in-processing items", + "description": "Immediately cancels all queue items except in-processing items. 
Non-admin users can only cancel their own items.", "operationId": "cancel_all_except_current", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "queue_id", @@ -5402,8 +6594,13 @@ "put": { "tags": ["queue"], "summary": "Delete All Except Current", - "description": "Immediately deletes all queue items except in-processing items", + "description": "Immediately deletes all queue items except in-processing items. Non-admin users can only delete their own items.", "operationId": "delete_all_except_current", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "queue_id", @@ -5445,8 +6642,13 @@ "put": { "tags": ["queue"], "summary": "Cancel By Batch Ids", - "description": "Immediately cancels all queue items from the given batch ids", + "description": "Immediately cancels all queue items from the given batch ids. Non-admin users can only cancel their own items.", "operationId": "cancel_by_batch_ids", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "queue_id", @@ -5498,8 +6700,13 @@ "put": { "tags": ["queue"], "summary": "Cancel By Destination", - "description": "Immediately cancels all queue items with the given origin", + "description": "Immediately cancels all queue items with the given destination. Non-admin users can only cancel their own items.", "operationId": "cancel_by_destination", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "queue_id", @@ -5552,8 +6759,13 @@ "put": { "tags": ["queue"], "summary": "Retry Items By Id", - "description": "Immediately cancels all queue items with the given origin", + "description": "Retries the given queue items. 
Users can only retry their own items unless they are an admin.", "operationId": "retry_items_by_id", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "queue_id", @@ -5610,8 +6822,13 @@ "put": { "tags": ["queue"], "summary": "Clear", - "description": "Clears the queue entirely, immediately canceling the currently-executing session", + "description": "Clears the queue entirely. Admin users clear all items; non-admin users only clear their own items. If there's a currently-executing item, users can only cancel it if they own it or are an admin.", "operationId": "clear", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "queue_id", @@ -5653,8 +6870,13 @@ "put": { "tags": ["queue"], "summary": "Prune", - "description": "Prunes all completed or errored queue items", + "description": "Prunes all completed or errored queue items. Non-admin users can only prune their own items.", "operationId": "prune", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "queue_id", @@ -5698,6 +6920,11 @@ "summary": "Get Current Queue Item", "description": "Gets the currently execution queue item", "operationId": "get_current_queue_item", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "queue_id", @@ -5755,6 +6982,11 @@ "summary": "Get Next Queue Item", "description": "Gets the next queue item, without executing it", "operationId": "get_next_queue_item", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "queue_id", @@ -5810,8 +7042,13 @@ "get": { "tags": ["queue"], "summary": "Get Queue Status", - "description": "Gets the status of the session queue", + "description": "Gets the status of the session queue. 
Non-admin users see only their own counts and cannot see current item details unless they own it.", "operationId": "get_queue_status", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "queue_id", @@ -5853,8 +7090,13 @@ "get": { "tags": ["queue"], "summary": "Get Batch Status", - "description": "Gets the status of the session queue", + "description": "Gets the status of a batch. Non-admin users only see their own batches.", "operationId": "get_batch_status", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "queue_id", @@ -5909,6 +7151,11 @@ "summary": "Get Queue Item", "description": "Gets a queue item", "operationId": "get_queue_item", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "queue_id", @@ -5959,8 +7206,13 @@ "delete": { "tags": ["queue"], "summary": "Delete Queue Item", - "description": "Deletes a queue item", + "description": "Deletes a queue item. Users can only delete their own items unless they are an admin.", "operationId": "delete_queue_item", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "queue_id", @@ -6011,8 +7263,13 @@ "put": { "tags": ["queue"], "summary": "Cancel Queue Item", - "description": "Deletes a queue item", + "description": "Cancels a queue item. Users can only cancel their own items unless they are an admin.", "operationId": "cancel_queue_item", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "queue_id", @@ -6065,8 +7322,13 @@ "get": { "tags": ["queue"], "summary": "Counts By Destination", - "description": "Gets the counts of queue items by destination", + "description": "Gets the counts of queue items by destination. 
Non-admin users only see their own items.", "operationId": "counts_by_destination", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "queue_id", @@ -6119,8 +7381,13 @@ "delete": { "tags": ["queue"], "summary": "Delete By Destination", - "description": "Deletes all items with the given destination", + "description": "Deletes all items with the given destination. Non-admin users can only delete their own items.", "operationId": "delete_by_destination", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "queue_id", @@ -6175,6 +7442,11 @@ "summary": "Get Workflow", "description": "Gets a workflow", "operationId": "get_workflow", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "workflow_id", @@ -6216,6 +7488,11 @@ "summary": "Update Workflow", "description": "Updates a workflow", "operationId": "update_workflow", + "security": [ + { + "HTTPBearer": [] + } + ], "requestBody": { "required": true, "content": { @@ -6254,6 +7531,11 @@ "summary": "Delete Workflow", "description": "Deletes a workflow", "operationId": "delete_workflow", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "workflow_id", @@ -6295,242 +7577,258 @@ "summary": "Create Workflow", "description": "Creates a workflow", "operationId": "create_workflow", - "requestBody": { - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Body_create_workflow" - } - } - } - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/WorkflowRecordDTO" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - }, - "get": { - "tags": ["workflows"], - "summary": "List Workflows", - "description": "Gets a page of workflows", - "operationId": 
"list_workflows", - "parameters": [ - { - "name": "page", - "in": "query", - "required": false, - "schema": { - "type": "integer", - "description": "The page to get", - "default": 0, - "title": "Page" - }, - "description": "The page to get" - }, - { - "name": "per_page", - "in": "query", - "required": false, - "schema": { - "anyOf": [ - { - "type": "integer" - }, - { - "type": "null" - } - ], - "description": "The number of workflows per page", - "title": "Per Page" - }, - "description": "The number of workflows per page" - }, - { - "name": "order_by", - "in": "query", - "required": false, - "schema": { - "$ref": "#/components/schemas/WorkflowRecordOrderBy", - "description": "The attribute to order by", - "default": "name" - }, - "description": "The attribute to order by" - }, - { - "name": "direction", - "in": "query", - "required": false, - "schema": { - "$ref": "#/components/schemas/SQLiteDirection", - "description": "The direction to order by", - "default": "ASC" - }, - "description": "The direction to order by" - }, - { - "name": "categories", - "in": "query", - "required": false, - "schema": { - "anyOf": [ - { - "type": "array", - "items": { - "$ref": "#/components/schemas/WorkflowCategory" - } - }, - { - "type": "null" - } - ], - "description": "The categories of workflow to get", - "title": "Categories" - }, - "description": "The categories of workflow to get" - }, - { - "name": "tags", - "in": "query", - "required": false, - "schema": { - "anyOf": [ - { - "type": "array", - "items": { - "type": "string" - } - }, - { - "type": "null" - } - ], - "description": "The tags of workflow to get", - "title": "Tags" - }, - "description": "The tags of workflow to get" - }, - { - "name": "query", - "in": "query", - "required": false, - "schema": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "description": "The text to query by (matches name and description)", - "title": "Query" - }, - "description": "The text to query by (matches name and 
description)" - }, + "security": [ { - "name": "has_been_opened", - "in": "query", - "required": false, - "schema": { - "anyOf": [ - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "description": "Whether to include/exclude recent workflows", - "title": "Has Been Opened" - }, - "description": "Whether to include/exclude recent workflows" - }, - { - "name": "is_public", - "in": "query", - "required": false, - "schema": { - "anyOf": [ - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "title": "Is Public" - }, - "description": "Filter by public/shared status" - } - ], - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/PaginatedResults_WorkflowRecordListItemWithThumbnailDTO_" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - }, - "/api/v1/workflows/i/{workflow_id}/thumbnail": { - "put": { - "tags": ["workflows"], - "summary": "Set Workflow Thumbnail", - "description": "Sets a workflow's thumbnail image", - "operationId": "set_workflow_thumbnail", - "parameters": [ - { - "name": "workflow_id", - "in": "path", - "required": true, - "schema": { - "type": "string", - "description": "The workflow to update", - "title": "Workflow Id" - }, - "description": "The workflow to update" + "HTTPBearer": [] } ], "requestBody": { "required": true, "content": { - "multipart/form-data": { + "application/json": { "schema": { - "$ref": "#/components/schemas/Body_set_workflow_thumbnail" + "$ref": "#/components/schemas/Body_create_workflow" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WorkflowRecordDTO" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + 
"application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "get": { + "tags": ["workflows"], + "summary": "List Workflows", + "description": "Gets a page of workflows", + "operationId": "list_workflows", + "security": [ + { + "HTTPBearer": [] + } + ], + "parameters": [ + { + "name": "page", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "description": "The page to get", + "default": 0, + "title": "Page" + }, + "description": "The page to get" + }, + { + "name": "per_page", + "in": "query", + "required": false, + "schema": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The number of workflows per page", + "title": "Per Page" + }, + "description": "The number of workflows per page" + }, + { + "name": "order_by", + "in": "query", + "required": false, + "schema": { + "$ref": "#/components/schemas/WorkflowRecordOrderBy", + "description": "The attribute to order by", + "default": "name" + }, + "description": "The attribute to order by" + }, + { + "name": "direction", + "in": "query", + "required": false, + "schema": { + "$ref": "#/components/schemas/SQLiteDirection", + "description": "The direction to order by", + "default": "ASC" + }, + "description": "The direction to order by" + }, + { + "name": "categories", + "in": "query", + "required": false, + "schema": { + "anyOf": [ + { + "type": "array", + "items": { + "$ref": "#/components/schemas/WorkflowCategory" + } + }, + { + "type": "null" + } + ], + "description": "The categories of workflow to get", + "title": "Categories" + }, + "description": "The categories of workflow to get" + }, + { + "name": "tags", + "in": "query", + "required": false, + "schema": { + "anyOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "null" + } + ], + "description": "The tags of workflow to get", + "title": "Tags" + }, + "description": "The tags of workflow to get" + }, + { + 
"name": "query", + "in": "query", + "required": false, + "schema": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The text to query by (matches name and description)", + "title": "Query" + }, + "description": "The text to query by (matches name and description)" + }, + { + "name": "has_been_opened", + "in": "query", + "required": false, + "schema": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "description": "Whether to include/exclude recent workflows", + "title": "Has Been Opened" + }, + "description": "Whether to include/exclude recent workflows" + }, + { + "name": "is_public", + "in": "query", + "required": false, + "schema": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "description": "Filter by public/shared status", + "title": "Is Public" + }, + "description": "Filter by public/shared status" + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PaginatedResults_WorkflowRecordListItemWithThumbnailDTO_" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/v1/workflows/i/{workflow_id}/thumbnail": { + "put": { + "tags": ["workflows"], + "summary": "Set Workflow Thumbnail", + "description": "Sets a workflow's thumbnail image", + "operationId": "set_workflow_thumbnail", + "security": [ + { + "HTTPBearer": [] + } + ], + "parameters": [ + { + "name": "workflow_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "The workflow to update", + "title": "Workflow Id" + }, + "description": "The workflow to update" + } + ], + "requestBody": { + "required": true, + "content": { + "multipart/form-data": { + "schema": { + "$ref": 
"#/components/schemas/Body_set_workflow_thumbnail" } } } @@ -6563,6 +7861,11 @@ "summary": "Delete Workflow Thumbnail", "description": "Removes a workflow's thumbnail image", "operationId": "delete_workflow_thumbnail", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "workflow_id", @@ -6602,7 +7905,7 @@ "get": { "tags": ["workflows"], "summary": "Get Workflow Thumbnail", - "description": "Gets a workflow's thumbnail image", + "description": "Gets a workflow's thumbnail image.\n\nThis endpoint is intentionally unauthenticated because browsers load images\nvia tags which cannot send Bearer tokens. Workflow IDs are UUIDs,\nproviding security through unguessability.", "operationId": "get_workflow_thumbnail", "parameters": [ { @@ -6645,12 +7948,75 @@ } } }, + "/api/v1/workflows/i/{workflow_id}/is_public": { + "patch": { + "tags": ["workflows"], + "summary": "Update Workflow Is Public", + "description": "Updates whether a workflow is shared publicly", + "operationId": "update_workflow_is_public", + "security": [ + { + "HTTPBearer": [] + } + ], + "parameters": [ + { + "name": "workflow_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "The workflow to update", + "title": "Workflow Id" + }, + "description": "The workflow to update" + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Body_update_workflow_is_public" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WorkflowRecordDTO" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, "/api/v1/workflows/tags": { "get": { "tags": ["workflows"], "summary": "Get All Tags", "description": "Gets all unique tags from 
workflows", "operationId": "get_all_tags", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "categories", @@ -6686,6 +8052,7 @@ "type": "null" } ], + "description": "Filter by public/shared status", "title": "Is Public" }, "description": "Filter by public/shared status" @@ -6725,6 +8092,11 @@ "summary": "Get Counts By Tag", "description": "Counts workflows by tag", "operationId": "get_counts_by_tag", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "tags", @@ -6792,6 +8164,7 @@ "type": "null" } ], + "description": "Filter by public/shared status", "title": "Is Public" }, "description": "Filter by public/shared status" @@ -6831,6 +8204,11 @@ "summary": "Counts By Category", "description": "Counts workflows by category", "operationId": "counts_by_category", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "categories", @@ -6877,6 +8255,7 @@ "type": "null" } ], + "description": "Filter by public/shared status", "title": "Is Public" }, "description": "Filter by public/shared status" @@ -6916,6 +8295,11 @@ "summary": "Update Opened At", "description": "Updates the opened_at field of a workflow", "operationId": "update_opened_at", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "workflow_id", @@ -7252,8 +8636,13 @@ "get": { "tags": ["client_state"], "summary": "Get Client State By Key", - "description": "Gets the client state", + "description": "Gets the client state for the current user (or system user if not authenticated)", "operationId": "get_client_state_by_key", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "queue_id", @@ -7261,10 +8650,10 @@ "required": true, "schema": { "type": "string", - "description": "The queue id to perform this operation on", + "description": "The queue id (ignored, kept for backwards compatibility)", "title": "Queue Id" }, - "description": "The queue id to perform this operation on" + "description": "The queue id 
(ignored, kept for backwards compatibility)" }, { "name": "key", @@ -7314,8 +8703,13 @@ "post": { "tags": ["client_state"], "summary": "Set Client State", - "description": "Sets the client state", + "description": "Sets the client state for the current user (or system user if not authenticated)", "operationId": "set_client_state", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "queue_id", @@ -7323,10 +8717,10 @@ "required": true, "schema": { "type": "string", - "description": "The queue id to perform this operation on", + "description": "The queue id (ignored, kept for backwards compatibility)", "title": "Queue Id" }, - "description": "The queue id to perform this operation on" + "description": "The queue id (ignored, kept for backwards compatibility)" }, { "name": "key", @@ -7381,8 +8775,13 @@ "post": { "tags": ["client_state"], "summary": "Delete Client State", - "description": "Deletes the client state", + "description": "Deletes the client state for the current user (or system user if not authenticated)", "operationId": "delete_client_state", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { "name": "queue_id", @@ -7390,10 +8789,10 @@ "required": true, "schema": { "type": "string", - "description": "The queue id to perform this operation on", + "description": "The queue id (ignored, kept for backwards compatibility)", "title": "Queue Id" }, - "description": "The queue id to perform this operation on" + "description": "The queue id (ignored, kept for backwards compatibility)" } ], "responses": { @@ -7421,42 +8820,52 @@ } } }, - "/api/v1/workflows/i/{workflow_id}/is_public": { - "patch": { - "tags": ["workflows"], - "summary": "Update Workflow Is Public", - "description": "Updates whether a workflow is shared publicly", - "operationId": "update_workflow_is_public", + "/api/v1/recall/{queue_id}": { + "post": { + "tags": ["recall"], + "summary": "Update Recall Parameters", + "description": "Update recallable parameters 
that can be recalled on the frontend.\n\nThis endpoint allows updating parameters such as prompt, model, steps, and other\ngeneration settings. These parameters are stored in client state and can be\naccessed by the frontend to populate UI elements.\n\nArgs:\n queue_id: The queue ID to associate these parameters with\n parameters: The RecallParameter object containing the parameters to update\n strict: When true, parameters not included in the request body are reset\n to their defaults (cleared on the frontend). Defaults to false,\n which preserves the existing behaviour of only updating the\n parameters that are explicitly provided.\n\nReturns:\n A dictionary containing the updated parameters and status\n\nExample:\n POST /api/v1/recall/{queue_id}?strict=true\n {\n \"positive_prompt\": \"a beautiful landscape\",\n \"model\": \"sd-1.5\",\n \"steps\": 20\n }\n # In strict mode, all other parameters (reference_images, loras, etc.)\n # are cleared. In non-strict mode (default) they would be left as-is.", + "operationId": "update_recall_parameters", + "security": [ + { + "HTTPBearer": [] + } + ], "parameters": [ { - "name": "workflow_id", + "name": "queue_id", "in": "path", "required": true, "schema": { "type": "string", - "title": "Workflow Id" + "description": "The queue id to perform this operation on", + "title": "Queue Id" }, - "description": "The workflow to update" + "description": "The queue id to perform this operation on" + }, + { + "name": "strict", + "in": "query", + "required": false, + "schema": { + "type": "boolean", + "description": "When true, parameters not included in the request are reset to their defaults (cleared).", + "default": false, + "title": "Strict" + }, + "description": "When true, parameters not included in the request are reset to their defaults (cleared)." 
} ], "requestBody": { + "required": true, "content": { "application/json": { "schema": { - "properties": { - "is_public": { - "type": "boolean", - "title": "Is Public", - "description": "Whether the workflow should be shared publicly" - } - }, - "type": "object", - "required": ["is_public"], - "title": "Body_update_workflow_is_public" + "$ref": "#/components/schemas/RecallParameter", + "description": "Recall parameters to update" } } - }, - "required": true + } }, "responses": { "200": { @@ -7464,7 +8873,57 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/WorkflowRecordDTO" + "type": "object", + "additionalProperties": true, + "title": "Response Update Recall Parameters" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "get": { + "tags": ["recall"], + "summary": "Get Recall Parameters", + "description": "Retrieve all stored recall parameters for a given queue.\n\nReturns a dictionary of all recall parameters that have been set for the queue.\n\nArgs:\n queue_id: The queue ID to retrieve parameters for\n\nReturns:\n A dictionary containing all stored recall parameters", + "operationId": "get_recall_parameters", + "security": [ + { + "HTTPBearer": [] + } + ], + "parameters": [ + { + "name": "queue_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "The queue id to retrieve parameters for", + "title": "Queue Id" + }, + "description": "The queue id to retrieve parameters for" + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "object", + "additionalProperties": true, + "title": "Response Get Recall Parameters" } } } @@ -7576,6 +9035,97 @@ "$ref": "#/components/schemas/IntegerOutput" } }, + "AdminUserCreateRequest": { + "properties": { + "email": { + "type": 
"string", + "title": "Email", + "description": "User email address" + }, + "display_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Display Name", + "description": "Display name" + }, + "password": { + "type": "string", + "title": "Password", + "description": "User password" + }, + "is_admin": { + "type": "boolean", + "title": "Is Admin", + "description": "Whether user should have admin privileges", + "default": false + } + }, + "type": "object", + "required": ["email", "password"], + "title": "AdminUserCreateRequest", + "description": "Request body for admin to create a new user." + }, + "AdminUserUpdateRequest": { + "properties": { + "display_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Display Name", + "description": "Display name" + }, + "password": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Password", + "description": "New password" + }, + "is_admin": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "title": "Is Admin", + "description": "Whether user should have admin privileges" + }, + "is_active": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "title": "Is Active", + "description": "Whether user account should be active" + } + }, + "type": "object", + "title": "AdminUserUpdateRequest", + "description": "Request body for admin to update any user." 
+ }, "AlphaMaskToTensorInvocation": { "category": "conditioning", "class": "invocation", @@ -7649,296 +9199,60 @@ "$ref": "#/components/schemas/MaskOutput" } }, - "AnyModelConfig": { - "oneOf": [ - { - "$ref": "#/components/schemas/Main_Diffusers_SD1_Config" - }, - { - "$ref": "#/components/schemas/Main_Diffusers_SD2_Config" - }, - { - "$ref": "#/components/schemas/Main_Diffusers_SDXL_Config" - }, - { - "$ref": "#/components/schemas/Main_Diffusers_SDXLRefiner_Config" - }, - { - "$ref": "#/components/schemas/Main_Diffusers_SD3_Config" - }, - { - "$ref": "#/components/schemas/Main_Diffusers_FLUX_Config" - }, - { - "$ref": "#/components/schemas/Main_Diffusers_Flux2_Config" - }, - { - "$ref": "#/components/schemas/Main_Diffusers_CogView4_Config" - }, - { - "$ref": "#/components/schemas/Main_Diffusers_ZImage_Config" - }, - { - "$ref": "#/components/schemas/Main_Checkpoint_SD1_Config" - }, - { - "$ref": "#/components/schemas/Main_Checkpoint_SD2_Config" - }, - { - "$ref": "#/components/schemas/Main_Checkpoint_SDXL_Config" - }, - { - "$ref": "#/components/schemas/Main_Checkpoint_SDXLRefiner_Config" - }, - { - "$ref": "#/components/schemas/Main_Checkpoint_Flux2_Config" - }, - { - "$ref": "#/components/schemas/Main_Checkpoint_FLUX_Config" - }, - { - "$ref": "#/components/schemas/Main_Checkpoint_ZImage_Config" - }, - { - "$ref": "#/components/schemas/Main_BnBNF4_FLUX_Config" - }, - { - "$ref": "#/components/schemas/Main_GGUF_Flux2_Config" - }, - { - "$ref": "#/components/schemas/Main_GGUF_FLUX_Config" - }, - { - "$ref": "#/components/schemas/Main_GGUF_ZImage_Config" - }, - { - "$ref": "#/components/schemas/VAE_Checkpoint_SD1_Config" - }, - { - "$ref": "#/components/schemas/VAE_Checkpoint_SD2_Config" - }, - { - "$ref": "#/components/schemas/VAE_Checkpoint_SDXL_Config" - }, - { - "$ref": "#/components/schemas/VAE_Checkpoint_FLUX_Config" - }, - { - "$ref": "#/components/schemas/VAE_Checkpoint_Flux2_Config" - }, - { - "$ref": "#/components/schemas/VAE_Diffusers_SD1_Config" - }, - 
{ - "$ref": "#/components/schemas/VAE_Diffusers_SDXL_Config" - }, - { - "$ref": "#/components/schemas/VAE_Diffusers_Flux2_Config" - }, - { - "$ref": "#/components/schemas/ControlNet_Checkpoint_SD1_Config" - }, - { - "$ref": "#/components/schemas/ControlNet_Checkpoint_SD2_Config" - }, - { - "$ref": "#/components/schemas/ControlNet_Checkpoint_SDXL_Config" - }, - { - "$ref": "#/components/schemas/ControlNet_Checkpoint_FLUX_Config" - }, - { - "$ref": "#/components/schemas/ControlNet_Checkpoint_ZImage_Config" - }, - { - "$ref": "#/components/schemas/ControlNet_Diffusers_SD1_Config" - }, - { - "$ref": "#/components/schemas/ControlNet_Diffusers_SD2_Config" - }, - { - "$ref": "#/components/schemas/ControlNet_Diffusers_SDXL_Config" - }, - { - "$ref": "#/components/schemas/ControlNet_Diffusers_FLUX_Config" - }, - { - "$ref": "#/components/schemas/LoRA_LyCORIS_SD1_Config" - }, - { - "$ref": "#/components/schemas/LoRA_LyCORIS_SD2_Config" - }, - { - "$ref": "#/components/schemas/LoRA_LyCORIS_SDXL_Config" - }, - { - "$ref": "#/components/schemas/LoRA_LyCORIS_FLUX_Config" - }, - { - "$ref": "#/components/schemas/LoRA_LyCORIS_ZImage_Config" - }, - { - "$ref": "#/components/schemas/LoRA_OMI_SDXL_Config" - }, - { - "$ref": "#/components/schemas/LoRA_OMI_FLUX_Config" - }, - { - "$ref": "#/components/schemas/LoRA_Diffusers_SD1_Config" - }, - { - "$ref": "#/components/schemas/LoRA_Diffusers_SD2_Config" - }, - { - "$ref": "#/components/schemas/LoRA_Diffusers_SDXL_Config" - }, - { - "$ref": "#/components/schemas/LoRA_Diffusers_FLUX_Config" - }, - { - "$ref": "#/components/schemas/LoRA_Diffusers_ZImage_Config" - }, - { - "$ref": "#/components/schemas/ControlLoRA_LyCORIS_FLUX_Config" - }, - { - "$ref": "#/components/schemas/T5Encoder_T5Encoder_Config" - }, - { - "$ref": "#/components/schemas/T5Encoder_BnBLLMint8_Config" - }, - { - "$ref": "#/components/schemas/Qwen3Encoder_Qwen3Encoder_Config" - }, - { - "$ref": "#/components/schemas/Qwen3Encoder_Checkpoint_Config" - }, - { - "$ref": 
"#/components/schemas/Qwen3Encoder_GGUF_Config" - }, - { - "$ref": "#/components/schemas/TI_File_SD1_Config" - }, - { - "$ref": "#/components/schemas/TI_File_SD2_Config" - }, - { - "$ref": "#/components/schemas/TI_File_SDXL_Config" - }, - { - "$ref": "#/components/schemas/TI_Folder_SD1_Config" - }, - { - "$ref": "#/components/schemas/TI_Folder_SD2_Config" - }, - { - "$ref": "#/components/schemas/TI_Folder_SDXL_Config" - }, - { - "$ref": "#/components/schemas/IPAdapter_InvokeAI_SD1_Config" - }, - { - "$ref": "#/components/schemas/IPAdapter_InvokeAI_SD2_Config" - }, - { - "$ref": "#/components/schemas/IPAdapter_InvokeAI_SDXL_Config" - }, - { - "$ref": "#/components/schemas/IPAdapter_Checkpoint_SD1_Config" - }, - { - "$ref": "#/components/schemas/IPAdapter_Checkpoint_SD2_Config" - }, - { - "$ref": "#/components/schemas/IPAdapter_Checkpoint_SDXL_Config" - }, - { - "$ref": "#/components/schemas/IPAdapter_Checkpoint_FLUX_Config" - }, - { - "$ref": "#/components/schemas/T2IAdapter_Diffusers_SD1_Config" - }, - { - "$ref": "#/components/schemas/T2IAdapter_Diffusers_SDXL_Config" - }, - { - "$ref": "#/components/schemas/Spandrel_Checkpoint_Config" - }, - { - "$ref": "#/components/schemas/CLIPEmbed_Diffusers_G_Config" - }, - { - "$ref": "#/components/schemas/CLIPEmbed_Diffusers_L_Config" - }, - { - "$ref": "#/components/schemas/CLIPVision_Diffusers_Config" - }, - { - "$ref": "#/components/schemas/SigLIP_Diffusers_Config" - }, - { - "$ref": "#/components/schemas/FLUXRedux_Checkpoint_Config" - }, - { - "$ref": "#/components/schemas/LlavaOnevision_Diffusers_Config" - }, - { - "$ref": "#/components/schemas/Unknown_Config" - } - ] - }, - "AppVersion": { - "properties": { - "version": { - "type": "string", - "title": "Version", - "description": "App version" - } - }, - "type": "object", - "required": ["version"], - "title": "AppVersion", - "description": "App Version Response" - }, - "ApplyMaskTensorToImageInvocation": { - "category": "mask", - "class": "invocation", - 
"classification": "stable", - "description": "Applies a tensor mask to an image.\n\nThe image is converted to RGBA and the mask is applied to the alpha channel.", - "node_pack": "invokeai", + "AnimaConditioningField": { + "description": "An Anima conditioning tensor primitive value.\n\nAnima conditioning contains Qwen3 0.6B hidden states and T5-XXL token IDs,\nwhich are combined by the LLM Adapter inside the transformer.", "properties": { - "board": { - "anyOf": [ - { - "$ref": "#/components/schemas/BoardField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "The board to save the image to", - "field_kind": "internal", - "input": "direct", - "orig_required": false, - "ui_hidden": false + "conditioning_name": { + "description": "The name of conditioning tensor", + "title": "Conditioning Name", + "type": "string" }, - "metadata": { + "mask": { "anyOf": [ { - "$ref": "#/components/schemas/MetadataField" + "$ref": "#/components/schemas/TensorField" }, { "type": "null" } ], "default": null, - "description": "Optional metadata to be saved with the image", - "field_kind": "internal", - "input": "connection", - "orig_required": false, + "description": "The mask associated with this conditioning tensor for regional prompting. Excluded regions should be set to False, included regions should be set to True." 
+ } + }, + "required": ["conditioning_name"], + "title": "AnimaConditioningField", + "type": "object" + }, + "AnimaConditioningOutput": { + "class": "output", + "description": "Base class for nodes that output an Anima text conditioning tensor.", + "properties": { + "conditioning": { + "$ref": "#/components/schemas/AnimaConditioningField", + "description": "Conditioning tensor", + "field_kind": "output", "ui_hidden": false }, + "type": { + "const": "anima_conditioning_output", + "default": "anima_conditioning_output", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["output_meta", "conditioning", "type", "type"], + "title": "AnimaConditioningOutput", + "type": "object" + }, + "AnimaDenoiseInvocation": { + "category": "image", + "class": "invocation", + "classification": "prototype", + "description": "Run the denoising process with an Anima model.\n\nUses rectified flow sampling with shift=3.0 and the Cosmos Predict2 DiT\nbackbone with integrated LLM Adapter for text conditioning.\n\nSupports txt2img, img2img (via latents input), and inpainting (via denoise_mask).", + "node_pack": "invokeai", + "properties": { "id": { "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", "field_kind": "node_attribute", @@ -7963,68 +9277,225 @@ "title": "Use Cache", "type": "boolean" }, - "mask": { + "latents": { "anyOf": [ { - "$ref": "#/components/schemas/TensorField" + "$ref": "#/components/schemas/LatentsField" }, { "type": "null" } ], "default": null, - "description": "The mask tensor to apply.", + "description": "Latents tensor", "field_kind": "input", - "input": "any", - "orig_required": true + "input": "connection", + "orig_default": null, + "orig_required": false }, - "image": { + "denoise_mask": { "anyOf": [ { - "$ref": "#/components/schemas/ImageField" + "$ref": "#/components/schemas/DenoiseMaskField" }, { "type": "null" } ], "default": null, - "description": "The image to apply the mask to.", + "description": "A mask of the region to apply the denoising process to. Values of 0.0 represent the regions to be fully denoised, and 1.0 represent the regions to be preserved.", "field_kind": "input", - "input": "any", - "orig_required": true + "input": "connection", + "orig_default": null, + "orig_required": false }, - "invert": { - "default": false, - "description": "Whether to invert the mask.", + "denoising_start": { + "default": 0.0, + "description": "When to start denoising, expressed a percentage of total steps", "field_kind": "input", "input": "any", - "orig_default": false, + "maximum": 1, + "minimum": 0, + "orig_default": 0.0, "orig_required": false, - "title": "Invert", - "type": "boolean" + "title": "Denoising Start", + "type": "number" }, - "type": { - "const": "apply_tensor_mask_to_image", - "default": "apply_tensor_mask_to_image", - "field_kind": "node_attribute", - "title": "type", - "type": "string" - } - }, - "required": ["type", "id"], - "tags": ["mask"], - "title": "Apply Tensor Mask to Image", + "denoising_end": { + "default": 1.0, + "description": "When to stop denoising, expressed a percentage of total steps", + "field_kind": "input", + "input": "any", + "maximum": 1, + 
"minimum": 0, + "orig_default": 1.0, + "orig_required": false, + "title": "Denoising End", + "type": "number" + }, + "add_noise": { + "default": true, + "description": "Add noise based on denoising start.", + "field_kind": "input", + "input": "any", + "orig_default": true, + "orig_required": false, + "title": "Add Noise", + "type": "boolean" + }, + "transformer": { + "anyOf": [ + { + "$ref": "#/components/schemas/TransformerField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Anima transformer model.", + "field_kind": "input", + "input": "connection", + "orig_required": true, + "title": "Transformer" + }, + "positive_conditioning": { + "anyOf": [ + { + "$ref": "#/components/schemas/AnimaConditioningField" + }, + { + "items": { + "$ref": "#/components/schemas/AnimaConditioningField" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Positive conditioning tensor", + "field_kind": "input", + "input": "connection", + "orig_required": true, + "title": "Positive Conditioning" + }, + "negative_conditioning": { + "anyOf": [ + { + "$ref": "#/components/schemas/AnimaConditioningField" + }, + { + "items": { + "$ref": "#/components/schemas/AnimaConditioningField" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Negative conditioning tensor", + "field_kind": "input", + "input": "connection", + "orig_default": null, + "orig_required": false, + "title": "Negative Conditioning" + }, + "guidance_scale": { + "default": 4.5, + "description": "Guidance scale for classifier-free guidance. 
Recommended: 4.0-5.0 for Anima.", + "field_kind": "input", + "input": "any", + "minimum": 1.0, + "orig_default": 4.5, + "orig_required": false, + "title": "Guidance Scale", + "type": "number" + }, + "width": { + "default": 1024, + "description": "Width of the generated image.", + "field_kind": "input", + "input": "any", + "multipleOf": 8, + "orig_default": 1024, + "orig_required": false, + "title": "Width", + "type": "integer" + }, + "height": { + "default": 1024, + "description": "Height of the generated image.", + "field_kind": "input", + "input": "any", + "multipleOf": 8, + "orig_default": 1024, + "orig_required": false, + "title": "Height", + "type": "integer" + }, + "steps": { + "default": 30, + "description": "Number of denoising steps. 30 recommended for Anima.", + "exclusiveMinimum": 0, + "field_kind": "input", + "input": "any", + "orig_default": 30, + "orig_required": false, + "title": "Steps", + "type": "integer" + }, + "seed": { + "default": 0, + "description": "Randomness seed for reproducibility.", + "field_kind": "input", + "input": "any", + "orig_default": 0, + "orig_required": false, + "title": "Seed", + "type": "integer" + }, + "scheduler": { + "default": "euler", + "description": "Scheduler (sampler) for the denoising process.", + "enum": ["euler", "heun", "lcm"], + "field_kind": "input", + "input": "any", + "orig_default": "euler", + "orig_required": false, + "title": "Scheduler", + "type": "string", + "ui_choice_labels": { + "euler": "Euler", + "heun": "Heun (2nd order)", + "lcm": "LCM" + } + }, + "type": { + "const": "anima_denoise", + "default": "anima_denoise", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["type", "id"], + "tags": ["image", "anima"], + "title": "Denoise - Anima", "type": "object", - "version": "1.0.0", + "version": "1.2.0", "output": { - "$ref": "#/components/schemas/ImageOutput" + "$ref": "#/components/schemas/LatentsOutput" } }, - "ApplyMaskToImageInvocation": { + 
"AnimaImageToLatentsInvocation": { "category": "image", "class": "invocation", - "classification": "stable", - "description": "Extracts a region from a generated image using a mask and blends it seamlessly onto a source image.\nThe mask uses black to indicate areas to keep from the generated image and white for areas to discard.", + "classification": "prototype", + "description": "Generates latents from an image using the Anima VAE (supports Wan 2.1 and FLUX VAE).", "node_pack": "invokeai", "properties": { "board": { @@ -8093,376 +9564,421 @@ } ], "default": null, - "description": "The image from which to extract the masked region", + "description": "The image to encode.", "field_kind": "input", "input": "any", "orig_required": true }, - "mask": { + "vae": { "anyOf": [ { - "$ref": "#/components/schemas/ImageField" + "$ref": "#/components/schemas/VAEField" }, { "type": "null" } ], "default": null, - "description": "The mask defining the region (black=keep, white=discard)", + "description": "VAE", "field_kind": "input", - "input": "any", + "input": "connection", "orig_required": true }, - "invert_mask": { - "default": false, - "description": "Whether to invert the mask before applying it", - "field_kind": "input", - "input": "any", - "orig_default": false, - "orig_required": false, - "title": "Invert Mask", - "type": "boolean" - }, "type": { - "const": "apply_mask_to_image", - "default": "apply_mask_to_image", + "const": "anima_i2l", + "default": "anima_i2l", "field_kind": "node_attribute", "title": "type", "type": "string" } }, "required": ["type", "id"], - "tags": ["image", "mask", "blend"], - "title": "Apply Mask to Image", + "tags": ["image", "latents", "vae", "i2l", "anima"], + "title": "Image to Latents - Anima", "type": "object", "version": "1.0.0", "output": { - "$ref": "#/components/schemas/ImageOutput" + "$ref": "#/components/schemas/LatentsOutput" } }, - "BaseMetadata": { - "properties": { - "name": { - "type": "string", - "title": "Name", - "description": 
"model's name" - }, - "type": { - "type": "string", - "const": "basemetadata", - "title": "Type", - "default": "basemetadata" - } - }, - "type": "object", - "required": ["name"], - "title": "BaseMetadata", - "description": "Adds typing data for discriminated union." - }, - "BaseModelType": { - "type": "string", - "enum": [ - "any", - "sd-1", - "sd-2", - "sd-3", - "sdxl", - "sdxl-refiner", - "flux", - "flux2", - "cogview4", - "z-image", - "unknown" - ], - "title": "BaseModelType", - "description": "An enumeration of base model architectures. For example, Stable Diffusion 1.x, Stable Diffusion 2.x, FLUX, etc.\n\nEvery model config must have a base architecture type.\n\nNot all models are associated with a base architecture. For example, CLIP models are their own thing, not related\nto any particular model architecture. To simplify internal APIs and make it easier to work with models, we use a\nfallback/null value `BaseModelType.Any` for these models, instead of making the model base optional." - }, - "Batch": { + "AnimaLatentsToImageInvocation": { + "category": "latents", + "class": "invocation", + "classification": "prototype", + "description": "Generates an image from latents using the Anima VAE.\n\nSupports the Wan 2.1 QwenImage VAE (AutoencoderKLWan) with explicit\nlatent denormalization, and FLUX VAE as fallback.", + "node_pack": "invokeai", "properties": { - "batch_id": { - "type": "string", - "title": "Batch Id", - "description": "The ID of the batch" - }, - "origin": { + "board": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/BoardField" }, { "type": "null" } ], - "title": "Origin", - "description": "The origin of this queue item. This data is used by the frontend to determine how to handle results." 
+ "default": null, + "description": "The board to save the image to", + "field_kind": "internal", + "input": "direct", + "orig_required": false, + "ui_hidden": false }, - "destination": { + "metadata": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/MetadataField" }, { "type": "null" } ], - "title": "Destination", - "description": "The origin of this queue item. This data is used by the frontend to determine how to handle results" + "default": null, + "description": "Optional metadata to be saved with the image", + "field_kind": "internal", + "input": "connection", + "orig_required": false, + "ui_hidden": false }, - "data": { + "id": { + "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" + }, + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" + }, + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" + }, + "latents": { "anyOf": [ { - "items": { - "items": { - "$ref": "#/components/schemas/BatchDatum" - }, - "type": "array" - }, - "type": "array" + "$ref": "#/components/schemas/LatentsField" }, { "type": "null" } ], - "title": "Data", - "description": "The batch data collection." 
- }, - "graph": { - "$ref": "#/components/schemas/Graph", - "description": "The graph to initialize the session with" + "default": null, + "description": "Latents tensor", + "field_kind": "input", + "input": "connection", + "orig_required": true }, - "workflow": { + "vae": { "anyOf": [ { - "$ref": "#/components/schemas/WorkflowWithoutID" + "$ref": "#/components/schemas/VAEField" }, { "type": "null" } ], - "description": "The workflow to initialize the session with" - }, - "runs": { - "type": "integer", - "minimum": 1.0, - "title": "Runs", - "description": "Int stating how many times to iterate through all possible batch indices", - "default": 1 - } - }, - "type": "object", - "required": ["graph", "runs"], - "title": "Batch" - }, - "BatchDatum": { - "properties": { - "node_path": { - "type": "string", - "title": "Node Path", - "description": "The node into which this batch data collection will be substituted." - }, - "field_name": { - "type": "string", - "title": "Field Name", - "description": "The field into which this batch data collection will be substituted." + "default": null, + "description": "VAE", + "field_kind": "input", + "input": "connection", + "orig_required": true }, - "items": { - "items": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "number" - }, - { - "type": "integer" - }, - { - "$ref": "#/components/schemas/ImageField" - } - ] - }, - "type": "array", - "title": "Items", - "description": "The list of items to substitute into the node/field." 
+ "type": { + "const": "anima_l2i", + "default": "anima_l2i", + "field_kind": "node_attribute", + "title": "type", + "type": "string" } }, + "required": ["type", "id"], + "tags": ["latents", "image", "vae", "l2i", "anima"], + "title": "Latents to Image - Anima", "type": "object", - "required": ["node_path", "field_name"], - "title": "BatchDatum" + "version": "1.0.2", + "output": { + "$ref": "#/components/schemas/ImageOutput" + } }, - "BatchEnqueuedEvent": { - "description": "Event model for batch_enqueued", + "AnimaLoRACollectionLoader": { + "category": "model", + "class": "invocation", + "classification": "prototype", + "description": "Applies a collection of LoRAs to an Anima transformer.", + "node_pack": "invokeai", "properties": { - "timestamp": { - "description": "The timestamp of the event", - "title": "Timestamp", - "type": "integer" - }, - "queue_id": { - "description": "The ID of the queue", - "title": "Queue Id", - "type": "string" - }, - "batch_id": { - "description": "The ID of the batch", - "title": "Batch Id", + "id": { + "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", "type": "string" }, - "enqueued": { - "description": "The number of invocations enqueued", - "title": "Enqueued", - "type": "integer" - }, - "requested": { - "description": "The number of invocations initially requested to be enqueued (may be less than enqueued if queue was full)", - "title": "Requested", - "type": "integer" + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" }, - "priority": { - "description": "The priority of the batch", - "title": "Priority", - "type": "integer" + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" }, - "origin": { + "loras": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/LoRAField" + }, + { + "items": { + "$ref": "#/components/schemas/LoRAField" + }, + "type": "array" }, { "type": "null" } ], "default": null, - "description": "The origin of the batch", - "title": "Origin" - } - }, - "required": ["timestamp", "queue_id", "batch_id", "enqueued", "requested", "priority", "origin"], - "title": "BatchEnqueuedEvent", - "type": "object" - }, - "BatchStatus": { - "properties": { - "queue_id": { - "type": "string", - "title": "Queue Id", - "description": "The ID of the queue" - }, - "batch_id": { - "type": "string", - "title": "Batch Id", - "description": "The ID of the batch" + "description": "LoRA models and weights. 
May be a single LoRA or collection.", + "field_kind": "input", + "input": "any", + "orig_default": null, + "orig_required": false, + "title": "LoRAs" }, - "origin": { + "transformer": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/TransformerField" }, { "type": "null" } ], - "title": "Origin", - "description": "The origin of the batch" + "default": null, + "description": "Transformer", + "field_kind": "input", + "input": "connection", + "orig_default": null, + "orig_required": false, + "title": "Transformer" }, - "destination": { + "qwen3_encoder": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/Qwen3EncoderField" }, { "type": "null" } ], - "title": "Destination", - "description": "The destination of the batch" - }, - "pending": { - "type": "integer", - "title": "Pending", - "description": "Number of queue items with status 'pending'" - }, - "in_progress": { - "type": "integer", - "title": "In Progress", - "description": "Number of queue items with status 'in_progress'" - }, - "completed": { - "type": "integer", - "title": "Completed", - "description": "Number of queue items with status 'complete'" - }, - "failed": { - "type": "integer", - "title": "Failed", - "description": "Number of queue items with status 'error'" - }, - "canceled": { - "type": "integer", - "title": "Canceled", - "description": "Number of queue items with status 'canceled'" + "default": null, + "description": "Qwen3 tokenizer and text encoder", + "field_kind": "input", + "input": "connection", + "orig_default": null, + "orig_required": false, + "title": "Qwen3 Encoder" }, - "total": { - "type": "integer", - "title": "Total", - "description": "Total number of queue items" + "type": { + "const": "anima_lora_collection_loader", + "default": "anima_lora_collection_loader", + "field_kind": "node_attribute", + "title": "type", + "type": "string" } }, + "required": ["type", "id"], + "tags": ["lora", "model", "anima"], + "title": "Apply LoRA Collection - Anima", 
"type": "object", - "required": [ - "queue_id", - "batch_id", - "origin", - "destination", - "pending", - "in_progress", - "completed", - "failed", - "canceled", - "total" - ], - "title": "BatchStatus" + "version": "1.0.0", + "output": { + "$ref": "#/components/schemas/AnimaLoRALoaderOutput" + } }, - "BlankImageInvocation": { - "category": "image", + "AnimaLoRALoaderInvocation": { + "category": "model", "class": "invocation", - "classification": "stable", - "description": "Creates a blank image and forwards it to the pipeline", + "classification": "prototype", + "description": "Apply a LoRA model to an Anima transformer and/or Qwen3 text encoder.", "node_pack": "invokeai", "properties": { - "board": { + "id": { + "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" + }, + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" + }, + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" + }, + "lora": { "anyOf": [ { - "$ref": "#/components/schemas/BoardField" + "$ref": "#/components/schemas/ModelIdentifierField" }, { "type": "null" } ], "default": null, - "description": "The board to save the image to", - "field_kind": "internal", - "input": "direct", + "description": "LoRA model to load", + "field_kind": "input", + "input": "any", + "orig_required": true, + "title": "LoRA", + "ui_model_base": ["anima"], + "ui_model_type": ["lora"] + }, + "weight": { + "default": 0.75, + "description": "The weight at which the LoRA is applied to each model", + "field_kind": "input", + "input": "any", + 
"orig_default": 0.75, "orig_required": false, - "ui_hidden": false + "title": "Weight", + "type": "number" }, - "metadata": { + "transformer": { "anyOf": [ { - "$ref": "#/components/schemas/MetadataField" + "$ref": "#/components/schemas/TransformerField" }, { "type": "null" } ], "default": null, - "description": "Optional metadata to be saved with the image", - "field_kind": "internal", + "description": "Transformer", + "field_kind": "input", + "input": "connection", + "orig_default": null, + "orig_required": false, + "title": "Anima Transformer" + }, + "qwen3_encoder": { + "anyOf": [ + { + "$ref": "#/components/schemas/Qwen3EncoderField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Qwen3 tokenizer and text encoder", + "field_kind": "input", "input": "connection", + "orig_default": null, "orig_required": false, + "title": "Qwen3 Encoder" + }, + "type": { + "const": "anima_lora_loader", + "default": "anima_lora_loader", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["type", "id"], + "tags": ["lora", "model", "anima"], + "title": "Apply LoRA - Anima", + "type": "object", + "version": "1.0.0", + "output": { + "$ref": "#/components/schemas/AnimaLoRALoaderOutput" + } + }, + "AnimaLoRALoaderOutput": { + "class": "output", + "description": "Anima LoRA Loader Output", + "properties": { + "transformer": { + "anyOf": [ + { + "$ref": "#/components/schemas/TransformerField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Transformer", + "field_kind": "output", + "title": "Anima Transformer", + "ui_hidden": false + }, + "qwen3_encoder": { + "anyOf": [ + { + "$ref": "#/components/schemas/Qwen3EncoderField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Qwen3 tokenizer and text encoder", + "field_kind": "output", + "title": "Qwen3 Encoder", "ui_hidden": false }, + "type": { + "const": "anima_lora_loader_output", + "default": "anima_lora_loader_output", + 
"field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["output_meta", "transformer", "qwen3_encoder", "type", "type"], + "title": "AnimaLoRALoaderOutput", + "type": "object" + }, + "AnimaModelLoaderInvocation": { + "category": "model", + "class": "invocation", + "classification": "prototype", + "description": "Loads an Anima model, outputting its submodels.\n\nAnima uses:\n- Transformer: Cosmos Predict2 DiT + LLM Adapter (from single-file checkpoint)\n- Qwen3 Encoder: Qwen3 0.6B (standalone single-file)\n- VAE: AutoencoderKLQwenImage / Wan 2.1 VAE (standalone single-file or FLUX VAE)\n- T5 Encoder: T5-XXL model (only the tokenizer submodel is used, for LLM Adapter token IDs)", + "node_pack": "invokeai", + "properties": { "id": { "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", "field_kind": "node_attribute", @@ -8487,78 +10003,109 @@ "title": "Use Cache", "type": "boolean" }, - "width": { - "default": 512, - "description": "The width of the image", + "model": { + "$ref": "#/components/schemas/ModelIdentifierField", + "description": "Anima main model (transformer + LLM adapter).", "field_kind": "input", - "input": "any", - "orig_default": 512, - "orig_required": false, - "title": "Width", - "type": "integer" + "input": "direct", + "orig_required": true, + "title": "Transformer", + "ui_model_base": ["anima"], + "ui_model_type": ["main"] }, - "height": { - "default": 512, - "description": "The height of the image", + "vae_model": { + "$ref": "#/components/schemas/ModelIdentifierField", + "description": "Standalone VAE model. Anima uses a Wan 2.1 / QwenImage VAE (16-channel). 
A FLUX VAE can also be used as a compatible fallback.", "field_kind": "input", - "input": "any", - "orig_default": 512, - "orig_required": false, - "title": "Height", - "type": "integer" + "input": "direct", + "orig_required": true, + "title": "VAE", + "ui_model_type": ["vae"] }, - "mode": { - "default": "RGB", - "description": "The mode of the image", - "enum": ["RGB", "RGBA"], + "qwen3_encoder_model": { + "$ref": "#/components/schemas/ModelIdentifierField", + "description": "Standalone Qwen3 0.6B Encoder model.", "field_kind": "input", - "input": "any", - "orig_default": "RGB", - "orig_required": false, - "title": "Mode", - "type": "string" + "input": "direct", + "orig_required": true, + "title": "Qwen3 Encoder", + "ui_model_type": ["qwen3_encoder"] }, - "color": { - "$ref": "#/components/schemas/ColorField", - "default": { - "r": 0, - "g": 0, - "b": 0, - "a": 255 - }, - "description": "The color of the image", + "t5_encoder_model": { + "$ref": "#/components/schemas/ModelIdentifierField", + "description": "T5-XXL encoder model. 
The tokenizer submodel is used for Anima text encoding.", "field_kind": "input", - "input": "any", - "orig_default": { - "a": 255, - "b": 0, - "g": 0, - "r": 0 - }, - "orig_required": false + "input": "direct", + "orig_required": true, + "title": "T5 Encoder", + "ui_model_type": ["t5_encoder"] }, "type": { - "const": "blank_image", - "default": "blank_image", + "const": "anima_model_loader", + "default": "anima_model_loader", "field_kind": "node_attribute", "title": "type", "type": "string" } }, - "required": ["type", "id"], - "tags": ["image"], - "title": "Blank Image", + "required": ["model", "vae_model", "qwen3_encoder_model", "t5_encoder_model", "type", "id"], + "tags": ["model", "anima"], + "title": "Main Model - Anima", "type": "object", - "version": "1.2.2", + "version": "1.3.0", "output": { - "$ref": "#/components/schemas/ImageOutput" + "$ref": "#/components/schemas/AnimaModelLoaderOutput" } }, - "BlendLatentsInvocation": { - "category": "latents", + "AnimaModelLoaderOutput": { + "class": "output", + "description": "Anima model loader output.", + "properties": { + "transformer": { + "$ref": "#/components/schemas/TransformerField", + "description": "Transformer", + "field_kind": "output", + "title": "Transformer", + "ui_hidden": false + }, + "qwen3_encoder": { + "$ref": "#/components/schemas/Qwen3EncoderField", + "description": "Qwen3 tokenizer and text encoder", + "field_kind": "output", + "title": "Qwen3 Encoder", + "ui_hidden": false + }, + "vae": { + "$ref": "#/components/schemas/VAEField", + "description": "VAE", + "field_kind": "output", + "title": "VAE", + "ui_hidden": false + }, + "t5_encoder": { + "$ref": "#/components/schemas/T5EncoderField", + "description": "T5 tokenizer and text encoder", + "field_kind": "output", + "title": "T5 Encoder", + "ui_hidden": false + }, + "type": { + "const": "anima_model_loader_output", + "default": "anima_model_loader_output", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + 
"required": ["output_meta", "transformer", "qwen3_encoder", "vae", "t5_encoder", "type", "type"], + "title": "AnimaModelLoaderOutput", + "type": "object" + }, + "AnimaTextEncoderInvocation": { + "category": "conditioning", "class": "invocation", - "classification": "stable", - "description": "Blend two latents using a given alpha. If a mask is provided, the second latents will be masked before blending.\nLatents must have same size. Masking functionality added by @dwringer.", + "classification": "prototype", + "description": "Encodes and preps a prompt for an Anima image.\n\nUses Qwen3 0.6B for hidden state extraction and T5-XXL tokenizer for\ntoken IDs (no T5 model weights needed). Both are combined by the\nLLM Adapter inside the Anima transformer during denoising.", "node_pack": "invokeai", "properties": { "id": { @@ -8585,677 +10132,826 @@ "title": "Use Cache", "type": "boolean" }, - "latents_a": { + "prompt": { "anyOf": [ { - "$ref": "#/components/schemas/LatentsField" + "type": "string" }, { "type": "null" } ], "default": null, - "description": "Latents tensor", + "description": "Text prompt to encode.", + "field_kind": "input", + "input": "any", + "orig_required": true, + "title": "Prompt", + "ui_component": "textarea" + }, + "qwen3_encoder": { + "anyOf": [ + { + "$ref": "#/components/schemas/Qwen3EncoderField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Qwen3 tokenizer and text encoder", "field_kind": "input", "input": "connection", - "orig_required": true + "orig_required": true, + "title": "Qwen3 Encoder" }, - "latents_b": { + "t5_encoder": { "anyOf": [ { - "$ref": "#/components/schemas/LatentsField" + "$ref": "#/components/schemas/T5EncoderField" }, { "type": "null" } ], "default": null, - "description": "Latents tensor", + "description": "T5 tokenizer and text encoder", "field_kind": "input", "input": "connection", - "orig_required": true + "orig_required": true, + "title": "T5 Encoder" }, "mask": { "anyOf": [ { - "$ref": 
"#/components/schemas/ImageField" + "$ref": "#/components/schemas/TensorField" }, { "type": "null" } ], "default": null, - "description": "Mask for blending in latents B", + "description": "A mask defining the region that this conditioning prompt applies to.", "field_kind": "input", "input": "any", "orig_default": null, "orig_required": false }, - "alpha": { - "default": 0.5, - "description": "Blending factor. 0.0 = use input A only, 1.0 = use input B only, 0.5 = 50% mix of input A and input B.", - "field_kind": "input", - "input": "any", - "minimum": 0, - "orig_default": 0.5, - "orig_required": false, - "title": "Alpha", - "type": "number" - }, "type": { - "const": "lblend", - "default": "lblend", + "const": "anima_text_encoder", + "default": "anima_text_encoder", "field_kind": "node_attribute", "title": "type", "type": "string" } }, "required": ["type", "id"], - "tags": ["latents", "blend", "mask"], - "title": "Blend Latents", + "tags": ["prompt", "conditioning", "anima"], + "title": "Prompt - Anima", "type": "object", - "version": "1.1.0", + "version": "1.3.0", "output": { - "$ref": "#/components/schemas/LatentsOutput" + "$ref": "#/components/schemas/AnimaConditioningOutput" } }, - "BoardChanges": { - "properties": { - "board_name": { - "anyOf": [ - { - "type": "string", - "maxLength": 300 - }, - { - "type": "null" - } - ], - "title": "Board Name", - "description": "The board's new name." + "AnyModelConfig": { + "oneOf": [ + { + "$ref": "#/components/schemas/Main_Diffusers_SD1_Config" }, - "cover_image_name": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "Cover Image Name", - "description": "The name of the board's new cover image." 
+ { + "$ref": "#/components/schemas/Main_Diffusers_SD2_Config" }, - "archived": { - "anyOf": [ - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "title": "Archived", - "description": "Whether or not the board is archived" - } - }, - "additionalProperties": false, - "type": "object", - "title": "BoardChanges" - }, - "BoardDTO": { - "properties": { - "board_id": { - "type": "string", - "title": "Board Id", - "description": "The unique ID of the board." + { + "$ref": "#/components/schemas/Main_Diffusers_SDXL_Config" }, - "board_name": { - "type": "string", - "title": "Board Name", - "description": "The name of the board." + { + "$ref": "#/components/schemas/Main_Diffusers_SDXLRefiner_Config" }, - "created_at": { - "anyOf": [ - { - "type": "string", - "format": "date-time" - }, - { - "type": "string" - } - ], - "title": "Created At", - "description": "The created timestamp of the board." + { + "$ref": "#/components/schemas/Main_Diffusers_SD3_Config" }, - "updated_at": { - "anyOf": [ - { - "type": "string", - "format": "date-time" - }, - { - "type": "string" - } - ], - "title": "Updated At", - "description": "The updated timestamp of the board." + { + "$ref": "#/components/schemas/Main_Diffusers_FLUX_Config" }, - "deleted_at": { - "anyOf": [ - { - "type": "string", - "format": "date-time" - }, - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "Deleted At", - "description": "The deleted timestamp of the board." + { + "$ref": "#/components/schemas/Main_Diffusers_Flux2_Config" }, - "cover_image_name": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "Cover Image Name", - "description": "The name of the board's cover image." + { + "$ref": "#/components/schemas/Main_Diffusers_CogView4_Config" }, - "archived": { - "type": "boolean", - "title": "Archived", - "description": "Whether or not the board is archived." 
+ { + "$ref": "#/components/schemas/Main_Diffusers_QwenImage_Config" }, - "image_count": { - "type": "integer", - "title": "Image Count", - "description": "The number of images in the board." + { + "$ref": "#/components/schemas/Main_Diffusers_ZImage_Config" }, - "asset_count": { - "type": "integer", - "title": "Asset Count", - "description": "The number of assets in the board." - } - }, - "type": "object", - "required": [ - "board_id", - "board_name", - "created_at", - "updated_at", - "cover_image_name", - "archived", - "image_count", - "asset_count" - ], - "title": "BoardDTO", - "description": "Deserialized board record with cover image URL and image count." - }, - "BoardField": { - "description": "A board primitive field", - "properties": { - "board_id": { - "description": "The id of the board", - "title": "Board Id", - "type": "string" - } - }, - "required": ["board_id"], - "title": "BoardField", - "type": "object" - }, - "BoardRecordOrderBy": { - "type": "string", - "enum": ["created_at", "board_name"], - "title": "BoardRecordOrderBy", - "description": "The order by options for board records" - }, - "Body_add_image_to_board": { - "properties": { - "board_id": { - "type": "string", - "title": "Board Id", - "description": "The id of the board to add to" + { + "$ref": "#/components/schemas/Main_Checkpoint_SD1_Config" }, - "image_name": { - "type": "string", - "title": "Image Name", - "description": "The name of the image to add" - } - }, - "type": "object", - "required": ["board_id", "image_name"], - "title": "Body_add_image_to_board" - }, - "Body_add_images_to_board": { - "properties": { - "board_id": { - "type": "string", - "title": "Board Id", - "description": "The id of the board to add to" + { + "$ref": "#/components/schemas/Main_Checkpoint_SD2_Config" }, - "image_names": { - "items": { - "type": "string" - }, - "type": "array", - "title": "Image Names", - "description": "The names of the images to add" + { + "$ref": 
"#/components/schemas/Main_Checkpoint_SDXL_Config" + }, + { + "$ref": "#/components/schemas/Main_Checkpoint_SDXLRefiner_Config" + }, + { + "$ref": "#/components/schemas/Main_Checkpoint_Flux2_Config" + }, + { + "$ref": "#/components/schemas/Main_Checkpoint_FLUX_Config" + }, + { + "$ref": "#/components/schemas/Main_Checkpoint_ZImage_Config" + }, + { + "$ref": "#/components/schemas/Main_Checkpoint_Anima_Config" + }, + { + "$ref": "#/components/schemas/Main_BnBNF4_FLUX_Config" + }, + { + "$ref": "#/components/schemas/Main_GGUF_Flux2_Config" + }, + { + "$ref": "#/components/schemas/Main_GGUF_FLUX_Config" + }, + { + "$ref": "#/components/schemas/Main_GGUF_QwenImage_Config" + }, + { + "$ref": "#/components/schemas/Main_GGUF_ZImage_Config" + }, + { + "$ref": "#/components/schemas/VAE_Checkpoint_SD1_Config" + }, + { + "$ref": "#/components/schemas/VAE_Checkpoint_SD2_Config" + }, + { + "$ref": "#/components/schemas/VAE_Checkpoint_SDXL_Config" + }, + { + "$ref": "#/components/schemas/VAE_Checkpoint_FLUX_Config" + }, + { + "$ref": "#/components/schemas/VAE_Checkpoint_Flux2_Config" + }, + { + "$ref": "#/components/schemas/VAE_Checkpoint_Anima_Config" + }, + { + "$ref": "#/components/schemas/VAE_Diffusers_SD1_Config" + }, + { + "$ref": "#/components/schemas/VAE_Diffusers_SDXL_Config" + }, + { + "$ref": "#/components/schemas/VAE_Diffusers_Flux2_Config" + }, + { + "$ref": "#/components/schemas/ControlNet_Checkpoint_SD1_Config" + }, + { + "$ref": "#/components/schemas/ControlNet_Checkpoint_SD2_Config" + }, + { + "$ref": "#/components/schemas/ControlNet_Checkpoint_SDXL_Config" + }, + { + "$ref": "#/components/schemas/ControlNet_Checkpoint_FLUX_Config" + }, + { + "$ref": "#/components/schemas/ControlNet_Checkpoint_ZImage_Config" + }, + { + "$ref": "#/components/schemas/ControlNet_Diffusers_SD1_Config" + }, + { + "$ref": "#/components/schemas/ControlNet_Diffusers_SD2_Config" + }, + { + "$ref": "#/components/schemas/ControlNet_Diffusers_SDXL_Config" + }, + { + "$ref": 
"#/components/schemas/ControlNet_Diffusers_FLUX_Config" + }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_SD1_Config" + }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_SD2_Config" + }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_SDXL_Config" + }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_Flux2_Config" + }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_FLUX_Config" + }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_ZImage_Config" + }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_QwenImage_Config" + }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_Anima_Config" + }, + { + "$ref": "#/components/schemas/LoRA_OMI_SDXL_Config" + }, + { + "$ref": "#/components/schemas/LoRA_OMI_FLUX_Config" + }, + { + "$ref": "#/components/schemas/LoRA_Diffusers_SD1_Config" + }, + { + "$ref": "#/components/schemas/LoRA_Diffusers_SD2_Config" + }, + { + "$ref": "#/components/schemas/LoRA_Diffusers_SDXL_Config" + }, + { + "$ref": "#/components/schemas/LoRA_Diffusers_Flux2_Config" + }, + { + "$ref": "#/components/schemas/LoRA_Diffusers_FLUX_Config" + }, + { + "$ref": "#/components/schemas/LoRA_Diffusers_ZImage_Config" + }, + { + "$ref": "#/components/schemas/ControlLoRA_LyCORIS_FLUX_Config" + }, + { + "$ref": "#/components/schemas/T5Encoder_T5Encoder_Config" + }, + { + "$ref": "#/components/schemas/T5Encoder_BnBLLMint8_Config" + }, + { + "$ref": "#/components/schemas/Qwen3Encoder_Qwen3Encoder_Config" + }, + { + "$ref": "#/components/schemas/Qwen3Encoder_Checkpoint_Config" + }, + { + "$ref": "#/components/schemas/Qwen3Encoder_GGUF_Config" + }, + { + "$ref": "#/components/schemas/TI_File_SD1_Config" + }, + { + "$ref": "#/components/schemas/TI_File_SD2_Config" + }, + { + "$ref": "#/components/schemas/TI_File_SDXL_Config" + }, + { + "$ref": "#/components/schemas/TI_Folder_SD1_Config" + }, + { + "$ref": "#/components/schemas/TI_Folder_SD2_Config" + }, + { + "$ref": "#/components/schemas/TI_Folder_SDXL_Config" + }, + { + "$ref": 
"#/components/schemas/IPAdapter_InvokeAI_SD1_Config" + }, + { + "$ref": "#/components/schemas/IPAdapter_InvokeAI_SD2_Config" + }, + { + "$ref": "#/components/schemas/IPAdapter_InvokeAI_SDXL_Config" + }, + { + "$ref": "#/components/schemas/IPAdapter_Checkpoint_SD1_Config" + }, + { + "$ref": "#/components/schemas/IPAdapter_Checkpoint_SD2_Config" + }, + { + "$ref": "#/components/schemas/IPAdapter_Checkpoint_SDXL_Config" + }, + { + "$ref": "#/components/schemas/IPAdapter_Checkpoint_FLUX_Config" + }, + { + "$ref": "#/components/schemas/T2IAdapter_Diffusers_SD1_Config" + }, + { + "$ref": "#/components/schemas/T2IAdapter_Diffusers_SDXL_Config" + }, + { + "$ref": "#/components/schemas/Spandrel_Checkpoint_Config" + }, + { + "$ref": "#/components/schemas/CLIPEmbed_Diffusers_G_Config" + }, + { + "$ref": "#/components/schemas/CLIPEmbed_Diffusers_L_Config" + }, + { + "$ref": "#/components/schemas/CLIPVision_Diffusers_Config" + }, + { + "$ref": "#/components/schemas/SigLIP_Diffusers_Config" + }, + { + "$ref": "#/components/schemas/FLUXRedux_Checkpoint_Config" + }, + { + "$ref": "#/components/schemas/LlavaOnevision_Diffusers_Config" + }, + { + "$ref": "#/components/schemas/Unknown_Config" } - }, - "type": "object", - "required": ["board_id", "image_names"], - "title": "Body_add_images_to_board" + ] }, - "Body_cancel_by_batch_ids": { + "AppVersion": { "properties": { - "batch_ids": { - "items": { - "type": "string" - }, - "type": "array", - "title": "Batch Ids", - "description": "The list of batch_ids to cancel all queue items for" + "version": { + "type": "string", + "title": "Version", + "description": "App version" } }, "type": "object", - "required": ["batch_ids"], - "title": "Body_cancel_by_batch_ids" + "required": ["version"], + "title": "AppVersion", + "description": "App Version Response" }, - "Body_create_image_upload_entry": { + "ApplyMaskTensorToImageInvocation": { + "category": "mask", + "class": "invocation", + "classification": "stable", + "description": "Applies a 
tensor mask to an image.\n\nThe image is converted to RGBA and the mask is applied to the alpha channel.", + "node_pack": "invokeai", "properties": { - "width": { - "type": "integer", - "title": "Width", - "description": "The width of the image" - }, - "height": { - "type": "integer", - "title": "Height", - "description": "The height of the image" - }, - "board_id": { + "board": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/BoardField" }, { "type": "null" } ], - "title": "Board Id", - "description": "The board to add this image to, if any" - } - }, - "type": "object", - "required": ["width", "height"], - "title": "Body_create_image_upload_entry" - }, - "Body_create_style_preset": { - "properties": { - "image": { + "default": null, + "description": "The board to save the image to", + "field_kind": "internal", + "input": "direct", + "orig_required": false, + "ui_hidden": false + }, + "metadata": { "anyOf": [ { - "type": "string", - "format": "binary" + "$ref": "#/components/schemas/MetadataField" }, { "type": "null" } ], - "title": "Image", - "description": "The image file to upload" + "default": null, + "description": "Optional metadata to be saved with the image", + "field_kind": "internal", + "input": "connection", + "orig_required": false, + "ui_hidden": false }, - "data": { - "type": "string", - "title": "Data", - "description": "The data of the style preset to create" - } - }, - "type": "object", - "required": ["data"], - "title": "Body_create_style_preset" - }, - "Body_create_workflow": { - "properties": { - "workflow": { - "$ref": "#/components/schemas/WorkflowWithoutID", - "description": "The workflow to create" - } - }, - "type": "object", - "required": ["workflow"], - "title": "Body_create_workflow" - }, - "Body_delete_images_from_list": { - "properties": { - "image_names": { - "items": { - "type": "string" - }, - "type": "array", - "title": "Image Names", - "description": "The list of names of images to delete" - } - }, - "type": 
"object", - "required": ["image_names"], - "title": "Body_delete_images_from_list" - }, - "Body_do_hf_login": { - "properties": { - "token": { - "type": "string", - "title": "Token", - "description": "Hugging Face token to use for login" - } - }, - "type": "object", - "required": ["token"], - "title": "Body_do_hf_login" - }, - "Body_download": { - "properties": { - "source": { - "type": "string", - "minLength": 1, - "format": "uri", - "title": "Source", - "description": "download source" + "id": { + "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" }, - "dest": { - "type": "string", - "title": "Dest", - "description": "download destination" + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" }, - "priority": { - "type": "integer", - "title": "Priority", - "description": "queue priority", - "default": 10 + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" }, - "access_token": { + "mask": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/TensorField" }, { "type": "null" } ], - "title": "Access Token", - "description": "token for authorization to download" + "default": null, + "description": "The mask tensor to apply.", + "field_kind": "input", + "input": "any", + "orig_required": true + }, + "image": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The image to apply the mask to.", + "field_kind": "input", + "input": "any", + "orig_required": true + }, + "invert": { + "default": false, 
+ "description": "Whether to invert the mask.", + "field_kind": "input", + "input": "any", + "orig_default": false, + "orig_required": false, + "title": "Invert", + "type": "boolean" + }, + "type": { + "const": "apply_tensor_mask_to_image", + "default": "apply_tensor_mask_to_image", + "field_kind": "node_attribute", + "title": "type", + "type": "string" } }, + "required": ["type", "id"], + "tags": ["mask"], + "title": "Apply Tensor Mask to Image", "type": "object", - "required": ["source", "dest"], - "title": "Body_download" + "version": "1.0.0", + "output": { + "$ref": "#/components/schemas/ImageOutput" + } }, - "Body_download_images_from_list": { + "ApplyMaskToImageInvocation": { + "category": "image", + "class": "invocation", + "classification": "stable", + "description": "Extracts a region from a generated image using a mask and blends it seamlessly onto a source image.\nThe mask uses black to indicate areas to keep from the generated image and white for areas to discard.", + "node_pack": "invokeai", "properties": { - "image_names": { + "board": { "anyOf": [ { - "items": { - "type": "string" - }, - "type": "array" + "$ref": "#/components/schemas/BoardField" }, { "type": "null" } ], - "title": "Image Names", - "description": "The list of names of images to download" + "default": null, + "description": "The board to save the image to", + "field_kind": "internal", + "input": "direct", + "orig_required": false, + "ui_hidden": false }, - "board_id": { + "metadata": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/MetadataField" }, { "type": "null" } ], - "title": "Board Id", - "description": "The board from which image should be downloaded" - } - }, - "type": "object", - "title": "Body_download_images_from_list" - }, - "Body_enqueue_batch": { - "properties": { - "batch": { - "$ref": "#/components/schemas/Batch", - "description": "Batch to process" + "default": null, + "description": "Optional metadata to be saved with the image", + "field_kind": 
"internal", + "input": "connection", + "orig_required": false, + "ui_hidden": false }, - "prepend": { + "id": { + "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" + }, + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", "type": "boolean", - "title": "Prepend", - "description": "Whether or not to prepend this batch in the queue", - "default": false - } - }, - "type": "object", - "required": ["batch"], - "title": "Body_enqueue_batch" - }, - "Body_get_images_by_names": { - "properties": { - "image_names": { - "items": { - "type": "string" - }, - "type": "array", - "title": "Image Names", - "description": "Object containing list of image names to fetch DTOs for" - } - }, - "type": "object", - "required": ["image_names"], - "title": "Body_get_images_by_names" - }, - "Body_get_queue_items_by_item_ids": { - "properties": { - "item_ids": { - "items": { - "type": "integer" - }, - "type": "array", - "title": "Item Ids", - "description": "Object containing list of queue item ids to fetch queue items for" + "ui_hidden": false, + "ui_type": "IsIntermediate" + }, + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" + }, + "image": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The image from which to extract the masked region", + "field_kind": "input", + "input": "any", + "orig_required": true + }, + "mask": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The mask defining the region (black=keep, 
white=discard)", + "field_kind": "input", + "input": "any", + "orig_required": true + }, + "invert_mask": { + "default": false, + "description": "Whether to invert the mask before applying it", + "field_kind": "input", + "input": "any", + "orig_default": false, + "orig_required": false, + "title": "Invert Mask", + "type": "boolean" + }, + "type": { + "const": "apply_mask_to_image", + "default": "apply_mask_to_image", + "field_kind": "node_attribute", + "title": "type", + "type": "string" } }, + "required": ["type", "id"], + "tags": ["image", "mask", "blend"], + "title": "Apply Mask to Image", "type": "object", - "required": ["item_ids"], - "title": "Body_get_queue_items_by_item_ids" + "version": "1.0.0", + "output": { + "$ref": "#/components/schemas/ImageOutput" + } }, - "Body_import_style_presets": { + "BaseMetadata": { "properties": { - "file": { + "name": { "type": "string", - "format": "binary", - "title": "File", - "description": "The file to import" + "title": "Name", + "description": "model's name" + }, + "type": { + "type": "string", + "const": "basemetadata", + "title": "Type", + "default": "basemetadata" } }, "type": "object", - "required": ["file"], - "title": "Body_import_style_presets" + "required": ["name"], + "title": "BaseMetadata", + "description": "Adds typing data for discriminated union." }, - "Body_parse_dynamicprompts": { + "BaseModelType": { + "type": "string", + "enum": [ + "any", + "sd-1", + "sd-2", + "sd-3", + "sdxl", + "sdxl-refiner", + "flux", + "flux2", + "cogview4", + "z-image", + "qwen-image", + "anima", + "unknown" + ], + "title": "BaseModelType", + "description": "An enumeration of base model architectures. For example, Stable Diffusion 1.x, Stable Diffusion 2.x, FLUX, etc.\n\nEvery model config must have a base architecture type.\n\nNot all models are associated with a base architecture. For example, CLIP models are their own thing, not related\nto any particular model architecture. 
To simplify internal APIs and make it easier to work with models, we use a\nfallback/null value `BaseModelType.Any` for these models, instead of making the model base optional." + }, + "Batch": { "properties": { - "prompt": { + "batch_id": { "type": "string", - "title": "Prompt", - "description": "The prompt to parse with dynamicprompts" + "title": "Batch Id", + "description": "The ID of the batch" }, - "max_prompts": { - "type": "integer", - "maximum": 10000.0, - "minimum": 1.0, - "title": "Max Prompts", - "description": "The max number of prompts to generate", - "default": 1000 + "origin": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Origin", + "description": "The origin of this queue item. This data is used by the frontend to determine how to handle results." }, - "combinatorial": { - "type": "boolean", - "title": "Combinatorial", - "description": "Whether to use the combinatorial generator", - "default": true + "destination": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Destination", + "description": "The origin of this queue item. This data is used by the frontend to determine how to handle results" }, - "seed": { + "data": { "anyOf": [ { - "type": "integer" + "items": { + "items": { + "$ref": "#/components/schemas/BatchDatum" + }, + "type": "array" + }, + "type": "array" }, { "type": "null" } ], - "title": "Seed", - "description": "The seed to use for random generation. Only used if not combinatorial" + "title": "Data", + "description": "The batch data collection." 
+ }, + "graph": { + "$ref": "#/components/schemas/Graph", + "description": "The graph to initialize the session with" + }, + "workflow": { + "anyOf": [ + { + "$ref": "#/components/schemas/WorkflowWithoutID" + }, + { + "type": "null" + } + ], + "description": "The workflow to initialize the session with" + }, + "runs": { + "type": "integer", + "minimum": 1.0, + "title": "Runs", + "description": "Int stating how many times to iterate through all possible batch indices", + "default": 1 } }, "type": "object", - "required": ["prompt"], - "title": "Body_parse_dynamicprompts" + "required": ["graph", "runs"], + "title": "Batch" }, - "Body_remove_image_from_board": { + "BatchDatum": { "properties": { - "image_name": { + "node_path": { "type": "string", - "title": "Image Name", - "description": "The name of the image to remove" - } - }, - "type": "object", - "required": ["image_name"], - "title": "Body_remove_image_from_board" - }, - "Body_remove_images_from_board": { - "properties": { - "image_names": { - "items": { - "type": "string" - }, - "type": "array", - "title": "Image Names", - "description": "The names of the images to remove" - } - }, - "type": "object", - "required": ["image_names"], - "title": "Body_remove_images_from_board" - }, - "Body_set_workflow_thumbnail": { - "properties": { - "image": { + "title": "Node Path", + "description": "The node into which this batch data collection will be substituted." 
+ }, + "field_name": { "type": "string", - "format": "binary", - "title": "Image", - "description": "The image file to upload" - } - }, - "type": "object", - "required": ["image"], - "title": "Body_set_workflow_thumbnail" - }, - "Body_star_images_in_list": { - "properties": { - "image_names": { - "items": { - "type": "string" - }, - "type": "array", - "title": "Image Names", - "description": "The list of names of images to star" - } - }, - "type": "object", - "required": ["image_names"], - "title": "Body_star_images_in_list" - }, - "Body_unstar_images_in_list": { - "properties": { - "image_names": { + "title": "Field Name", + "description": "The field into which this batch data collection will be substituted." + }, + "items": { "items": { - "type": "string" + "anyOf": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "integer" + }, + { + "$ref": "#/components/schemas/ImageField" + } + ] }, "type": "array", - "title": "Image Names", - "description": "The list of names of images to unstar" - } - }, - "type": "object", - "required": ["image_names"], - "title": "Body_unstar_images_in_list" - }, - "Body_update_model_image": { - "properties": { - "image": { - "type": "string", - "format": "binary", - "title": "Image" + "title": "Items", + "description": "The list of items to substitute into the node/field." 
} }, "type": "object", - "required": ["image"], - "title": "Body_update_model_image" + "required": ["node_path", "field_name"], + "title": "BatchDatum" }, - "Body_update_style_preset": { + "BatchEnqueuedEvent": { + "description": "Event model for batch_enqueued", "properties": { - "image": { + "timestamp": { + "description": "The timestamp of the event", + "title": "Timestamp", + "type": "integer" + }, + "queue_id": { + "description": "The ID of the queue", + "title": "Queue Id", + "type": "string" + }, + "batch_id": { + "description": "The ID of the batch", + "title": "Batch Id", + "type": "string" + }, + "enqueued": { + "description": "The number of invocations enqueued", + "title": "Enqueued", + "type": "integer" + }, + "requested": { + "description": "The number of invocations initially requested to be enqueued (may be less than enqueued if queue was full)", + "title": "Requested", + "type": "integer" + }, + "priority": { + "description": "The priority of the batch", + "title": "Priority", + "type": "integer" + }, + "origin": { "anyOf": [ { - "type": "string", - "format": "binary" + "type": "string" }, { "type": "null" } ], - "title": "Image", - "description": "The image file to upload" + "default": null, + "description": "The origin of the batch", + "title": "Origin" }, - "data": { - "type": "string", - "title": "Data", - "description": "The data of the style preset to update" - } - }, - "type": "object", - "required": ["data"], - "title": "Body_update_style_preset" - }, - "Body_update_workflow": { - "properties": { - "workflow": { - "$ref": "#/components/schemas/Workflow", - "description": "The updated workflow" + "user_id": { + "default": "system", + "description": "The ID of the user who enqueued the batch", + "title": "User Id", + "type": "string" } }, - "type": "object", - "required": ["workflow"], - "title": "Body_update_workflow" + "required": ["timestamp", "queue_id", "batch_id", "enqueued", "requested", "priority", "origin", "user_id"], + "title": 
"BatchEnqueuedEvent", + "type": "object" }, - "Body_upload_image": { + "BatchStatus": { "properties": { - "file": { + "queue_id": { "type": "string", - "format": "binary", - "title": "File" + "title": "Queue Id", + "description": "The ID of the queue" }, - "resize_to": { + "batch_id": { + "type": "string", + "title": "Batch Id", + "description": "The ID of the batch" + }, + "origin": { "anyOf": [ { "type": "string" @@ -9264,11 +10960,10 @@ "type": "null" } ], - "title": "Resize To", - "description": "Dimensions to resize the image to, must be stringified tuple of 2 integers. Max total pixel count: 16777216", - "examples": ["\"[1024,1024]\""] + "title": "Origin", + "description": "The origin of the batch" }, - "metadata": { + "destination": { "anyOf": [ { "type": "string" @@ -9277,21 +10972,94 @@ "type": "null" } ], - "title": "Metadata", - "description": "The metadata to associate with the image, must be a stringified JSON dict" + "title": "Destination", + "description": "The destination of the batch" + }, + "pending": { + "type": "integer", + "title": "Pending", + "description": "Number of queue items with status 'pending'" + }, + "in_progress": { + "type": "integer", + "title": "In Progress", + "description": "Number of queue items with status 'in_progress'" + }, + "completed": { + "type": "integer", + "title": "Completed", + "description": "Number of queue items with status 'complete'" + }, + "failed": { + "type": "integer", + "title": "Failed", + "description": "Number of queue items with status 'error'" + }, + "canceled": { + "type": "integer", + "title": "Canceled", + "description": "Number of queue items with status 'canceled'" + }, + "total": { + "type": "integer", + "title": "Total", + "description": "Total number of queue items" } }, "type": "object", - "required": ["file"], - "title": "Body_upload_image" + "required": [ + "queue_id", + "batch_id", + "origin", + "destination", + "pending", + "in_progress", + "completed", + "failed", + "canceled", + 
"total" + ], + "title": "BatchStatus" }, - "BooleanCollectionInvocation": { - "category": "primitives", + "BlankImageInvocation": { + "category": "image", "class": "invocation", "classification": "stable", - "description": "A collection of boolean primitive values", + "description": "Creates a blank image and forwards it to the pipeline", "node_pack": "invokeai", "properties": { + "board": { + "anyOf": [ + { + "$ref": "#/components/schemas/BoardField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The board to save the image to", + "field_kind": "internal", + "input": "direct", + "orig_required": false, + "ui_hidden": false + }, + "metadata": { + "anyOf": [ + { + "$ref": "#/components/schemas/MetadataField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional metadata to be saved with the image", + "field_kind": "internal", + "input": "connection", + "orig_required": false, + "ui_hidden": false + }, "id": { "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", "field_kind": "node_attribute", @@ -9316,67 +11084,78 @@ "title": "Use Cache", "type": "boolean" }, - "collection": { - "default": [], - "description": "The collection of boolean values", + "width": { + "default": 512, + "description": "The width of the image", "field_kind": "input", "input": "any", - "items": { - "type": "boolean" - }, - "orig_default": [], + "orig_default": 512, "orig_required": false, - "title": "Collection", - "type": "array" + "title": "Width", + "type": "integer" }, - "type": { - "const": "boolean_collection", - "default": "boolean_collection", - "field_kind": "node_attribute", - "title": "type", - "type": "string" - } - }, - "required": ["type", "id"], - "tags": ["primitives", "boolean", "collection"], - "title": "Boolean Collection Primitive", - "type": "object", - "version": "1.0.2", - "output": { - "$ref": "#/components/schemas/BooleanCollectionOutput" - } - }, - "BooleanCollectionOutput": { - "class": "output", - "description": "Base class for nodes that output a collection of booleans", - "properties": { - "collection": { - "description": "The output boolean collection", - "field_kind": "output", - "items": { - "type": "boolean" + "height": { + "default": 512, + "description": "The height of the image", + "field_kind": "input", + "input": "any", + "orig_default": 512, + "orig_required": false, + "title": "Height", + "type": "integer" + }, + "mode": { + "default": "RGB", + "description": "The mode of the image", + "enum": ["RGB", "RGBA"], + "field_kind": "input", + "input": "any", + "orig_default": "RGB", + "orig_required": false, + "title": "Mode", + "type": "string" + }, + "color": { + "$ref": "#/components/schemas/ColorField", + "default": { + "r": 0, + "g": 0, + "b": 0, + "a": 255 }, - "title": "Collection", - "type": "array", - "ui_hidden": false + "description": "The color of the image", + "field_kind": "input", + "input": "any", + "orig_default": { + "a": 255, + "b": 0, + "g": 
0, + "r": 0 + }, + "orig_required": false }, "type": { - "const": "boolean_collection_output", - "default": "boolean_collection_output", + "const": "blank_image", + "default": "blank_image", "field_kind": "node_attribute", "title": "type", "type": "string" } }, - "required": ["output_meta", "collection", "type", "type"], - "title": "BooleanCollectionOutput", - "type": "object" + "required": ["type", "id"], + "tags": ["image"], + "title": "Blank Image", + "type": "object", + "version": "1.2.2", + "output": { + "$ref": "#/components/schemas/ImageOutput" + } }, - "BooleanInvocation": { - "category": "primitives", + "BlendLatentsInvocation": { + "category": "latents", "class": "invocation", "classification": "stable", - "description": "A boolean primitive value", + "description": "Blend two latents using a given alpha. If a mask is provided, the second latents will be masked before blending.\nLatents must have same size. Masking functionality added by @dwringer.", "node_pack": "invokeai", "properties": { "id": { @@ -9403,426 +11182,344 @@ "title": "Use Cache", "type": "boolean" }, - "value": { - "default": false, - "description": "The boolean value", + "latents_a": { + "anyOf": [ + { + "$ref": "#/components/schemas/LatentsField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Latents tensor", + "field_kind": "input", + "input": "connection", + "orig_required": true + }, + "latents_b": { + "anyOf": [ + { + "$ref": "#/components/schemas/LatentsField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Latents tensor", + "field_kind": "input", + "input": "connection", + "orig_required": true + }, + "mask": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Mask for blending in latents B", "field_kind": "input", "input": "any", - "orig_default": false, + "orig_default": null, + "orig_required": false + }, + "alpha": { + "default": 0.5, + 
"description": "Blending factor. 0.0 = use input A only, 1.0 = use input B only, 0.5 = 50% mix of input A and input B.", + "field_kind": "input", + "input": "any", + "minimum": 0, + "orig_default": 0.5, "orig_required": false, - "title": "Value", - "type": "boolean" + "title": "Alpha", + "type": "number" }, "type": { - "const": "boolean", - "default": "boolean", + "const": "lblend", + "default": "lblend", "field_kind": "node_attribute", "title": "type", "type": "string" } }, "required": ["type", "id"], - "tags": ["primitives", "boolean"], - "title": "Boolean Primitive", + "tags": ["latents", "blend", "mask"], + "title": "Blend Latents", "type": "object", - "version": "1.0.1", + "version": "1.1.0", "output": { - "$ref": "#/components/schemas/BooleanOutput" + "$ref": "#/components/schemas/LatentsOutput" } }, - "BooleanOutput": { - "class": "output", - "description": "Base class for nodes that output a single boolean", + "BoardChanges": { "properties": { - "value": { - "description": "The output boolean", - "field_kind": "output", - "title": "Value", - "type": "boolean", - "ui_hidden": false + "board_name": { + "anyOf": [ + { + "type": "string", + "maxLength": 300 + }, + { + "type": "null" + } + ], + "title": "Board Name", + "description": "The board's new name." 
}, - "type": { - "const": "boolean_output", - "default": "boolean_output", - "field_kind": "node_attribute", - "title": "type", - "type": "string" - } - }, - "required": ["output_meta", "value", "type", "type"], - "title": "BooleanOutput", - "type": "object" - }, - "BoundingBoxCollectionOutput": { - "class": "output", - "description": "Base class for nodes that output a collection of bounding boxes", - "properties": { - "collection": { - "description": "The output bounding boxes.", - "field_kind": "output", - "items": { - "$ref": "#/components/schemas/BoundingBoxField" - }, - "title": "Bounding Boxes", - "type": "array", - "ui_hidden": false + "cover_image_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Cover Image Name", + "description": "The name of the board's new cover image." }, - "type": { - "const": "bounding_box_collection_output", - "default": "bounding_box_collection_output", - "field_kind": "node_attribute", - "title": "type", - "type": "string" + "archived": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "title": "Archived", + "description": "Whether or not the board is archived" + }, + "board_visibility": { + "anyOf": [ + { + "$ref": "#/components/schemas/BoardVisibility" + }, + { + "type": "null" + } + ], + "description": "The visibility of the board." } }, - "required": ["output_meta", "collection", "type", "type"], - "title": "BoundingBoxCollectionOutput", - "type": "object" + "additionalProperties": false, + "type": "object", + "title": "BoardChanges" }, - "BoundingBoxField": { - "description": "A bounding box primitive value.", + "BoardDTO": { "properties": { - "x_min": { - "description": "The minimum x-coordinate of the bounding box (inclusive).", - "title": "X Min", - "type": "integer" + "board_id": { + "type": "string", + "title": "Board Id", + "description": "The unique ID of the board." 
}, - "x_max": { - "description": "The maximum x-coordinate of the bounding box (exclusive).", - "title": "X Max", - "type": "integer" + "board_name": { + "type": "string", + "title": "Board Name", + "description": "The name of the board." }, - "y_min": { - "description": "The minimum y-coordinate of the bounding box (inclusive).", - "title": "Y Min", - "type": "integer" + "user_id": { + "type": "string", + "title": "User Id", + "description": "The user ID of the board owner." }, - "y_max": { - "description": "The maximum y-coordinate of the bounding box (exclusive).", - "title": "Y Max", - "type": "integer" + "created_at": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "string" + } + ], + "title": "Created At", + "description": "The created timestamp of the board." }, - "score": { + "updated_at": { "anyOf": [ { - "maximum": 1.0, - "minimum": 0.0, - "type": "number" + "type": "string", + "format": "date-time" }, { - "type": "null" + "type": "string" } ], - "default": null, - "description": "The score associated with the bounding box. In the range [0, 1]. This value is typically set when the bounding box was produced by a detector and has an associated confidence score.", - "title": "Score" - } - }, - "required": ["x_min", "x_max", "y_min", "y_max"], - "title": "BoundingBoxField", - "type": "object" - }, - "BoundingBoxInvocation": { - "category": "primitives", - "class": "invocation", - "classification": "stable", - "description": "Create a bounding box manually by supplying box coordinates", - "node_pack": "invokeai", - "properties": { - "id": { - "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", - "field_kind": "node_attribute", - "title": "Id", - "type": "string" + "title": "Updated At", + "description": "The updated timestamp of the board." 
}, - "is_intermediate": { - "default": false, - "description": "Whether or not this is an intermediate invocation.", - "field_kind": "node_attribute", - "input": "direct", - "orig_required": true, - "title": "Is Intermediate", - "type": "boolean", - "ui_hidden": false, - "ui_type": "IsIntermediate" + "deleted_at": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Deleted At", + "description": "The deleted timestamp of the board." }, - "use_cache": { - "default": true, - "description": "Whether or not to use the cache", - "field_kind": "node_attribute", - "title": "Use Cache", - "type": "boolean" + "cover_image_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Cover Image Name", + "description": "The name of the board's cover image." }, - "x_min": { - "default": 0, - "description": "x-coordinate of the bounding box's top left vertex", - "field_kind": "input", - "input": "any", - "orig_default": 0, - "orig_required": false, - "title": "X Min", - "type": "integer" + "archived": { + "type": "boolean", + "title": "Archived", + "description": "Whether or not the board is archived." }, - "y_min": { - "default": 0, - "description": "y-coordinate of the bounding box's top left vertex", - "field_kind": "input", - "input": "any", - "orig_default": 0, - "orig_required": false, - "title": "Y Min", - "type": "integer" + "board_visibility": { + "$ref": "#/components/schemas/BoardVisibility", + "description": "The visibility of the board.", + "default": "private" }, - "x_max": { - "default": 0, - "description": "x-coordinate of the bounding box's bottom right vertex", - "field_kind": "input", - "input": "any", - "orig_default": 0, - "orig_required": false, - "title": "X Max", - "type": "integer" + "image_count": { + "type": "integer", + "title": "Image Count", + "description": "The number of images in the board." 
}, - "y_max": { - "default": 0, - "description": "y-coordinate of the bounding box's bottom right vertex", - "field_kind": "input", - "input": "any", - "orig_default": 0, - "orig_required": false, - "title": "Y Max", - "type": "integer" + "asset_count": { + "type": "integer", + "title": "Asset Count", + "description": "The number of assets in the board." }, - "type": { - "const": "bounding_box", - "default": "bounding_box", - "field_kind": "node_attribute", - "title": "type", - "type": "string" + "owner_username": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Owner Username", + "description": "The username of the board owner (for admin view)." } }, - "required": ["type", "id"], - "tags": ["primitives", "segmentation", "collection", "bounding box"], - "title": "Bounding Box", "type": "object", - "version": "1.0.0", - "output": { - "$ref": "#/components/schemas/BoundingBoxOutput" - } + "required": [ + "board_id", + "board_name", + "user_id", + "created_at", + "updated_at", + "cover_image_name", + "archived", + "image_count", + "asset_count" + ], + "title": "BoardDTO", + "description": "Deserialized board record with cover image URL and image count." 
}, - "BoundingBoxOutput": { - "class": "output", - "description": "Base class for nodes that output a single bounding box", + "BoardField": { + "description": "A board primitive field", "properties": { - "bounding_box": { - "$ref": "#/components/schemas/BoundingBoxField", - "description": "The output bounding box.", - "field_kind": "output", - "ui_hidden": false - }, - "type": { - "const": "bounding_box_output", - "default": "bounding_box_output", - "field_kind": "node_attribute", - "title": "type", + "board_id": { + "description": "The id of the board", + "title": "Board Id", "type": "string" } }, - "required": ["output_meta", "bounding_box", "type", "type"], - "title": "BoundingBoxOutput", + "required": ["board_id"], + "title": "BoardField", "type": "object" }, - "BulkDeleteModelsRequest": { + "BoardRecordOrderBy": { + "type": "string", + "enum": ["created_at", "board_name"], + "title": "BoardRecordOrderBy", + "description": "The order by options for board records" + }, + "BoardVisibility": { + "type": "string", + "enum": ["private", "shared", "public"], + "title": "BoardVisibility", + "description": "The visibility options for a board." 
+ }, + "Body_add_image_to_board": { "properties": { - "keys": { + "board_id": { + "type": "string", + "title": "Board Id", + "description": "The id of the board to add to" + }, + "image_name": { + "type": "string", + "title": "Image Name", + "description": "The name of the image to add" + } + }, + "type": "object", + "required": ["board_id", "image_name"], + "title": "Body_add_image_to_board" + }, + "Body_add_images_to_board": { + "properties": { + "board_id": { + "type": "string", + "title": "Board Id", + "description": "The id of the board to add to" + }, + "image_names": { "items": { "type": "string" }, "type": "array", - "title": "Keys", - "description": "List of model keys to delete" + "title": "Image Names", + "description": "The names of the images to add" } }, "type": "object", - "required": ["keys"], - "title": "BulkDeleteModelsRequest", - "description": "Request body for bulk model deletion." + "required": ["board_id", "image_names"], + "title": "Body_add_images_to_board" }, - "BulkDeleteModelsResponse": { + "Body_cancel_by_batch_ids": { "properties": { - "deleted": { + "batch_ids": { "items": { "type": "string" }, "type": "array", - "title": "Deleted", - "description": "List of successfully deleted model keys" - }, - "failed": { - "items": { - "additionalProperties": true, - "type": "object" - }, - "type": "array", - "title": "Failed", - "description": "List of failed deletions with error messages" + "title": "Batch Ids", + "description": "The list of batch_ids to cancel all queue items for" } }, "type": "object", - "required": ["deleted", "failed"], - "title": "BulkDeleteModelsResponse", - "description": "Response body for bulk model deletion." 
- }, - "BulkDownloadCompleteEvent": { - "description": "Event model for bulk_download_complete", - "properties": { - "timestamp": { - "description": "The timestamp of the event", - "title": "Timestamp", - "type": "integer" - }, - "bulk_download_id": { - "description": "The ID of the bulk image download", - "title": "Bulk Download Id", - "type": "string" - }, - "bulk_download_item_id": { - "description": "The ID of the bulk image download item", - "title": "Bulk Download Item Id", - "type": "string" - }, - "bulk_download_item_name": { - "description": "The name of the bulk image download item", - "title": "Bulk Download Item Name", - "type": "string" - } - }, - "required": ["timestamp", "bulk_download_id", "bulk_download_item_id", "bulk_download_item_name"], - "title": "BulkDownloadCompleteEvent", - "type": "object" - }, - "BulkDownloadErrorEvent": { - "description": "Event model for bulk_download_error", - "properties": { - "timestamp": { - "description": "The timestamp of the event", - "title": "Timestamp", - "type": "integer" - }, - "bulk_download_id": { - "description": "The ID of the bulk image download", - "title": "Bulk Download Id", - "type": "string" - }, - "bulk_download_item_id": { - "description": "The ID of the bulk image download item", - "title": "Bulk Download Item Id", - "type": "string" - }, - "bulk_download_item_name": { - "description": "The name of the bulk image download item", - "title": "Bulk Download Item Name", - "type": "string" - }, - "error": { - "description": "The error message", - "title": "Error", - "type": "string" - } - }, - "required": ["timestamp", "bulk_download_id", "bulk_download_item_id", "bulk_download_item_name", "error"], - "title": "BulkDownloadErrorEvent", - "type": "object" - }, - "BulkDownloadStartedEvent": { - "description": "Event model for bulk_download_started", - "properties": { - "timestamp": { - "description": "The timestamp of the event", - "title": "Timestamp", - "type": "integer" - }, - "bulk_download_id": { 
- "description": "The ID of the bulk image download", - "title": "Bulk Download Id", - "type": "string" - }, - "bulk_download_item_id": { - "description": "The ID of the bulk image download item", - "title": "Bulk Download Item Id", - "type": "string" - }, - "bulk_download_item_name": { - "description": "The name of the bulk image download item", - "title": "Bulk Download Item Name", - "type": "string" - } - }, - "required": ["timestamp", "bulk_download_id", "bulk_download_item_id", "bulk_download_item_name"], - "title": "BulkDownloadStartedEvent", - "type": "object" + "required": ["batch_ids"], + "title": "Body_cancel_by_batch_ids" }, - "CLIPEmbed_Diffusers_G_Config": { + "Body_create_image_upload_entry": { "properties": { - "key": { - "type": "string", - "title": "Key", - "description": "A unique key for this model." - }, - "hash": { - "type": "string", - "title": "Hash", - "description": "The hash of the model file(s)." - }, - "path": { - "type": "string", - "title": "Path", - "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." - }, - "file_size": { + "width": { "type": "integer", - "title": "File Size", - "description": "The size of the model in bytes." - }, - "name": { - "type": "string", - "title": "Name", - "description": "Name of the model." - }, - "description": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "Description", - "description": "Model description" - }, - "source": { - "type": "string", - "title": "Source", - "description": "The original source of the model (path, URL or repo_id)." 
- }, - "source_type": { - "$ref": "#/components/schemas/ModelSourceType", - "description": "The type of source" + "title": "Width", + "description": "The width of the image" }, - "source_api_response": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "Source Api Response", - "description": "The original API response from the source, as stringified JSON." + "height": { + "type": "integer", + "title": "Height", + "description": "The height of the image" }, - "cover_image": { + "board_id": { "anyOf": [ { "type": "string" @@ -9831,99 +11528,98 @@ "type": "null" } ], - "title": "Cover Image", - "description": "Url for image to preview model" - }, - "format": { - "type": "string", - "const": "diffusers", - "title": "Format", - "default": "diffusers" - }, - "repo_variant": { - "$ref": "#/components/schemas/ModelRepoVariant", - "default": "" - }, - "base": { - "type": "string", - "const": "any", - "title": "Base", - "default": "any" - }, - "type": { - "type": "string", - "const": "clip_embed", - "title": "Type", - "default": "clip_embed" - }, - "cpu_only": { + "title": "Board Id", + "description": "The board to add this image to, if any" + } + }, + "type": "object", + "required": ["width", "height"], + "title": "Body_create_image_upload_entry" + }, + "Body_create_style_preset": { + "properties": { + "image": { "anyOf": [ { - "type": "boolean" + "type": "string", + "format": "binary" }, { "type": "null" } ], - "title": "Cpu Only", - "description": "Whether this model should run on CPU only" + "title": "Image", + "description": "The image file to upload" }, - "variant": { + "data": { "type": "string", - "const": "gigantic", - "title": "Variant", - "default": "gigantic" + "title": "Data", + "description": "The data of the style preset to create" } }, "type": "object", - "required": [ - "key", - "hash", - "path", - "file_size", - "name", - "description", - "source", - "source_type", - "source_api_response", - "cover_image", - "format", - 
"repo_variant", - "base", - "type", - "cpu_only", - "variant" - ], - "title": "CLIPEmbed_Diffusers_G_Config" + "required": ["data"], + "title": "Body_create_style_preset" }, - "CLIPEmbed_Diffusers_L_Config": { + "Body_create_workflow": { "properties": { - "key": { + "workflow": { + "$ref": "#/components/schemas/WorkflowWithoutID", + "description": "The workflow to create" + } + }, + "type": "object", + "required": ["workflow"], + "title": "Body_create_workflow" + }, + "Body_delete_images_from_list": { + "properties": { + "image_names": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Image Names", + "description": "The list of names of images to delete" + } + }, + "type": "object", + "required": ["image_names"], + "title": "Body_delete_images_from_list" + }, + "Body_do_hf_login": { + "properties": { + "token": { "type": "string", - "title": "Key", - "description": "A unique key for this model." - }, - "hash": { + "title": "Token", + "description": "Hugging Face token to use for login" + } + }, + "type": "object", + "required": ["token"], + "title": "Body_do_hf_login" + }, + "Body_download": { + "properties": { + "source": { "type": "string", - "title": "Hash", - "description": "The hash of the model file(s)." + "minLength": 1, + "format": "uri", + "title": "Source", + "description": "download source" }, - "path": { + "dest": { "type": "string", - "title": "Path", - "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." + "title": "Dest", + "description": "download destination" }, - "file_size": { + "priority": { "type": "integer", - "title": "File Size", - "description": "The size of the model in bytes." - }, - "name": { - "type": "string", - "title": "Name", - "description": "Name of the model." 
+ "title": "Priority", + "description": "queue priority", + "default": 10 }, - "description": { + "access_token": { "anyOf": [ { "type": "string" @@ -9932,31 +11628,32 @@ "type": "null" } ], - "title": "Description", - "description": "Model description" - }, - "source": { - "type": "string", - "title": "Source", - "description": "The original source of the model (path, URL or repo_id)." - }, - "source_type": { - "$ref": "#/components/schemas/ModelSourceType", - "description": "The type of source" - }, - "source_api_response": { + "title": "Access Token", + "description": "token for authorization to download" + } + }, + "type": "object", + "required": ["source", "dest"], + "title": "Body_download" + }, + "Body_download_images_from_list": { + "properties": { + "image_names": { "anyOf": [ { - "type": "string" + "items": { + "type": "string" + }, + "type": "array" }, { "type": "null" } ], - "title": "Source Api Response", - "description": "The original API response from the source, as stringified JSON." 
+ "title": "Image Names", + "description": "The list of names of images to download" }, - "cover_image": { + "board_id": { "anyOf": [ { "type": "string" @@ -9965,277 +11662,249 @@ "type": "null" } ], - "title": "Cover Image", - "description": "Url for image to preview model" + "title": "Board Id", + "description": "The board from which image should be downloaded" + } + }, + "type": "object", + "title": "Body_download_images_from_list" + }, + "Body_enqueue_batch": { + "properties": { + "batch": { + "$ref": "#/components/schemas/Batch", + "description": "Batch to process" }, - "format": { + "prepend": { + "type": "boolean", + "title": "Prepend", + "description": "Whether or not to prepend this batch in the queue", + "default": false + } + }, + "type": "object", + "required": ["batch"], + "title": "Body_enqueue_batch" + }, + "Body_get_images_by_names": { + "properties": { + "image_names": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Image Names", + "description": "Object containing list of image names to fetch DTOs for" + } + }, + "type": "object", + "required": ["image_names"], + "title": "Body_get_images_by_names" + }, + "Body_get_queue_items_by_item_ids": { + "properties": { + "item_ids": { + "items": { + "type": "integer" + }, + "type": "array", + "title": "Item Ids", + "description": "Object containing list of queue item ids to fetch queue items for" + } + }, + "type": "object", + "required": ["item_ids"], + "title": "Body_get_queue_items_by_item_ids" + }, + "Body_import_style_presets": { + "properties": { + "file": { "type": "string", - "const": "diffusers", - "title": "Format", - "default": "diffusers" - }, - "repo_variant": { - "$ref": "#/components/schemas/ModelRepoVariant", - "default": "" - }, - "base": { + "format": "binary", + "title": "File", + "description": "The file to import" + } + }, + "type": "object", + "required": ["file"], + "title": "Body_import_style_presets" + }, + "Body_parse_dynamicprompts": { + "properties": { + 
"prompt": { "type": "string", - "const": "any", - "title": "Base", - "default": "any" + "title": "Prompt", + "description": "The prompt to parse with dynamicprompts" }, - "type": { - "type": "string", - "const": "clip_embed", - "title": "Type", - "default": "clip_embed" + "max_prompts": { + "type": "integer", + "maximum": 10000.0, + "minimum": 1.0, + "title": "Max Prompts", + "description": "The max number of prompts to generate", + "default": 1000 }, - "cpu_only": { + "combinatorial": { + "type": "boolean", + "title": "Combinatorial", + "description": "Whether to use the combinatorial generator", + "default": true + }, + "seed": { "anyOf": [ { - "type": "boolean" + "type": "integer" }, { "type": "null" } ], - "title": "Cpu Only", - "description": "Whether this model should run on CPU only" - }, - "variant": { + "title": "Seed", + "description": "The seed to use for random generation. Only used if not combinatorial" + } + }, + "type": "object", + "required": ["prompt"], + "title": "Body_parse_dynamicprompts" + }, + "Body_remove_image_from_board": { + "properties": { + "image_name": { "type": "string", - "const": "large", - "title": "Variant", - "default": "large" + "title": "Image Name", + "description": "The name of the image to remove" } }, "type": "object", - "required": [ - "key", - "hash", - "path", - "file_size", - "name", - "description", - "source", - "source_type", - "source_api_response", - "cover_image", - "format", - "repo_variant", - "base", - "type", - "cpu_only", - "variant" - ], - "title": "CLIPEmbed_Diffusers_L_Config" + "required": ["image_name"], + "title": "Body_remove_image_from_board" }, - "CLIPField": { + "Body_remove_images_from_board": { "properties": { - "tokenizer": { - "$ref": "#/components/schemas/ModelIdentifierField", - "description": "Info to load tokenizer submodel" - }, - "text_encoder": { - "$ref": "#/components/schemas/ModelIdentifierField", - "description": "Info to load text_encoder submodel" - }, - "skipped_layers": { - 
"description": "Number of skipped layers in text_encoder", - "title": "Skipped Layers", - "type": "integer" - }, - "loras": { - "description": "LoRAs to apply on model loading", + "image_names": { "items": { - "$ref": "#/components/schemas/LoRAField" + "type": "string" }, - "title": "Loras", - "type": "array" + "type": "array", + "title": "Image Names", + "description": "The names of the images to remove" } }, - "required": ["tokenizer", "text_encoder", "skipped_layers", "loras"], - "title": "CLIPField", - "type": "object" + "type": "object", + "required": ["image_names"], + "title": "Body_remove_images_from_board" }, - "CLIPOutput": { - "class": "output", - "description": "Base class for invocations that output a CLIP field", + "Body_set_workflow_thumbnail": { "properties": { - "clip": { - "$ref": "#/components/schemas/CLIPField", - "description": "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count", - "field_kind": "output", - "title": "CLIP", - "ui_hidden": false - }, - "type": { - "const": "clip_output", - "default": "clip_output", - "field_kind": "node_attribute", - "title": "type", - "type": "string" + "image": { + "type": "string", + "format": "binary", + "title": "Image", + "description": "The image file to upload" } }, - "required": ["output_meta", "clip", "type", "type"], - "title": "CLIPOutput", - "type": "object" + "type": "object", + "required": ["image"], + "title": "Body_set_workflow_thumbnail" }, - "CLIPSkipInvocation": { - "category": "conditioning", - "class": "invocation", - "classification": "stable", - "description": "Skip layers in clip text_encoder model.", - "node_pack": "invokeai", + "Body_star_images_in_list": { "properties": { - "id": { - "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", - "field_kind": "node_attribute", - "title": "Id", - "type": "string" - }, - "is_intermediate": { - "default": false, - "description": "Whether or not this is an intermediate invocation.", - "field_kind": "node_attribute", - "input": "direct", - "orig_required": true, - "title": "Is Intermediate", - "type": "boolean", - "ui_hidden": false, - "ui_type": "IsIntermediate" - }, - "use_cache": { - "default": true, - "description": "Whether or not to use the cache", - "field_kind": "node_attribute", - "title": "Use Cache", - "type": "boolean" - }, - "clip": { - "anyOf": [ - { - "$ref": "#/components/schemas/CLIPField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count", - "field_kind": "input", - "input": "connection", - "orig_required": true, - "title": "CLIP" - }, - "skipped_layers": { - "default": 0, - "description": "Number of layers to skip in text encoder", - "field_kind": "input", - "input": "any", - "minimum": 0, - "orig_default": 0, - "orig_required": false, - "title": "Skipped Layers", - "type": "integer" - }, - "type": { - "const": "clip_skip", - "default": "clip_skip", - "field_kind": "node_attribute", - "title": "type", - "type": "string" + "image_names": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Image Names", + "description": "The list of names of images to star" } }, - "required": ["type", "id"], - "tags": ["clipskip", "clip", "skip"], - "title": "Apply CLIP Skip - SD1.5, SDXL", "type": "object", - "version": "1.1.1", - "output": { - "$ref": "#/components/schemas/CLIPSkipInvocationOutput" - } + "required": ["image_names"], + "title": "Body_star_images_in_list" }, - "CLIPSkipInvocationOutput": { - "class": "output", - "description": "CLIP skip node output", + "Body_unstar_images_in_list": { "properties": { - "clip": { - "anyOf": [ - { - "$ref": "#/components/schemas/CLIPField" - }, - { - "type": 
"null" - } - ], - "default": null, - "description": "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count", - "field_kind": "output", - "title": "CLIP", - "ui_hidden": false - }, - "type": { - "const": "clip_skip_output", - "default": "clip_skip_output", - "field_kind": "node_attribute", - "title": "type", - "type": "string" + "image_names": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Image Names", + "description": "The list of names of images to unstar" } }, - "required": ["output_meta", "clip", "type", "type"], - "title": "CLIPSkipInvocationOutput", - "type": "object" + "type": "object", + "required": ["image_names"], + "title": "Body_unstar_images_in_list" }, - "CLIPVision_Diffusers_Config": { + "Body_update_model_image": { "properties": { - "key": { - "type": "string", - "title": "Key", - "description": "A unique key for this model." - }, - "hash": { - "type": "string", - "title": "Hash", - "description": "The hash of the model file(s)." - }, - "path": { - "type": "string", - "title": "Path", - "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." - }, - "file_size": { - "type": "integer", - "title": "File Size", - "description": "The size of the model in bytes." - }, - "name": { + "image": { "type": "string", - "title": "Name", - "description": "Name of the model." - }, - "description": { + "format": "binary", + "title": "Image" + } + }, + "type": "object", + "required": ["image"], + "title": "Body_update_model_image" + }, + "Body_update_style_preset": { + "properties": { + "image": { "anyOf": [ { - "type": "string" + "type": "string", + "format": "binary" }, { "type": "null" } ], - "title": "Description", - "description": "Model description" + "title": "Image", + "description": "The image file to upload" }, - "source": { + "data": { "type": "string", - "title": "Source", - "description": "The original source of the model (path, URL or repo_id)." 
- }, - "source_type": { - "$ref": "#/components/schemas/ModelSourceType", - "description": "The type of source" + "title": "Data", + "description": "The data of the style preset to update" + } + }, + "type": "object", + "required": ["data"], + "title": "Body_update_style_preset" + }, + "Body_update_workflow": { + "properties": { + "workflow": { + "$ref": "#/components/schemas/Workflow", + "description": "The updated workflow" + } + }, + "type": "object", + "required": ["workflow"], + "title": "Body_update_workflow" + }, + "Body_update_workflow_is_public": { + "properties": { + "is_public": { + "type": "boolean", + "title": "Is Public", + "description": "Whether the workflow should be shared publicly" + } + }, + "type": "object", + "required": ["is_public"], + "title": "Body_update_workflow_is_public" + }, + "Body_upload_image": { + "properties": { + "file": { + "type": "string", + "format": "binary", + "title": "File" }, - "source_api_response": { + "resize_to": { "anyOf": [ { "type": "string" @@ -10244,10 +11913,11 @@ "type": "null" } ], - "title": "Source Api Response", - "description": "The original API response from the source, as stringified JSON." + "title": "Resize To", + "description": "Dimensions to resize the image to, must be stringified tuple of 2 integers. 
Max total pixel count: 16777216", + "examples": ["\"[1024,1024]\""] }, - "cover_image": { + "metadata": { "anyOf": [ { "type": "string" @@ -10256,104 +11926,21 @@ "type": "null" } ], - "title": "Cover Image", - "description": "Url for image to preview model" - }, - "format": { - "type": "string", - "const": "diffusers", - "title": "Format", - "default": "diffusers" - }, - "repo_variant": { - "$ref": "#/components/schemas/ModelRepoVariant", - "default": "" - }, - "base": { - "type": "string", - "const": "any", - "title": "Base", - "default": "any" - }, - "type": { - "type": "string", - "const": "clip_vision", - "title": "Type", - "default": "clip_vision" - }, - "cpu_only": { - "anyOf": [ - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "title": "Cpu Only", - "description": "Whether this model should run on CPU only" + "title": "Metadata", + "description": "The metadata to associate with the image, must be a stringified JSON dict" } }, "type": "object", - "required": [ - "key", - "hash", - "path", - "file_size", - "name", - "description", - "source", - "source_type", - "source_api_response", - "cover_image", - "format", - "repo_variant", - "base", - "type", - "cpu_only" - ], - "title": "CLIPVision_Diffusers_Config", - "description": "Model config for CLIPVision." 
+ "required": ["file"], + "title": "Body_upload_image" }, - "CV2InfillInvocation": { - "category": "inpaint", + "BooleanCollectionInvocation": { + "category": "primitives", "class": "invocation", "classification": "stable", - "description": "Infills transparent areas of an image using OpenCV Inpainting", + "description": "A collection of boolean primitive values", "node_pack": "invokeai", "properties": { - "board": { - "anyOf": [ - { - "$ref": "#/components/schemas/BoardField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "The board to save the image to", - "field_kind": "internal", - "input": "direct", - "orig_required": false, - "ui_hidden": false - }, - "metadata": { - "anyOf": [ - { - "$ref": "#/components/schemas/MetadataField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Optional metadata to be saved with the image", - "field_kind": "internal", - "input": "connection", - "orig_required": false, - "ui_hidden": false - }, "id": { "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", "field_kind": "node_attribute", @@ -10378,86 +11965,67 @@ "title": "Use Cache", "type": "boolean" }, - "image": { - "anyOf": [ - { - "$ref": "#/components/schemas/ImageField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "The image to process", + "collection": { + "default": [], + "description": "The collection of boolean values", "field_kind": "input", "input": "any", - "orig_required": true + "items": { + "type": "boolean" + }, + "orig_default": [], + "orig_required": false, + "title": "Collection", + "type": "array" }, "type": { - "const": "infill_cv2", - "default": "infill_cv2", + "const": "boolean_collection", + "default": "boolean_collection", "field_kind": "node_attribute", "title": "type", "type": "string" } }, "required": ["type", "id"], - "tags": ["image", "inpaint"], - "title": "CV2 Infill", + "tags": ["primitives", "boolean", "collection"], + "title": "Boolean Collection Primitive", "type": "object", - "version": "1.2.2", + "version": "1.0.2", "output": { - "$ref": "#/components/schemas/ImageOutput" + "$ref": "#/components/schemas/BooleanCollectionOutput" } }, - "CacheStats": { + "BooleanCollectionOutput": { + "class": "output", + "description": "Base class for nodes that output a collection of booleans", "properties": { - "hits": { - "type": "integer", - "title": "Hits", - "default": 0 - }, - "misses": { - "type": "integer", - "title": "Misses", - "default": 0 - }, - "high_watermark": { - "type": "integer", - "title": "High Watermark", - "default": 0 - }, - "in_cache": { - "type": "integer", - "title": "In Cache", - "default": 0 - }, - "cleared": { - "type": "integer", - "title": "Cleared", - "default": 0 - }, - "cache_size": { - "type": "integer", - "title": "Cache Size", - "default": 0 - }, - "loaded_model_sizes": { - "additionalProperties": { - "type": "integer" + "collection": { + "description": "The output boolean collection", + "field_kind": "output", + "items": { + 
"type": "boolean" }, - "type": "object", - "title": "Loaded Model Sizes" + "title": "Collection", + "type": "array", + "ui_hidden": false + }, + "type": { + "const": "boolean_collection_output", + "default": "boolean_collection_output", + "field_kind": "node_attribute", + "title": "type", + "type": "string" } }, - "type": "object", - "title": "CacheStats" + "required": ["output_meta", "collection", "type", "type"], + "title": "BooleanCollectionOutput", + "type": "object" }, - "CalculateImageTilesEvenSplitInvocation": { - "category": "tiles", + "BooleanInvocation": { + "category": "primitives", "class": "invocation", "classification": "stable", - "description": "Calculate the coordinates and overlaps of tiles that cover a target image shape.", + "description": "A boolean primitive value", "node_pack": "invokeai", "properties": { "id": { @@ -10484,187 +12052,130 @@ "title": "Use Cache", "type": "boolean" }, - "image_width": { - "default": 1024, - "description": "The image width, in pixels, to calculate tiles for.", - "field_kind": "input", - "input": "any", - "minimum": 1, - "orig_default": 1024, - "orig_required": false, - "title": "Image Width", - "type": "integer" - }, - "image_height": { - "default": 1024, - "description": "The image height, in pixels, to calculate tiles for.", - "field_kind": "input", - "input": "any", - "minimum": 1, - "orig_default": 1024, - "orig_required": false, - "title": "Image Height", - "type": "integer" - }, - "num_tiles_x": { - "default": 2, - "description": "Number of tiles to divide image into on the x axis", - "field_kind": "input", - "input": "any", - "minimum": 1, - "orig_default": 2, - "orig_required": false, - "title": "Num Tiles X", - "type": "integer" - }, - "num_tiles_y": { - "default": 2, - "description": "Number of tiles to divide image into on the y axis", - "field_kind": "input", - "input": "any", - "minimum": 1, - "orig_default": 2, - "orig_required": false, - "title": "Num Tiles Y", - "type": "integer" - }, - 
"overlap": { - "default": 128, - "description": "The overlap, in pixels, between adjacent tiles.", + "value": { + "default": false, + "description": "The boolean value", "field_kind": "input", "input": "any", - "minimum": 0, - "multipleOf": 8, - "orig_default": 128, + "orig_default": false, "orig_required": false, - "title": "Overlap", - "type": "integer" + "title": "Value", + "type": "boolean" }, "type": { - "const": "calculate_image_tiles_even_split", - "default": "calculate_image_tiles_even_split", + "const": "boolean", + "default": "boolean", "field_kind": "node_attribute", "title": "type", "type": "string" } }, "required": ["type", "id"], - "tags": ["tiles"], - "title": "Calculate Image Tiles Even Split", + "tags": ["primitives", "boolean"], + "title": "Boolean Primitive", "type": "object", - "version": "1.1.1", + "version": "1.0.1", "output": { - "$ref": "#/components/schemas/CalculateImageTilesOutput" + "$ref": "#/components/schemas/BooleanOutput" } }, - "CalculateImageTilesInvocation": { - "category": "tiles", - "class": "invocation", - "classification": "stable", - "description": "Calculate the coordinates and overlaps of tiles that cover a target image shape.", - "node_pack": "invokeai", + "BooleanOutput": { + "class": "output", + "description": "Base class for nodes that output a single boolean", "properties": { - "id": { - "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", - "field_kind": "node_attribute", - "title": "Id", - "type": "string" - }, - "is_intermediate": { - "default": false, - "description": "Whether or not this is an intermediate invocation.", - "field_kind": "node_attribute", - "input": "direct", - "orig_required": true, - "title": "Is Intermediate", + "value": { + "description": "The output boolean", + "field_kind": "output", + "title": "Value", "type": "boolean", - "ui_hidden": false, - "ui_type": "IsIntermediate" + "ui_hidden": false }, - "use_cache": { - "default": true, - "description": "Whether or not to use the cache", + "type": { + "const": "boolean_output", + "default": "boolean_output", "field_kind": "node_attribute", - "title": "Use Cache", - "type": "boolean" - }, - "image_width": { - "default": 1024, - "description": "The image width, in pixels, to calculate tiles for.", - "field_kind": "input", - "input": "any", - "minimum": 1, - "orig_default": 1024, - "orig_required": false, - "title": "Image Width", - "type": "integer" + "title": "type", + "type": "string" + } + }, + "required": ["output_meta", "value", "type", "type"], + "title": "BooleanOutput", + "type": "object" + }, + "BoundingBoxCollectionOutput": { + "class": "output", + "description": "Base class for nodes that output a collection of bounding boxes", + "properties": { + "collection": { + "description": "The output bounding boxes.", + "field_kind": "output", + "items": { + "$ref": "#/components/schemas/BoundingBoxField" + }, + "title": "Bounding Boxes", + "type": "array", + "ui_hidden": false }, - "image_height": { - "default": 1024, - "description": "The image height, in pixels, to calculate tiles for.", - "field_kind": "input", - "input": "any", - "minimum": 1, - "orig_default": 1024, - "orig_required": false, - "title": "Image Height", + "type": { + "const": "bounding_box_collection_output", + "default": "bounding_box_collection_output", + "field_kind": "node_attribute", + "title": "type", 
+ "type": "string" + } + }, + "required": ["output_meta", "collection", "type", "type"], + "title": "BoundingBoxCollectionOutput", + "type": "object" + }, + "BoundingBoxField": { + "description": "A bounding box primitive value.", + "properties": { + "x_min": { + "description": "The minimum x-coordinate of the bounding box (inclusive).", + "title": "X Min", "type": "integer" }, - "tile_width": { - "default": 576, - "description": "The tile width, in pixels.", - "field_kind": "input", - "input": "any", - "minimum": 1, - "orig_default": 576, - "orig_required": false, - "title": "Tile Width", + "x_max": { + "description": "The maximum x-coordinate of the bounding box (exclusive).", + "title": "X Max", "type": "integer" }, - "tile_height": { - "default": 576, - "description": "The tile height, in pixels.", - "field_kind": "input", - "input": "any", - "minimum": 1, - "orig_default": 576, - "orig_required": false, - "title": "Tile Height", + "y_min": { + "description": "The minimum y-coordinate of the bounding box (inclusive).", + "title": "Y Min", "type": "integer" }, - "overlap": { - "default": 128, - "description": "The target overlap, in pixels, between adjacent tiles. Adjacent tiles will overlap by at least this amount", - "field_kind": "input", - "input": "any", - "minimum": 0, - "orig_default": 128, - "orig_required": false, - "title": "Overlap", + "y_max": { + "description": "The maximum y-coordinate of the bounding box (exclusive).", + "title": "Y Max", "type": "integer" }, - "type": { - "const": "calculate_image_tiles", - "default": "calculate_image_tiles", - "field_kind": "node_attribute", - "title": "type", - "type": "string" + "score": { + "anyOf": [ + { + "maximum": 1.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The score associated with the bounding box. In the range [0, 1]. 
This value is typically set when the bounding box was produced by a detector and has an associated confidence score.", + "title": "Score" } }, - "required": ["type", "id"], - "tags": ["tiles"], - "title": "Calculate Image Tiles", - "type": "object", - "version": "1.0.1", - "output": { - "$ref": "#/components/schemas/CalculateImageTilesOutput" - } + "required": ["x_min", "x_max", "y_min", "y_max"], + "title": "BoundingBoxField", + "type": "object" }, - "CalculateImageTilesMinimumOverlapInvocation": { - "category": "tiles", + "BoundingBoxInvocation": { + "category": "primitives", "class": "invocation", "classification": "stable", - "description": "Calculate the coordinates and overlaps of tiles that cover a target image shape.", + "description": "Create a bounding box manually by supplying box coordinates", "node_pack": "invokeai", "properties": { "id": { @@ -10691,436 +12202,607 @@ "title": "Use Cache", "type": "boolean" }, - "image_width": { - "default": 1024, - "description": "The image width, in pixels, to calculate tiles for.", - "field_kind": "input", - "input": "any", - "minimum": 1, - "orig_default": 1024, - "orig_required": false, - "title": "Image Width", - "type": "integer" - }, - "image_height": { - "default": 1024, - "description": "The image height, in pixels, to calculate tiles for.", + "x_min": { + "default": 0, + "description": "x-coordinate of the bounding box's top left vertex", "field_kind": "input", "input": "any", - "minimum": 1, - "orig_default": 1024, + "orig_default": 0, "orig_required": false, - "title": "Image Height", + "title": "X Min", "type": "integer" }, - "tile_width": { - "default": 576, - "description": "The tile width, in pixels.", + "y_min": { + "default": 0, + "description": "y-coordinate of the bounding box's top left vertex", "field_kind": "input", "input": "any", - "minimum": 1, - "orig_default": 576, + "orig_default": 0, "orig_required": false, - "title": "Tile Width", + "title": "Y Min", "type": "integer" }, - "tile_height": 
{ - "default": 576, - "description": "The tile height, in pixels.", + "x_max": { + "default": 0, + "description": "x-coordinate of the bounding box's bottom right vertex", "field_kind": "input", "input": "any", - "minimum": 1, - "orig_default": 576, + "orig_default": 0, "orig_required": false, - "title": "Tile Height", + "title": "X Max", "type": "integer" }, - "min_overlap": { - "default": 128, - "description": "Minimum overlap between adjacent tiles, in pixels.", + "y_max": { + "default": 0, + "description": "y-coordinate of the bounding box's bottom right vertex", "field_kind": "input", "input": "any", - "minimum": 0, - "orig_default": 128, + "orig_default": 0, "orig_required": false, - "title": "Min Overlap", + "title": "Y Max", "type": "integer" }, "type": { - "const": "calculate_image_tiles_min_overlap", - "default": "calculate_image_tiles_min_overlap", + "const": "bounding_box", + "default": "bounding_box", "field_kind": "node_attribute", "title": "type", "type": "string" } }, "required": ["type", "id"], - "tags": ["tiles"], - "title": "Calculate Image Tiles Minimum Overlap", + "tags": ["primitives", "segmentation", "collection", "bounding box"], + "title": "Bounding Box", "type": "object", - "version": "1.0.1", + "version": "1.0.0", "output": { - "$ref": "#/components/schemas/CalculateImageTilesOutput" + "$ref": "#/components/schemas/BoundingBoxOutput" } }, - "CalculateImageTilesOutput": { + "BoundingBoxOutput": { "class": "output", + "description": "Base class for nodes that output a single bounding box", "properties": { - "tiles": { - "description": "The tiles coordinates that cover a particular image shape.", + "bounding_box": { + "$ref": "#/components/schemas/BoundingBoxField", + "description": "The output bounding box.", "field_kind": "output", - "items": { - "$ref": "#/components/schemas/Tile" - }, - "title": "Tiles", - "type": "array", "ui_hidden": false }, "type": { - "const": "calculate_image_tiles_output", - "default": 
"calculate_image_tiles_output", + "const": "bounding_box_output", + "default": "bounding_box_output", "field_kind": "node_attribute", "title": "type", "type": "string" } }, - "required": ["output_meta", "tiles", "type", "type"], - "title": "CalculateImageTilesOutput", + "required": ["output_meta", "bounding_box", "type", "type"], + "title": "BoundingBoxOutput", "type": "object" }, - "CancelAllExceptCurrentResult": { + "BulkDeleteModelsRequest": { "properties": { - "canceled": { - "type": "integer", - "title": "Canceled", - "description": "Number of queue items canceled" + "keys": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Keys", + "description": "List of model keys to delete" } }, "type": "object", - "required": ["canceled"], - "title": "CancelAllExceptCurrentResult", - "description": "Result of canceling all except current" + "required": ["keys"], + "title": "BulkDeleteModelsRequest", + "description": "Request body for bulk model deletion." }, - "CancelByBatchIDsResult": { + "BulkDeleteModelsResponse": { "properties": { - "canceled": { - "type": "integer", - "title": "Canceled", - "description": "Number of queue items canceled" + "deleted": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Deleted", + "description": "List of successfully deleted model keys" + }, + "failed": { + "items": { + "additionalProperties": true, + "type": "object" + }, + "type": "array", + "title": "Failed", + "description": "List of failed deletions with error messages" } }, "type": "object", - "required": ["canceled"], - "title": "CancelByBatchIDsResult", - "description": "Result of canceling by list of batch ids" + "required": ["deleted", "failed"], + "title": "BulkDeleteModelsResponse", + "description": "Response body for bulk model deletion." 
}, - "CancelByDestinationResult": { + "BulkDownloadCompleteEvent": { + "description": "Event model for bulk_download_complete", "properties": { - "canceled": { - "type": "integer", - "title": "Canceled", - "description": "Number of queue items canceled" + "timestamp": { + "description": "The timestamp of the event", + "title": "Timestamp", + "type": "integer" + }, + "bulk_download_id": { + "description": "The ID of the bulk image download", + "title": "Bulk Download Id", + "type": "string" + }, + "bulk_download_item_id": { + "description": "The ID of the bulk image download item", + "title": "Bulk Download Item Id", + "type": "string" + }, + "bulk_download_item_name": { + "description": "The name of the bulk image download item", + "title": "Bulk Download Item Name", + "type": "string" + }, + "user_id": { + "default": "system", + "description": "The ID of the user who initiated the download", + "title": "User Id", + "type": "string" + } + }, + "required": ["timestamp", "bulk_download_id", "bulk_download_item_id", "bulk_download_item_name", "user_id"], + "title": "BulkDownloadCompleteEvent", + "type": "object" + }, + "BulkDownloadErrorEvent": { + "description": "Event model for bulk_download_error", + "properties": { + "timestamp": { + "description": "The timestamp of the event", + "title": "Timestamp", + "type": "integer" + }, + "bulk_download_id": { + "description": "The ID of the bulk image download", + "title": "Bulk Download Id", + "type": "string" + }, + "bulk_download_item_id": { + "description": "The ID of the bulk image download item", + "title": "Bulk Download Item Id", + "type": "string" + }, + "bulk_download_item_name": { + "description": "The name of the bulk image download item", + "title": "Bulk Download Item Name", + "type": "string" + }, + "user_id": { + "default": "system", + "description": "The ID of the user who initiated the download", + "title": "User Id", + "type": "string" + }, + "error": { + "description": "The error message", + "title": 
"Error", + "type": "string" + } + }, + "required": [ + "timestamp", + "bulk_download_id", + "bulk_download_item_id", + "bulk_download_item_name", + "user_id", + "error" + ], + "title": "BulkDownloadErrorEvent", + "type": "object" + }, + "BulkDownloadStartedEvent": { + "description": "Event model for bulk_download_started", + "properties": { + "timestamp": { + "description": "The timestamp of the event", + "title": "Timestamp", + "type": "integer" + }, + "bulk_download_id": { + "description": "The ID of the bulk image download", + "title": "Bulk Download Id", + "type": "string" + }, + "bulk_download_item_id": { + "description": "The ID of the bulk image download item", + "title": "Bulk Download Item Id", + "type": "string" + }, + "bulk_download_item_name": { + "description": "The name of the bulk image download item", + "title": "Bulk Download Item Name", + "type": "string" + }, + "user_id": { + "default": "system", + "description": "The ID of the user who initiated the download", + "title": "User Id", + "type": "string" + } + }, + "required": ["timestamp", "bulk_download_id", "bulk_download_item_id", "bulk_download_item_name", "user_id"], + "title": "BulkDownloadStartedEvent", + "type": "object" + }, + "BulkReidentifyModelsRequest": { + "properties": { + "keys": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Keys", + "description": "List of model keys to reidentify" } }, "type": "object", - "required": ["canceled"], - "title": "CancelByDestinationResult", - "description": "Result of canceling by a destination" + "required": ["keys"], + "title": "BulkReidentifyModelsRequest", + "description": "Request body for bulk model reidentification." 
}, - "CannyEdgeDetectionInvocation": { - "category": "controlnet", - "class": "invocation", - "classification": "stable", - "description": "Geneartes an edge map using a cv2's Canny algorithm.", - "node_pack": "invokeai", + "BulkReidentifyModelsResponse": { "properties": { - "board": { + "succeeded": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Succeeded", + "description": "List of successfully reidentified model keys" + }, + "failed": { + "items": { + "additionalProperties": true, + "type": "object" + }, + "type": "array", + "title": "Failed", + "description": "List of failed reidentifications with error messages" + } + }, + "type": "object", + "required": ["succeeded", "failed"], + "title": "BulkReidentifyModelsResponse", + "description": "Response body for bulk model reidentification." + }, + "CLIPEmbed_Diffusers_G_Config": { + "properties": { + "key": { + "type": "string", + "title": "Key", + "description": "A unique key for this model." + }, + "hash": { + "type": "string", + "title": "Hash", + "description": "The hash of the model file(s)." + }, + "path": { + "type": "string", + "title": "Path", + "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." + }, + "file_size": { + "type": "integer", + "title": "File Size", + "description": "The size of the model in bytes." + }, + "name": { + "type": "string", + "title": "Name", + "description": "Name of the model." + }, + "description": { "anyOf": [ { - "$ref": "#/components/schemas/BoardField" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "The board to save the image to", - "field_kind": "internal", - "input": "direct", - "orig_required": false, - "ui_hidden": false + "title": "Description", + "description": "Model description" }, - "metadata": { + "source": { + "type": "string", + "title": "Source", + "description": "The original source of the model (path, URL or repo_id)." 
+ }, + "source_type": { + "$ref": "#/components/schemas/ModelSourceType", + "description": "The type of source" + }, + "source_api_response": { "anyOf": [ { - "$ref": "#/components/schemas/MetadataField" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "Optional metadata to be saved with the image", - "field_kind": "internal", - "input": "connection", - "orig_required": false, - "ui_hidden": false - }, - "id": { - "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", - "field_kind": "node_attribute", - "title": "Id", - "type": "string" + "title": "Source Api Response", + "description": "The original API response from the source, as stringified JSON." }, - "is_intermediate": { - "default": false, - "description": "Whether or not this is an intermediate invocation.", - "field_kind": "node_attribute", - "input": "direct", - "orig_required": true, - "title": "Is Intermediate", - "type": "boolean", - "ui_hidden": false, - "ui_type": "IsIntermediate" - }, - "use_cache": { - "default": true, - "description": "Whether or not to use the cache", - "field_kind": "node_attribute", - "title": "Use Cache", - "type": "boolean" - }, - "image": { + "cover_image": { "anyOf": [ { - "$ref": "#/components/schemas/ImageField" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "The image to process", - "field_kind": "input", - "input": "any", - "orig_required": true + "title": "Cover Image", + "description": "Url for image to preview model" }, - "low_threshold": { - "default": 100, - "description": "The low threshold of the Canny pixel gradient (0-255)", - "field_kind": "input", - "input": "any", - "maximum": 255, - "minimum": 0, - "orig_default": 100, - "orig_required": false, - "title": "Low Threshold", - "type": "integer" + "format": { + "type": "string", + "const": "diffusers", + "title": "Format", + "default": "diffusers" }, - "high_threshold": { - "default": 200, - 
"description": "The high threshold of the Canny pixel gradient (0-255)", - "field_kind": "input", - "input": "any", - "maximum": 255, - "minimum": 0, - "orig_default": 200, - "orig_required": false, - "title": "High Threshold", - "type": "integer" + "repo_variant": { + "$ref": "#/components/schemas/ModelRepoVariant", + "default": "" + }, + "base": { + "type": "string", + "const": "any", + "title": "Base", + "default": "any" }, "type": { - "const": "canny_edge_detection", - "default": "canny_edge_detection", - "field_kind": "node_attribute", - "title": "type", - "type": "string" - } - }, - "required": ["type", "id"], - "tags": ["controlnet", "canny"], - "title": "Canny Edge Detection", - "type": "object", - "version": "1.0.0", - "output": { - "$ref": "#/components/schemas/ImageOutput" - } - }, - "CanvasPasteBackInvocation": { - "category": "image", - "class": "invocation", - "classification": "stable", - "description": "Combines two images by using the mask provided. Intended for use on the Unified Canvas.", - "node_pack": "invokeai", - "properties": { - "board": { + "type": "string", + "const": "clip_embed", + "title": "Type", + "default": "clip_embed" + }, + "cpu_only": { "anyOf": [ { - "$ref": "#/components/schemas/BoardField" + "type": "boolean" }, { "type": "null" } ], - "default": null, - "description": "The board to save the image to", - "field_kind": "internal", - "input": "direct", - "orig_required": false, - "ui_hidden": false + "title": "Cpu Only", + "description": "Whether this model should run on CPU only" }, - "metadata": { + "variant": { + "type": "string", + "const": "gigantic", + "title": "Variant", + "default": "gigantic" + } + }, + "type": "object", + "required": [ + "key", + "hash", + "path", + "file_size", + "name", + "description", + "source", + "source_type", + "source_api_response", + "cover_image", + "format", + "repo_variant", + "base", + "type", + "cpu_only", + "variant" + ], + "title": "CLIPEmbed_Diffusers_G_Config" + }, + 
"CLIPEmbed_Diffusers_L_Config": { + "properties": { + "key": { + "type": "string", + "title": "Key", + "description": "A unique key for this model." + }, + "hash": { + "type": "string", + "title": "Hash", + "description": "The hash of the model file(s)." + }, + "path": { + "type": "string", + "title": "Path", + "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." + }, + "file_size": { + "type": "integer", + "title": "File Size", + "description": "The size of the model in bytes." + }, + "name": { + "type": "string", + "title": "Name", + "description": "Name of the model." + }, + "description": { "anyOf": [ { - "$ref": "#/components/schemas/MetadataField" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "Optional metadata to be saved with the image", - "field_kind": "internal", - "input": "connection", - "orig_required": false, - "ui_hidden": false - }, - "id": { - "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", - "field_kind": "node_attribute", - "title": "Id", - "type": "string" + "title": "Description", + "description": "Model description" }, - "is_intermediate": { - "default": false, - "description": "Whether or not this is an intermediate invocation.", - "field_kind": "node_attribute", - "input": "direct", - "orig_required": true, - "title": "Is Intermediate", - "type": "boolean", - "ui_hidden": false, - "ui_type": "IsIntermediate" + "source": { + "type": "string", + "title": "Source", + "description": "The original source of the model (path, URL or repo_id)." 
}, - "use_cache": { - "default": true, - "description": "Whether or not to use the cache", - "field_kind": "node_attribute", - "title": "Use Cache", - "type": "boolean" + "source_type": { + "$ref": "#/components/schemas/ModelSourceType", + "description": "The type of source" }, - "source_image": { + "source_api_response": { "anyOf": [ { - "$ref": "#/components/schemas/ImageField" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "The source image", - "field_kind": "input", - "input": "any", - "orig_required": true + "title": "Source Api Response", + "description": "The original API response from the source, as stringified JSON." }, - "target_image": { + "cover_image": { "anyOf": [ { - "$ref": "#/components/schemas/ImageField" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "The target image", - "field_kind": "input", - "input": "any", - "orig_required": true + "title": "Cover Image", + "description": "Url for image to preview model" }, - "mask": { + "format": { + "type": "string", + "const": "diffusers", + "title": "Format", + "default": "diffusers" + }, + "repo_variant": { + "$ref": "#/components/schemas/ModelRepoVariant", + "default": "" + }, + "base": { + "type": "string", + "const": "any", + "title": "Base", + "default": "any" + }, + "type": { + "type": "string", + "const": "clip_embed", + "title": "Type", + "default": "clip_embed" + }, + "cpu_only": { "anyOf": [ { - "$ref": "#/components/schemas/ImageField" + "type": "boolean" }, { "type": "null" } ], - "default": null, - "description": "The mask to use when pasting", - "field_kind": "input", - "input": "any", - "orig_required": true + "title": "Cpu Only", + "description": "Whether this model should run on CPU only" }, - "mask_blur": { - "default": 0, - "description": "The amount to blur the mask by", - "field_kind": "input", - "input": "any", - "minimum": 0, - "orig_default": 0, - "orig_required": false, - "title": "Mask Blur", + "variant": { + 
"type": "string", + "const": "large", + "title": "Variant", + "default": "large" + } + }, + "type": "object", + "required": [ + "key", + "hash", + "path", + "file_size", + "name", + "description", + "source", + "source_type", + "source_api_response", + "cover_image", + "format", + "repo_variant", + "base", + "type", + "cpu_only", + "variant" + ], + "title": "CLIPEmbed_Diffusers_L_Config" + }, + "CLIPField": { + "properties": { + "tokenizer": { + "$ref": "#/components/schemas/ModelIdentifierField", + "description": "Info to load tokenizer submodel" + }, + "text_encoder": { + "$ref": "#/components/schemas/ModelIdentifierField", + "description": "Info to load text_encoder submodel" + }, + "skipped_layers": { + "description": "Number of skipped layers in text_encoder", + "title": "Skipped Layers", "type": "integer" }, + "loras": { + "description": "LoRAs to apply on model loading", + "items": { + "$ref": "#/components/schemas/LoRAField" + }, + "title": "Loras", + "type": "array" + } + }, + "required": ["tokenizer", "text_encoder", "skipped_layers", "loras"], + "title": "CLIPField", + "type": "object" + }, + "CLIPOutput": { + "class": "output", + "description": "Base class for invocations that output a CLIP field", + "properties": { + "clip": { + "$ref": "#/components/schemas/CLIPField", + "description": "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count", + "field_kind": "output", + "title": "CLIP", + "ui_hidden": false + }, "type": { - "const": "canvas_paste_back", - "default": "canvas_paste_back", + "const": "clip_output", + "default": "clip_output", "field_kind": "node_attribute", "title": "type", "type": "string" } }, - "required": ["type", "id"], - "tags": ["image", "combine"], - "title": "Canvas Paste Back", - "type": "object", - "version": "1.0.1", - "output": { - "$ref": "#/components/schemas/ImageOutput" - } + "required": ["output_meta", "clip", "type", "type"], + "title": "CLIPOutput", + "type": "object" }, - "CanvasV2MaskAndCropInvocation": { - 
"category": "image", + "CLIPSkipInvocation": { + "category": "conditioning", "class": "invocation", - "classification": "deprecated", - "description": "Handles Canvas V2 image output masking and cropping", + "classification": "stable", + "description": "Skip layers in clip text_encoder model.", "node_pack": "invokeai", "properties": { - "board": { - "anyOf": [ - { - "$ref": "#/components/schemas/BoardField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "The board to save the image to", - "field_kind": "internal", - "input": "direct", - "orig_required": false, - "ui_hidden": false - }, - "metadata": { - "anyOf": [ - { - "$ref": "#/components/schemas/MetadataField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Optional metadata to be saved with the image", - "field_kind": "internal", - "input": "connection", - "orig_required": false, - "ui_hidden": false - }, "id": { "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", "field_kind": "node_attribute", @@ -11145,493 +12827,214 @@ "title": "Use Cache", "type": "boolean" }, - "source_image": { - "anyOf": [ - { - "$ref": "#/components/schemas/ImageField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "The source image onto which the masked generated image is pasted. 
If omitted, the masked generated image is returned with transparency.", - "field_kind": "input", - "input": "any", - "orig_default": null, - "orig_required": false - }, - "generated_image": { - "anyOf": [ - { - "$ref": "#/components/schemas/ImageField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "The image to apply the mask to", - "field_kind": "input", - "input": "any", - "orig_required": true - }, - "mask": { + "clip": { "anyOf": [ { - "$ref": "#/components/schemas/ImageField" + "$ref": "#/components/schemas/CLIPField" }, { "type": "null" } ], "default": null, - "description": "The mask to apply", + "description": "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count", "field_kind": "input", - "input": "any", - "orig_required": true + "input": "connection", + "orig_required": true, + "title": "CLIP" }, - "mask_blur": { + "skipped_layers": { "default": 0, - "description": "The amount to blur the mask by", + "description": "Number of layers to skip in text encoder", "field_kind": "input", "input": "any", "minimum": 0, "orig_default": 0, "orig_required": false, - "title": "Mask Blur", + "title": "Skipped Layers", "type": "integer" }, "type": { - "const": "canvas_v2_mask_and_crop", - "default": "canvas_v2_mask_and_crop", + "const": "clip_skip", + "default": "clip_skip", "field_kind": "node_attribute", "title": "type", "type": "string" } }, "required": ["type", "id"], - "tags": ["image", "mask", "id"], - "title": "Canvas V2 Mask and Crop", + "tags": ["clipskip", "clip", "skip"], + "title": "Apply CLIP Skip - SD1.5, SDXL", "type": "object", - "version": "1.0.0", + "version": "1.1.1", "output": { - "$ref": "#/components/schemas/ImageOutput" + "$ref": "#/components/schemas/CLIPSkipInvocationOutput" } }, - "CenterPadCropInvocation": { - "category": "image", - "class": "invocation", - "classification": "stable", - "description": "Pad or crop an image's sides from the center by specified pixels. 
Positive values are outside of the image.", - "node_pack": "invokeai", + "CLIPSkipInvocationOutput": { + "class": "output", + "description": "CLIP skip node output", "properties": { - "id": { - "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", - "field_kind": "node_attribute", - "title": "Id", - "type": "string" - }, - "is_intermediate": { - "default": false, - "description": "Whether or not this is an intermediate invocation.", - "field_kind": "node_attribute", - "input": "direct", - "orig_required": true, - "title": "Is Intermediate", - "type": "boolean", - "ui_hidden": false, - "ui_type": "IsIntermediate" - }, - "use_cache": { - "default": true, - "description": "Whether or not to use the cache", - "field_kind": "node_attribute", - "title": "Use Cache", - "type": "boolean" - }, - "image": { + "clip": { "anyOf": [ { - "$ref": "#/components/schemas/ImageField" + "$ref": "#/components/schemas/CLIPField" }, { "type": "null" } ], "default": null, - "description": "The image to crop", - "field_kind": "input", - "input": "any", - "orig_required": true - }, - "left": { - "default": 0, - "description": "Number of pixels to pad/crop from the left (negative values crop inwards, positive values pad outwards)", - "field_kind": "input", - "input": "any", - "orig_default": 0, - "orig_required": false, - "title": "Left", - "type": "integer" - }, - "right": { - "default": 0, - "description": "Number of pixels to pad/crop from the right (negative values crop inwards, positive values pad outwards)", - "field_kind": "input", - "input": "any", - "orig_default": 0, - "orig_required": false, - "title": "Right", - "type": "integer" - }, - "top": { - "default": 0, - "description": "Number of pixels to pad/crop from the top (negative values crop inwards, positive values pad outwards)", - "field_kind": "input", - "input": "any", - "orig_default": 0, - "orig_required": false, - "title": "Top", - "type": "integer" - }, - "bottom": 
{ - "default": 0, - "description": "Number of pixels to pad/crop from the bottom (negative values crop inwards, positive values pad outwards)", - "field_kind": "input", - "input": "any", - "orig_default": 0, - "orig_required": false, - "title": "Bottom", - "type": "integer" + "description": "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count", + "field_kind": "output", + "title": "CLIP", + "ui_hidden": false }, "type": { - "const": "img_pad_crop", - "default": "img_pad_crop", + "const": "clip_skip_output", + "default": "clip_skip_output", "field_kind": "node_attribute", "title": "type", "type": "string" } }, - "required": ["type", "id"], - "tags": ["image", "pad", "crop"], - "title": "Center Pad or Crop Image", - "type": "object", - "version": "1.0.0", - "output": { - "$ref": "#/components/schemas/ImageOutput" - } - }, - "Classification": { - "description": "The classification of an Invocation.\n- `Stable`: The invocation, including its inputs/outputs and internal logic, is stable. You may build workflows with it, having confidence that they will not break because of a change in this invocation.\n- `Beta`: The invocation is not yet stable, but is planned to be stable in the future. Workflows built around this invocation may break, but we are committed to supporting this invocation long-term.\n- `Prototype`: The invocation is not yet stable and may be removed from the application at any time. Workflows built around this invocation may break, and we are *not* committed to supporting this invocation.\n- `Deprecated`: The invocation is deprecated and may be removed in a future version.\n- `Internal`: The invocation is not intended for use by end-users. 
It may be changed or removed at any time, but is exposed for users to play with.\n- `Special`: The invocation is a special case and does not fit into any of the other classifications.", - "enum": ["stable", "beta", "prototype", "deprecated", "internal", "special"], - "title": "Classification", - "type": "string" - }, - "ClearResult": { - "properties": { - "deleted": { - "type": "integer", - "title": "Deleted", - "description": "Number of queue items deleted" - } - }, - "type": "object", - "required": ["deleted"], - "title": "ClearResult", - "description": "Result of clearing the session queue" - }, - "ClipVariantType": { - "type": "string", - "enum": ["large", "gigantic"], - "title": "ClipVariantType", - "description": "Variant type." - }, - "CogView4ConditioningField": { - "description": "A conditioning tensor primitive value", - "properties": { - "conditioning_name": { - "description": "The name of conditioning tensor", - "title": "Conditioning Name", - "type": "string" - } - }, - "required": ["conditioning_name"], - "title": "CogView4ConditioningField", + "required": ["output_meta", "clip", "type", "type"], + "title": "CLIPSkipInvocationOutput", "type": "object" }, - "CogView4ConditioningOutput": { - "class": "output", - "description": "Base class for nodes that output a CogView text conditioning tensor.", + "CLIPVision_Diffusers_Config": { "properties": { - "conditioning": { - "$ref": "#/components/schemas/CogView4ConditioningField", - "description": "Conditioning tensor", - "field_kind": "output", - "ui_hidden": false + "key": { + "type": "string", + "title": "Key", + "description": "A unique key for this model." 
}, - "type": { - "const": "cogview4_conditioning_output", - "default": "cogview4_conditioning_output", - "field_kind": "node_attribute", - "title": "type", - "type": "string" - } - }, - "required": ["output_meta", "conditioning", "type", "type"], - "title": "CogView4ConditioningOutput", - "type": "object" - }, - "CogView4DenoiseInvocation": { - "category": "image", - "class": "invocation", - "classification": "prototype", - "description": "Run the denoising process with a CogView4 model.", - "node_pack": "invokeai", - "properties": { - "board": { - "anyOf": [ - { - "$ref": "#/components/schemas/BoardField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "The board to save the image to", - "field_kind": "internal", - "input": "direct", - "orig_required": false, - "ui_hidden": false + "hash": { + "type": "string", + "title": "Hash", + "description": "The hash of the model file(s)." }, - "metadata": { + "path": { + "type": "string", + "title": "Path", + "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." + }, + "file_size": { + "type": "integer", + "title": "File Size", + "description": "The size of the model in bytes." + }, + "name": { + "type": "string", + "title": "Name", + "description": "Name of the model." + }, + "description": { "anyOf": [ { - "$ref": "#/components/schemas/MetadataField" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "Optional metadata to be saved with the image", - "field_kind": "internal", - "input": "connection", - "orig_required": false, - "ui_hidden": false - }, - "id": { - "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", - "field_kind": "node_attribute", - "title": "Id", - "type": "string" + "title": "Description", + "description": "Model description" }, - "is_intermediate": { - "default": false, - "description": "Whether or not this is an intermediate invocation.", - "field_kind": "node_attribute", - "input": "direct", - "orig_required": true, - "title": "Is Intermediate", - "type": "boolean", - "ui_hidden": false, - "ui_type": "IsIntermediate" + "source": { + "type": "string", + "title": "Source", + "description": "The original source of the model (path, URL or repo_id)." }, - "use_cache": { - "default": true, - "description": "Whether or not to use the cache", - "field_kind": "node_attribute", - "title": "Use Cache", - "type": "boolean" + "source_type": { + "$ref": "#/components/schemas/ModelSourceType", + "description": "The type of source" }, - "latents": { + "source_api_response": { "anyOf": [ { - "$ref": "#/components/schemas/LatentsField" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "Latents tensor", - "field_kind": "input", - "input": "connection", - "orig_default": null, - "orig_required": false + "title": "Source Api Response", + "description": "The original API response from the source, as stringified JSON." }, - "denoise_mask": { + "cover_image": { "anyOf": [ { - "$ref": "#/components/schemas/DenoiseMaskField" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "A mask of the region to apply the denoising process to. 
Values of 0.0 represent the regions to be fully denoised, and 1.0 represent the regions to be preserved.", - "field_kind": "input", - "input": "connection", - "orig_default": null, - "orig_required": false + "title": "Cover Image", + "description": "Url for image to preview model" }, - "denoising_start": { - "default": 0.0, - "description": "When to start denoising, expressed a percentage of total steps", - "field_kind": "input", - "input": "any", - "maximum": 1, - "minimum": 0, - "orig_default": 0.0, - "orig_required": false, - "title": "Denoising Start", - "type": "number" + "format": { + "type": "string", + "const": "diffusers", + "title": "Format", + "default": "diffusers" }, - "denoising_end": { - "default": 1.0, - "description": "When to stop denoising, expressed a percentage of total steps", - "field_kind": "input", - "input": "any", - "maximum": 1, - "minimum": 0, - "orig_default": 1.0, - "orig_required": false, - "title": "Denoising End", - "type": "number" + "repo_variant": { + "$ref": "#/components/schemas/ModelRepoVariant", + "default": "" }, - "transformer": { - "anyOf": [ - { - "$ref": "#/components/schemas/TransformerField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "CogView4 model (Transformer) to load", - "field_kind": "input", - "input": "connection", - "orig_required": true, - "title": "Transformer" + "base": { + "type": "string", + "const": "any", + "title": "Base", + "default": "any" }, - "positive_conditioning": { - "anyOf": [ - { - "$ref": "#/components/schemas/CogView4ConditioningField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Positive conditioning tensor", - "field_kind": "input", - "input": "connection", - "orig_required": true + "type": { + "type": "string", + "const": "clip_vision", + "title": "Type", + "default": "clip_vision" }, - "negative_conditioning": { + "cpu_only": { "anyOf": [ { - "$ref": "#/components/schemas/CogView4ConditioningField" + "type": "boolean" }, { "type": 
"null" } ], - "default": null, - "description": "Negative conditioning tensor", - "field_kind": "input", - "input": "connection", - "orig_required": true - }, - "cfg_scale": { - "anyOf": [ - { - "type": "number" - }, - { - "items": { - "type": "number" - }, - "type": "array" - } - ], - "default": 3.5, - "description": "Classifier-Free Guidance scale", - "field_kind": "input", - "input": "any", - "orig_default": 3.5, - "orig_required": false, - "title": "CFG Scale" - }, - "width": { - "default": 1024, - "description": "Width of the generated image.", - "field_kind": "input", - "input": "any", - "multipleOf": 32, - "orig_default": 1024, - "orig_required": false, - "title": "Width", - "type": "integer" - }, - "height": { - "default": 1024, - "description": "Height of the generated image.", - "field_kind": "input", - "input": "any", - "multipleOf": 32, - "orig_default": 1024, - "orig_required": false, - "title": "Height", - "type": "integer" - }, - "steps": { - "default": 25, - "description": "Number of steps to run", - "exclusiveMinimum": 0, - "field_kind": "input", - "input": "any", - "orig_default": 25, - "orig_required": false, - "title": "Steps", - "type": "integer" - }, - "seed": { - "default": 0, - "description": "Randomness seed for reproducibility.", - "field_kind": "input", - "input": "any", - "orig_default": 0, - "orig_required": false, - "title": "Seed", - "type": "integer" - }, - "type": { - "const": "cogview4_denoise", - "default": "cogview4_denoise", - "field_kind": "node_attribute", - "title": "type", - "type": "string" + "title": "Cpu Only", + "description": "Whether this model should run on CPU only" } }, - "required": ["type", "id"], - "tags": ["image", "cogview4"], - "title": "Denoise - CogView4", "type": "object", - "version": "1.0.0", - "output": { - "$ref": "#/components/schemas/LatentsOutput" - } + "required": [ + "key", + "hash", + "path", + "file_size", + "name", + "description", + "source", + "source_type", + "source_api_response", + 
"cover_image", + "format", + "repo_variant", + "base", + "type", + "cpu_only" + ], + "title": "CLIPVision_Diffusers_Config", + "description": "Model config for CLIPVision." }, - "CogView4ImageToLatentsInvocation": { - "category": "image", + "CV2InfillInvocation": { + "category": "inpaint", "class": "invocation", - "classification": "prototype", - "description": "Generates latents from an image.", + "classification": "stable", + "description": "Infills transparent areas of an image using OpenCV Inpainting", "node_pack": "invokeai", "properties": { "board": { @@ -11700,82 +13103,78 @@ } ], "default": null, - "description": "The image to encode.", + "description": "The image to process", "field_kind": "input", "input": "any", "orig_required": true }, - "vae": { - "anyOf": [ - { - "$ref": "#/components/schemas/VAEField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "VAE", - "field_kind": "input", - "input": "connection", - "orig_required": true - }, "type": { - "const": "cogview4_i2l", - "default": "cogview4_i2l", + "const": "infill_cv2", + "default": "infill_cv2", "field_kind": "node_attribute", "title": "type", "type": "string" } }, "required": ["type", "id"], - "tags": ["image", "latents", "vae", "i2l", "cogview4"], - "title": "Image to Latents - CogView4", + "tags": ["image", "inpaint"], + "title": "CV2 Infill", "type": "object", - "version": "1.0.0", + "version": "1.2.2", "output": { - "$ref": "#/components/schemas/LatentsOutput" + "$ref": "#/components/schemas/ImageOutput" } }, - "CogView4LatentsToImageInvocation": { - "category": "latents", - "class": "invocation", - "classification": "prototype", - "description": "Generates an image from latents.", - "node_pack": "invokeai", + "CacheStats": { "properties": { - "board": { - "anyOf": [ - { - "$ref": "#/components/schemas/BoardField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "The board to save the image to", - "field_kind": "internal", - "input": "direct", - 
"orig_required": false, - "ui_hidden": false + "hits": { + "type": "integer", + "title": "Hits", + "default": 0 }, - "metadata": { - "anyOf": [ - { - "$ref": "#/components/schemas/MetadataField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Optional metadata to be saved with the image", - "field_kind": "internal", - "input": "connection", - "orig_required": false, - "ui_hidden": false + "misses": { + "type": "integer", + "title": "Misses", + "default": 0 + }, + "high_watermark": { + "type": "integer", + "title": "High Watermark", + "default": 0 + }, + "in_cache": { + "type": "integer", + "title": "In Cache", + "default": 0 + }, + "cleared": { + "type": "integer", + "title": "Cleared", + "default": 0 + }, + "cache_size": { + "type": "integer", + "title": "Cache Size", + "default": 0 }, + "loaded_model_sizes": { + "additionalProperties": { + "type": "integer" + }, + "type": "object", + "title": "Loaded Model Sizes" + } + }, + "type": "object", + "title": "CacheStats" + }, + "CalculateImageTilesEvenSplitInvocation": { + "category": "tiles", + "class": "invocation", + "classification": "stable", + "description": "Calculate the coordinates and overlaps of tiles that cover a target image shape.", + "node_pack": "invokeai", + "properties": { "id": { "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", "field_kind": "node_attribute", @@ -11800,58 +13199,84 @@ "title": "Use Cache", "type": "boolean" }, - "latents": { - "anyOf": [ - { - "$ref": "#/components/schemas/LatentsField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Latents tensor", + "image_width": { + "default": 1024, + "description": "The image width, in pixels, to calculate tiles for.", "field_kind": "input", - "input": "connection", - "orig_required": true - }, - "vae": { - "anyOf": [ - { - "$ref": "#/components/schemas/VAEField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "VAE", + "input": "any", + "minimum": 1, + "orig_default": 1024, + "orig_required": false, + "title": "Image Width", + "type": "integer" + }, + "image_height": { + "default": 1024, + "description": "The image height, in pixels, to calculate tiles for.", "field_kind": "input", - "input": "connection", - "orig_required": true + "input": "any", + "minimum": 1, + "orig_default": 1024, + "orig_required": false, + "title": "Image Height", + "type": "integer" + }, + "num_tiles_x": { + "default": 2, + "description": "Number of tiles to divide image into on the x axis", + "field_kind": "input", + "input": "any", + "minimum": 1, + "orig_default": 2, + "orig_required": false, + "title": "Num Tiles X", + "type": "integer" + }, + "num_tiles_y": { + "default": 2, + "description": "Number of tiles to divide image into on the y axis", + "field_kind": "input", + "input": "any", + "minimum": 1, + "orig_default": 2, + "orig_required": false, + "title": "Num Tiles Y", + "type": "integer" + }, + "overlap": { + "default": 128, + "description": "The overlap, in pixels, between adjacent tiles.", + "field_kind": "input", + "input": "any", + "minimum": 0, + "multipleOf": 8, + "orig_default": 128, + "orig_required": false, + "title": "Overlap", + "type": "integer" }, "type": { - "const": "cogview4_l2i", - "default": "cogview4_l2i", + "const": 
"calculate_image_tiles_even_split", + "default": "calculate_image_tiles_even_split", "field_kind": "node_attribute", "title": "type", "type": "string" } }, "required": ["type", "id"], - "tags": ["latents", "image", "vae", "l2i", "cogview4"], - "title": "Latents to Image - CogView4", + "tags": ["tiles"], + "title": "Calculate Image Tiles Even Split", "type": "object", - "version": "1.0.0", + "version": "1.1.1", "output": { - "$ref": "#/components/schemas/ImageOutput" + "$ref": "#/components/schemas/CalculateImageTilesOutput" } }, - "CogView4ModelLoaderInvocation": { - "category": "model", + "CalculateImageTilesInvocation": { + "category": "tiles", "class": "invocation", - "classification": "prototype", - "description": "Loads a CogView4 base model, outputting its submodels.", + "classification": "stable", + "description": "Calculate the coordinates and overlaps of tiles that cover a target image shape.", "node_pack": "invokeai", "properties": { "id": { @@ -11878,154 +13303,83 @@ "title": "Use Cache", "type": "boolean" }, - "model": { - "$ref": "#/components/schemas/ModelIdentifierField", - "description": "CogView4 model (Transformer) to load", + "image_width": { + "default": 1024, + "description": "The image width, in pixels, to calculate tiles for.", "field_kind": "input", - "input": "direct", - "orig_required": true, - "ui_model_base": ["cogview4"], - "ui_model_type": ["main"] - }, - "type": { - "const": "cogview4_model_loader", - "default": "cogview4_model_loader", - "field_kind": "node_attribute", - "title": "type", - "type": "string" - } - }, - "required": ["model", "type", "id"], - "tags": ["model", "cogview4"], - "title": "Main Model - CogView4", - "type": "object", - "version": "1.0.0", - "output": { - "$ref": "#/components/schemas/CogView4ModelLoaderOutput" - } - }, - "CogView4ModelLoaderOutput": { - "class": "output", - "description": "CogView4 base model loader output.", - "properties": { - "transformer": { - "$ref": 
"#/components/schemas/TransformerField", - "description": "Transformer", - "field_kind": "output", - "title": "Transformer", - "ui_hidden": false - }, - "glm_encoder": { - "$ref": "#/components/schemas/GlmEncoderField", - "description": "GLM (THUDM) tokenizer and text encoder", - "field_kind": "output", - "title": "GLM Encoder", - "ui_hidden": false - }, - "vae": { - "$ref": "#/components/schemas/VAEField", - "description": "VAE", - "field_kind": "output", - "title": "VAE", - "ui_hidden": false - }, - "type": { - "const": "cogview4_model_loader_output", - "default": "cogview4_model_loader_output", - "field_kind": "node_attribute", - "title": "type", - "type": "string" - } - }, - "required": ["output_meta", "transformer", "glm_encoder", "vae", "type", "type"], - "title": "CogView4ModelLoaderOutput", - "type": "object" - }, - "CogView4TextEncoderInvocation": { - "category": "conditioning", - "class": "invocation", - "classification": "prototype", - "description": "Encodes and preps a prompt for a cogview4 image.", - "node_pack": "invokeai", - "properties": { - "id": { - "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", - "field_kind": "node_attribute", - "title": "Id", - "type": "string" + "input": "any", + "minimum": 1, + "orig_default": 1024, + "orig_required": false, + "title": "Image Width", + "type": "integer" }, - "is_intermediate": { - "default": false, - "description": "Whether or not this is an intermediate invocation.", - "field_kind": "node_attribute", - "input": "direct", - "orig_required": true, - "title": "Is Intermediate", - "type": "boolean", - "ui_hidden": false, - "ui_type": "IsIntermediate" + "image_height": { + "default": 1024, + "description": "The image height, in pixels, to calculate tiles for.", + "field_kind": "input", + "input": "any", + "minimum": 1, + "orig_default": 1024, + "orig_required": false, + "title": "Image Height", + "type": "integer" }, - "use_cache": { - "default": true, - "description": "Whether or not to use the cache", - "field_kind": "node_attribute", - "title": "Use Cache", - "type": "boolean" + "tile_width": { + "default": 576, + "description": "The tile width, in pixels.", + "field_kind": "input", + "input": "any", + "minimum": 1, + "orig_default": 576, + "orig_required": false, + "title": "Tile Width", + "type": "integer" }, - "prompt": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Text prompt to encode.", + "tile_height": { + "default": 576, + "description": "The tile height, in pixels.", "field_kind": "input", "input": "any", - "orig_required": true, - "title": "Prompt", - "ui_component": "textarea" + "minimum": 1, + "orig_default": 576, + "orig_required": false, + "title": "Tile Height", + "type": "integer" }, - "glm_encoder": { - "anyOf": [ - { - "$ref": "#/components/schemas/GlmEncoderField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "GLM (THUDM) tokenizer and text encoder", + "overlap": { + "default": 128, + "description": "The target overlap, in pixels, between adjacent tiles. 
Adjacent tiles will overlap by at least this amount", "field_kind": "input", - "input": "connection", - "orig_required": true, - "title": "GLM Encoder" + "input": "any", + "minimum": 0, + "orig_default": 128, + "orig_required": false, + "title": "Overlap", + "type": "integer" }, "type": { - "const": "cogview4_text_encoder", - "default": "cogview4_text_encoder", + "const": "calculate_image_tiles", + "default": "calculate_image_tiles", "field_kind": "node_attribute", "title": "type", "type": "string" } }, "required": ["type", "id"], - "tags": ["prompt", "conditioning", "cogview4"], - "title": "Prompt - CogView4", + "tags": ["tiles"], + "title": "Calculate Image Tiles", "type": "object", - "version": "1.0.0", + "version": "1.0.1", "output": { - "$ref": "#/components/schemas/CogView4ConditioningOutput" + "$ref": "#/components/schemas/CalculateImageTilesOutput" } }, - "CollectInvocation": { + "CalculateImageTilesMinimumOverlapInvocation": { + "category": "tiles", "class": "invocation", "classification": "stable", - "description": "Collects values into a collection", + "description": "Calculate the coordinates and overlaps of tiles that cover a target image shape.", "node_pack": "invokeai", "properties": { "id": { @@ -12052,105 +13406,147 @@ "title": "Use Cache", "type": "boolean" }, - "item": { - "anyOf": [ - {}, - { - "type": "null" - } - ], - "default": null, - "description": "The item to collect (all inputs must be of the same type)", + "image_width": { + "default": 1024, + "description": "The image width, in pixels, to calculate tiles for.", "field_kind": "input", - "input": "connection", - "orig_default": null, + "input": "any", + "minimum": 1, + "orig_default": 1024, "orig_required": false, - "title": "Collection Item", - "ui_type": "CollectionItemField" + "title": "Image Width", + "type": "integer" }, - "collection": { - "default": [], - "description": "The collection, will be provided on execution", + "image_height": { + "default": 1024, + "description": "The 
image height, in pixels, to calculate tiles for.", "field_kind": "input", "input": "any", - "items": {}, - "orig_default": [], + "minimum": 1, + "orig_default": 1024, "orig_required": false, - "title": "Collection", - "type": "array", - "ui_hidden": true + "title": "Image Height", + "type": "integer" + }, + "tile_width": { + "default": 576, + "description": "The tile width, in pixels.", + "field_kind": "input", + "input": "any", + "minimum": 1, + "orig_default": 576, + "orig_required": false, + "title": "Tile Width", + "type": "integer" + }, + "tile_height": { + "default": 576, + "description": "The tile height, in pixels.", + "field_kind": "input", + "input": "any", + "minimum": 1, + "orig_default": 576, + "orig_required": false, + "title": "Tile Height", + "type": "integer" + }, + "min_overlap": { + "default": 128, + "description": "Minimum overlap between adjacent tiles, in pixels.", + "field_kind": "input", + "input": "any", + "minimum": 0, + "orig_default": 128, + "orig_required": false, + "title": "Min Overlap", + "type": "integer" }, "type": { - "const": "collect", - "default": "collect", + "const": "calculate_image_tiles_min_overlap", + "default": "calculate_image_tiles_min_overlap", "field_kind": "node_attribute", "title": "type", "type": "string" } }, "required": ["type", "id"], - "title": "CollectInvocation", + "tags": ["tiles"], + "title": "Calculate Image Tiles Minimum Overlap", "type": "object", - "version": "1.0.0", + "version": "1.0.1", "output": { - "$ref": "#/components/schemas/CollectInvocationOutput" + "$ref": "#/components/schemas/CalculateImageTilesOutput" } }, - "CollectInvocationOutput": { + "CalculateImageTilesOutput": { "class": "output", "properties": { - "collection": { - "description": "The collection of input items", + "tiles": { + "description": "The tiles coordinates that cover a particular image shape.", "field_kind": "output", - "items": {}, - "title": "Collection", + "items": { + "$ref": "#/components/schemas/Tile" + }, + "title": 
"Tiles", "type": "array", - "ui_hidden": false, - "ui_type": "CollectionField" + "ui_hidden": false }, "type": { - "const": "collect_output", - "default": "collect_output", + "const": "calculate_image_tiles_output", + "default": "calculate_image_tiles_output", "field_kind": "node_attribute", "title": "type", "type": "string" } }, - "required": ["output_meta", "collection", "type", "type"], - "title": "CollectInvocationOutput", + "required": ["output_meta", "tiles", "type", "type"], + "title": "CalculateImageTilesOutput", "type": "object" }, - "ColorCollectionOutput": { - "class": "output", - "description": "Base class for nodes that output a collection of colors", + "CancelAllExceptCurrentResult": { "properties": { - "collection": { - "description": "The output colors", - "field_kind": "output", - "items": { - "$ref": "#/components/schemas/ColorField" - }, - "title": "Collection", - "type": "array", - "ui_hidden": false - }, - "type": { - "const": "color_collection_output", - "default": "color_collection_output", - "field_kind": "node_attribute", - "title": "type", - "type": "string" + "canceled": { + "type": "integer", + "title": "Canceled", + "description": "Number of queue items canceled" } }, - "required": ["output_meta", "collection", "type", "type"], - "title": "ColorCollectionOutput", - "type": "object" + "type": "object", + "required": ["canceled"], + "title": "CancelAllExceptCurrentResult", + "description": "Result of canceling all except current" }, - "ColorCorrectInvocation": { - "category": "image", + "CancelByBatchIDsResult": { + "properties": { + "canceled": { + "type": "integer", + "title": "Canceled", + "description": "Number of queue items canceled" + } + }, + "type": "object", + "required": ["canceled"], + "title": "CancelByBatchIDsResult", + "description": "Result of canceling by list of batch ids" + }, + "CancelByDestinationResult": { + "properties": { + "canceled": { + "type": "integer", + "title": "Canceled", + "description": "Number of queue 
items canceled" + } + }, + "type": "object", + "required": ["canceled"], + "title": "CancelByDestinationResult", + "description": "Result of canceling by a destination" + }, + "CannyEdgeDetectionInvocation": { + "category": "controlnet", "class": "invocation", "classification": "stable", - "description": "Matches the color histogram of a base image to a reference image, optionally\nusing a mask to only color-correct certain regions of the base image.", + "description": "Geneartes an edge map using a cv2's Canny algorithm.", "node_pack": "invokeai", "properties": { "board": { @@ -12209,22 +13605,7 @@ "title": "Use Cache", "type": "boolean" }, - "base_image": { - "anyOf": [ - { - "$ref": "#/components/schemas/ImageField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "The image to color-correct", - "field_kind": "input", - "input": "any", - "orig_required": true - }, - "color_reference": { + "image": { "anyOf": [ { "$ref": "#/components/schemas/ImageField" @@ -12234,103 +13615,64 @@ } ], "default": null, - "description": "Reference image for color-correction", + "description": "The image to process", "field_kind": "input", "input": "any", "orig_required": true }, - "mask": { - "anyOf": [ - { - "$ref": "#/components/schemas/ImageField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Optional mask to limit color correction area", + "low_threshold": { + "default": 100, + "description": "The low threshold of the Canny pixel gradient (0-255)", "field_kind": "input", "input": "any", - "orig_default": null, - "orig_required": false + "maximum": 255, + "minimum": 0, + "orig_default": 100, + "orig_required": false, + "title": "Low Threshold", + "type": "integer" }, - "colorspace": { - "default": "RGB", - "description": "Colorspace in which to apply histogram matching", - "enum": ["RGB", "YCbCr", "YCbCr-Chroma", "YCbCr-Luma"], + "high_threshold": { + "default": 200, + "description": "The high threshold of the Canny pixel gradient 
(0-255)", "field_kind": "input", "input": "any", - "orig_default": "RGB", + "maximum": 255, + "minimum": 0, + "orig_default": 200, "orig_required": false, - "title": "Color Space", - "type": "string" + "title": "High Threshold", + "type": "integer" }, "type": { - "const": "color_correct", - "default": "color_correct", + "const": "canny_edge_detection", + "default": "canny_edge_detection", "field_kind": "node_attribute", "title": "type", "type": "string" } }, "required": ["type", "id"], - "tags": ["image", "color"], - "title": "Color Correct", + "tags": ["controlnet", "canny"], + "title": "Canny Edge Detection", "type": "object", - "version": "2.0.0", + "version": "1.0.0", "output": { "$ref": "#/components/schemas/ImageOutput" } }, - "ColorField": { - "description": "A color primitive field", + "CanvasOutputInvocation": { + "category": "canvas", + "class": "invocation", + "classification": "stable", + "description": "Outputs an image to the canvas staging area.\n\nUse this node in workflows intended for canvas workflow integration.\nConnect the final image of your workflow to this node to send it\nto the canvas staging area when run via 'Run Workflow on Canvas'.", + "node_pack": "invokeai", "properties": { - "r": { - "description": "The red component", - "maximum": 255, - "minimum": 0, - "title": "R", - "type": "integer" - }, - "g": { - "description": "The green component", - "maximum": 255, - "minimum": 0, - "title": "G", - "type": "integer" - }, - "b": { - "description": "The blue component", - "maximum": 255, - "minimum": 0, - "title": "B", - "type": "integer" - }, - "a": { - "description": "The alpha component", - "maximum": 255, - "minimum": 0, - "title": "A", - "type": "integer" - } - }, - "required": ["r", "g", "b", "a"], - "title": "ColorField", - "type": "object" - }, - "ColorInvocation": { - "category": "primitives", - "class": "invocation", - "classification": "stable", - "description": "A color primitive value", - "node_pack": "invokeai", - "properties": 
{ - "id": { - "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", - "field_kind": "node_attribute", - "title": "Id", - "type": "string" + "id": { + "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" }, "is_intermediate": { "default": false, @@ -12344,53 +13686,49 @@ "ui_type": "IsIntermediate" }, "use_cache": { - "default": true, + "default": false, "description": "Whether or not to use the cache", "field_kind": "node_attribute", "title": "Use Cache", "type": "boolean" }, - "color": { - "$ref": "#/components/schemas/ColorField", - "default": { - "r": 0, - "g": 0, - "b": 0, - "a": 255 - }, - "description": "The color value", + "image": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The image to process", "field_kind": "input", "input": "any", - "orig_default": { - "a": 255, - "b": 0, - "g": 0, - "r": 0 - }, - "orig_required": false + "orig_required": true }, "type": { - "const": "color", - "default": "color", + "const": "canvas_output", + "default": "canvas_output", "field_kind": "node_attribute", "title": "type", "type": "string" } }, "required": ["type", "id"], - "tags": ["primitives", "color"], - "title": "Color Primitive", + "tags": ["canvas", "output", "image"], + "title": "Canvas Output", "type": "object", - "version": "1.0.1", + "version": "1.0.0", "output": { - "$ref": "#/components/schemas/ColorOutput" + "$ref": "#/components/schemas/ImageOutput" } }, - "ColorMapInvocation": { - "category": "controlnet", + "CanvasPasteBackInvocation": { + "category": "image", "class": "invocation", "classification": "stable", - "description": "Generates a color map from the provided image.", + "description": "Combines two images by using the mask provided. 
Intended for use on the Unified Canvas.", "node_pack": "invokeai", "properties": { "board": { @@ -12449,7 +13787,7 @@ "title": "Use Cache", "type": "boolean" }, - "image": { + "source_image": { "anyOf": [ { "$ref": "#/components/schemas/ImageField" @@ -12459,68 +13797,108 @@ } ], "default": null, - "description": "The image to process", + "description": "The source image", "field_kind": "input", "input": "any", "orig_required": true }, - "tile_size": { - "default": 64, - "description": "Tile size", + "target_image": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The target image", "field_kind": "input", "input": "any", - "minimum": 1, - "orig_default": 64, + "orig_required": true + }, + "mask": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The mask to use when pasting", + "field_kind": "input", + "input": "any", + "orig_required": true + }, + "mask_blur": { + "default": 0, + "description": "The amount to blur the mask by", + "field_kind": "input", + "input": "any", + "minimum": 0, + "orig_default": 0, "orig_required": false, - "title": "Tile Size", + "title": "Mask Blur", "type": "integer" }, "type": { - "const": "color_map", - "default": "color_map", + "const": "canvas_paste_back", + "default": "canvas_paste_back", "field_kind": "node_attribute", "title": "type", "type": "string" } }, "required": ["type", "id"], - "tags": ["controlnet"], - "title": "Color Map", + "tags": ["image", "combine"], + "title": "Canvas Paste Back", "type": "object", - "version": "1.0.0", + "version": "1.0.1", "output": { "$ref": "#/components/schemas/ImageOutput" } }, - "ColorOutput": { - "class": "output", - "description": "Base class for nodes that output a single color", - "properties": { - "color": { - "$ref": "#/components/schemas/ColorField", - "description": "The output color", - "field_kind": "output", - 
"ui_hidden": false - }, - "type": { - "const": "color_output", - "default": "color_output", - "field_kind": "node_attribute", - "title": "type", - "type": "string" - } - }, - "required": ["output_meta", "color", "type", "type"], - "title": "ColorOutput", - "type": "object" - }, - "CompelInvocation": { - "category": "conditioning", + "CanvasV2MaskAndCropInvocation": { + "category": "image", "class": "invocation", - "classification": "stable", - "description": "Parse prompt using compel package to conditioning.", + "classification": "deprecated", + "description": "Handles Canvas V2 image output masking and cropping", "node_pack": "invokeai", "properties": { + "board": { + "anyOf": [ + { + "$ref": "#/components/schemas/BoardField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The board to save the image to", + "field_kind": "internal", + "input": "direct", + "orig_required": false, + "ui_hidden": false + }, + "metadata": { + "anyOf": [ + { + "$ref": "#/components/schemas/MetadataField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional metadata to be saved with the image", + "field_kind": "internal", + "input": "connection", + "orig_required": false, + "ui_hidden": false + }, "id": { "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", "field_kind": "node_attribute", @@ -12545,71 +13923,85 @@ "title": "Use Cache", "type": "boolean" }, - "prompt": { - "default": "", - "description": "Prompt to be parsed by Compel to create a conditioning tensor", + "source_image": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The source image onto which the masked generated image is pasted. 
If omitted, the masked generated image is returned with transparency.", "field_kind": "input", "input": "any", - "orig_default": "", - "orig_required": false, - "title": "Prompt", - "type": "string", - "ui_component": "textarea" + "orig_default": null, + "orig_required": false }, - "clip": { + "generated_image": { "anyOf": [ { - "$ref": "#/components/schemas/CLIPField" + "$ref": "#/components/schemas/ImageField" }, { "type": "null" } ], "default": null, - "description": "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count", + "description": "The image to apply the mask to", "field_kind": "input", "input": "any", - "orig_required": true, - "title": "CLIP" + "orig_required": true }, "mask": { "anyOf": [ { - "$ref": "#/components/schemas/TensorField" + "$ref": "#/components/schemas/ImageField" }, { "type": "null" } ], "default": null, - "description": "A mask defining the region that this conditioning prompt applies to.", + "description": "The mask to apply", "field_kind": "input", "input": "any", - "orig_default": null, - "orig_required": false + "orig_required": true + }, + "mask_blur": { + "default": 0, + "description": "The amount to blur the mask by", + "field_kind": "input", + "input": "any", + "minimum": 0, + "orig_default": 0, + "orig_required": false, + "title": "Mask Blur", + "type": "integer" }, "type": { - "const": "compel", - "default": "compel", + "const": "canvas_v2_mask_and_crop", + "default": "canvas_v2_mask_and_crop", "field_kind": "node_attribute", "title": "type", "type": "string" } }, "required": ["type", "id"], - "tags": ["prompt", "compel"], - "title": "Prompt - SD1.5", + "tags": ["image", "mask", "id"], + "title": "Canvas V2 Mask and Crop", "type": "object", - "version": "1.2.1", + "version": "1.0.0", "output": { - "$ref": "#/components/schemas/ConditioningOutput" + "$ref": "#/components/schemas/ImageOutput" } }, - "ConditioningCollectionInvocation": { - "category": "primitives", + "CenterPadCropInvocation": { + "category": "image", 
"class": "invocation", "classification": "stable", - "description": "A collection of conditioning tensor primitive values", + "description": "Pad or crop an image's sides from the center by specified pixels. Positive values are outside of the image.", "node_pack": "invokeai", "properties": { "id": { @@ -12636,177 +14028,143 @@ "title": "Use Cache", "type": "boolean" }, - "collection": { - "default": [], - "description": "The collection of conditioning tensors", + "image": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The image to crop", "field_kind": "input", "input": "any", - "items": { - "$ref": "#/components/schemas/ConditioningField" - }, - "orig_default": [], + "orig_required": true + }, + "left": { + "default": 0, + "description": "Number of pixels to pad/crop from the left (negative values crop inwards, positive values pad outwards)", + "field_kind": "input", + "input": "any", + "orig_default": 0, "orig_required": false, - "title": "Collection", - "type": "array" + "title": "Left", + "type": "integer" + }, + "right": { + "default": 0, + "description": "Number of pixels to pad/crop from the right (negative values crop inwards, positive values pad outwards)", + "field_kind": "input", + "input": "any", + "orig_default": 0, + "orig_required": false, + "title": "Right", + "type": "integer" + }, + "top": { + "default": 0, + "description": "Number of pixels to pad/crop from the top (negative values crop inwards, positive values pad outwards)", + "field_kind": "input", + "input": "any", + "orig_default": 0, + "orig_required": false, + "title": "Top", + "type": "integer" + }, + "bottom": { + "default": 0, + "description": "Number of pixels to pad/crop from the bottom (negative values crop inwards, positive values pad outwards)", + "field_kind": "input", + "input": "any", + "orig_default": 0, + "orig_required": false, + "title": "Bottom", + "type": "integer" }, "type": { - 
"const": "conditioning_collection", - "default": "conditioning_collection", + "const": "img_pad_crop", + "default": "img_pad_crop", "field_kind": "node_attribute", "title": "type", "type": "string" } }, "required": ["type", "id"], - "tags": ["primitives", "conditioning", "collection"], - "title": "Conditioning Collection Primitive", + "tags": ["image", "pad", "crop"], + "title": "Center Pad or Crop Image", "type": "object", - "version": "1.0.2", + "version": "1.0.0", "output": { - "$ref": "#/components/schemas/ConditioningCollectionOutput" + "$ref": "#/components/schemas/ImageOutput" } }, - "ConditioningCollectionOutput": { - "class": "output", - "description": "Base class for nodes that output a collection of conditioning tensors", + "Classification": { + "description": "The classification of an Invocation.\n- `Stable`: The invocation, including its inputs/outputs and internal logic, is stable. You may build workflows with it, having confidence that they will not break because of a change in this invocation.\n- `Beta`: The invocation is not yet stable, but is planned to be stable in the future. Workflows built around this invocation may break, but we are committed to supporting this invocation long-term.\n- `Prototype`: The invocation is not yet stable and may be removed from the application at any time. Workflows built around this invocation may break, and we are *not* committed to supporting this invocation.\n- `Deprecated`: The invocation is deprecated and may be removed in a future version.\n- `Internal`: The invocation is not intended for use by end-users. 
It may be changed or removed at any time, but is exposed for users to play with.\n- `Special`: The invocation is a special case and does not fit into any of the other classifications.", + "enum": ["stable", "beta", "prototype", "deprecated", "internal", "special"], + "title": "Classification", + "type": "string" + }, + "ClearResult": { "properties": { - "collection": { - "description": "The output conditioning tensors", - "field_kind": "output", - "items": { - "$ref": "#/components/schemas/ConditioningField" - }, - "title": "Collection", - "type": "array", - "ui_hidden": false - }, - "type": { - "const": "conditioning_collection_output", - "default": "conditioning_collection_output", - "field_kind": "node_attribute", - "title": "type", - "type": "string" + "deleted": { + "type": "integer", + "title": "Deleted", + "description": "Number of queue items deleted" } }, - "required": ["output_meta", "collection", "type", "type"], - "title": "ConditioningCollectionOutput", - "type": "object" + "type": "object", + "required": ["deleted"], + "title": "ClearResult", + "description": "Result of clearing the session queue" }, - "ConditioningField": { + "ClipVariantType": { + "type": "string", + "enum": ["large", "gigantic"], + "title": "ClipVariantType", + "description": "Variant type." + }, + "CogView4ConditioningField": { "description": "A conditioning tensor primitive value", "properties": { "conditioning_name": { "description": "The name of conditioning tensor", "title": "Conditioning Name", "type": "string" - }, - "mask": { - "anyOf": [ - { - "$ref": "#/components/schemas/TensorField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "The mask associated with this conditioning tensor. Excluded regions should be set to False, included regions should be set to True." 
} }, "required": ["conditioning_name"], - "title": "ConditioningField", + "title": "CogView4ConditioningField", "type": "object" }, - "ConditioningInvocation": { - "category": "primitives", - "class": "invocation", - "classification": "stable", - "description": "A conditioning tensor primitive value", - "node_pack": "invokeai", - "properties": { - "id": { - "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", - "field_kind": "node_attribute", - "title": "Id", - "type": "string" - }, - "is_intermediate": { - "default": false, - "description": "Whether or not this is an intermediate invocation.", - "field_kind": "node_attribute", - "input": "direct", - "orig_required": true, - "title": "Is Intermediate", - "type": "boolean", - "ui_hidden": false, - "ui_type": "IsIntermediate" - }, - "use_cache": { - "default": true, - "description": "Whether or not to use the cache", - "field_kind": "node_attribute", - "title": "Use Cache", - "type": "boolean" - }, - "conditioning": { - "anyOf": [ - { - "$ref": "#/components/schemas/ConditioningField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Conditioning tensor", - "field_kind": "input", - "input": "connection", - "orig_required": true - }, - "type": { - "const": "conditioning", - "default": "conditioning", - "field_kind": "node_attribute", - "title": "type", - "type": "string" - } - }, - "required": ["type", "id"], - "tags": ["primitives", "conditioning"], - "title": "Conditioning Primitive", - "type": "object", - "version": "1.0.1", - "output": { - "$ref": "#/components/schemas/ConditioningOutput" - } - }, - "ConditioningOutput": { + "CogView4ConditioningOutput": { "class": "output", - "description": "Base class for nodes that output a single conditioning tensor", + "description": "Base class for nodes that output a CogView text conditioning tensor.", "properties": { "conditioning": { - "$ref": "#/components/schemas/ConditioningField", + 
"$ref": "#/components/schemas/CogView4ConditioningField", "description": "Conditioning tensor", "field_kind": "output", "ui_hidden": false }, "type": { - "const": "conditioning_output", - "default": "conditioning_output", + "const": "cogview4_conditioning_output", + "default": "cogview4_conditioning_output", "field_kind": "node_attribute", "title": "type", "type": "string" } }, "required": ["output_meta", "conditioning", "type", "type"], - "title": "ConditioningOutput", + "title": "CogView4ConditioningOutput", "type": "object" }, - "ContentShuffleInvocation": { - "category": "controlnet", + "CogView4DenoiseInvocation": { + "category": "image", "class": "invocation", - "classification": "stable", - "description": "Shuffles the image, similar to a 'liquify' filter.", + "classification": "prototype", + "description": "Run the denoising process with a CogView4 model.", "node_pack": "invokeai", "properties": { "board": { @@ -12865,79 +14223,109 @@ "title": "Use Cache", "type": "boolean" }, - "image": { + "latents": { "anyOf": [ { - "$ref": "#/components/schemas/ImageField" + "$ref": "#/components/schemas/LatentsField" }, { "type": "null" } ], "default": null, - "description": "The image to process", + "description": "Latents tensor", "field_kind": "input", - "input": "any", - "orig_required": true + "input": "connection", + "orig_default": null, + "orig_required": false }, - "scale_factor": { - "default": 256, - "description": "The scale factor used for the shuffle", + "denoise_mask": { + "anyOf": [ + { + "$ref": "#/components/schemas/DenoiseMaskField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "A mask of the region to apply the denoising process to. 
Values of 0.0 represent the regions to be fully denoised, and 1.0 represent the regions to be preserved.", + "field_kind": "input", + "input": "connection", + "orig_default": null, + "orig_required": false + }, + "denoising_start": { + "default": 0.0, + "description": "When to start denoising, expressed a percentage of total steps", "field_kind": "input", "input": "any", + "maximum": 1, "minimum": 0, - "orig_default": 256, + "orig_default": 0.0, "orig_required": false, - "title": "Scale Factor", - "type": "integer" + "title": "Denoising Start", + "type": "number" }, - "type": { - "const": "content_shuffle", - "default": "content_shuffle", - "field_kind": "node_attribute", - "title": "type", - "type": "string" - } - }, - "required": ["type", "id"], - "tags": ["controlnet", "normal"], - "title": "Content Shuffle", - "type": "object", - "version": "1.0.0", - "output": { - "$ref": "#/components/schemas/ImageOutput" - } - }, - "ControlAdapterDefaultSettings": { - "properties": { - "preprocessor": { + "denoising_end": { + "default": 1.0, + "description": "When to stop denoising, expressed a percentage of total steps", + "field_kind": "input", + "input": "any", + "maximum": 1, + "minimum": 0, + "orig_default": 1.0, + "orig_required": false, + "title": "Denoising End", + "type": "number" + }, + "transformer": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/TransformerField" }, { "type": "null" } ], - "title": "Preprocessor" - } - }, - "additionalProperties": false, - "type": "object", - "required": ["preprocessor"], - "title": "ControlAdapterDefaultSettings" - }, - "ControlField": { - "properties": { - "image": { - "$ref": "#/components/schemas/ImageField", - "description": "The control image" + "default": null, + "description": "CogView4 model (Transformer) to load", + "field_kind": "input", + "input": "connection", + "orig_required": true, + "title": "Transformer" }, - "control_model": { - "$ref": "#/components/schemas/ModelIdentifierField", - 
"description": "The ControlNet model to use" + "positive_conditioning": { + "anyOf": [ + { + "$ref": "#/components/schemas/CogView4ConditioningField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Positive conditioning tensor", + "field_kind": "input", + "input": "connection", + "orig_required": true }, - "control_weight": { + "negative_conditioning": { + "anyOf": [ + { + "$ref": "#/components/schemas/CogView4ConditioningField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Negative conditioning tensor", + "field_kind": "input", + "input": "connection", + "orig_required": true + }, + "cfg_scale": { "anyOf": [ { "type": "number" @@ -12949,209 +14337,223 @@ "type": "array" } ], - "default": 1, - "description": "The weight given to the ControlNet", - "title": "Control Weight" - }, - "begin_step_percent": { - "default": 0, - "description": "When the ControlNet is first applied (% of total steps)", - "maximum": 1, - "minimum": 0, - "title": "Begin Step Percent", - "type": "number" + "default": 3.5, + "description": "Classifier-Free Guidance scale", + "field_kind": "input", + "input": "any", + "orig_default": 3.5, + "orig_required": false, + "title": "CFG Scale" }, - "end_step_percent": { - "default": 1, - "description": "When the ControlNet is last applied (% of total steps)", - "maximum": 1, - "minimum": 0, - "title": "End Step Percent", - "type": "number" + "width": { + "default": 1024, + "description": "Width of the generated image.", + "field_kind": "input", + "input": "any", + "multipleOf": 32, + "orig_default": 1024, + "orig_required": false, + "title": "Width", + "type": "integer" }, - "control_mode": { - "default": "balanced", - "description": "The control mode to use", - "enum": ["balanced", "more_prompt", "more_control", "unbalanced"], - "title": "Control Mode", - "type": "string" + "height": { + "default": 1024, + "description": "Height of the generated image.", + "field_kind": "input", + "input": "any", + 
"multipleOf": 32, + "orig_default": 1024, + "orig_required": false, + "title": "Height", + "type": "integer" }, - "resize_mode": { - "default": "just_resize", - "description": "The resize mode to use", - "enum": ["just_resize", "crop_resize", "fill_resize", "just_resize_simple"], - "title": "Resize Mode", - "type": "string" - } - }, - "required": ["image", "control_model"], - "title": "ControlField", - "type": "object" - }, - "ControlLoRAField": { - "properties": { - "lora": { - "$ref": "#/components/schemas/ModelIdentifierField", - "description": "Info to load lora model" + "steps": { + "default": 25, + "description": "Number of steps to run", + "exclusiveMinimum": 0, + "field_kind": "input", + "input": "any", + "orig_default": 25, + "orig_required": false, + "title": "Steps", + "type": "integer" }, - "weight": { - "description": "Weight to apply to lora model", - "title": "Weight", - "type": "number" + "seed": { + "default": 0, + "description": "Randomness seed for reproducibility.", + "field_kind": "input", + "input": "any", + "orig_default": 0, + "orig_required": false, + "title": "Seed", + "type": "integer" }, - "img": { - "$ref": "#/components/schemas/ImageField", - "description": "Image to use in structural conditioning" + "type": { + "const": "cogview4_denoise", + "default": "cogview4_denoise", + "field_kind": "node_attribute", + "title": "type", + "type": "string" } }, - "required": ["lora", "weight", "img"], - "title": "ControlLoRAField", - "type": "object" + "required": ["type", "id"], + "tags": ["image", "cogview4"], + "title": "Denoise - CogView4", + "type": "object", + "version": "1.0.0", + "output": { + "$ref": "#/components/schemas/LatentsOutput" + } }, - "ControlLoRA_LyCORIS_FLUX_Config": { + "CogView4ImageToLatentsInvocation": { + "category": "image", + "class": "invocation", + "classification": "prototype", + "description": "Generates latents from an image.", + "node_pack": "invokeai", "properties": { - "key": { - "type": "string", - "title": 
"Key", - "description": "A unique key for this model." - }, - "hash": { - "type": "string", - "title": "Hash", - "description": "The hash of the model file(s)." - }, - "path": { - "type": "string", - "title": "Path", - "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." - }, - "file_size": { - "type": "integer", - "title": "File Size", - "description": "The size of the model in bytes." - }, - "name": { - "type": "string", - "title": "Name", - "description": "Name of the model." + "board": { + "anyOf": [ + { + "$ref": "#/components/schemas/BoardField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The board to save the image to", + "field_kind": "internal", + "input": "direct", + "orig_required": false, + "ui_hidden": false }, - "description": { + "metadata": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/MetadataField" }, { "type": "null" } ], - "title": "Description", - "description": "Model description" + "default": null, + "description": "Optional metadata to be saved with the image", + "field_kind": "internal", + "input": "connection", + "orig_required": false, + "ui_hidden": false }, - "source": { - "type": "string", - "title": "Source", - "description": "The original source of the model (path, URL or repo_id)." + "id": { + "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" }, - "source_type": { - "$ref": "#/components/schemas/ModelSourceType", - "description": "The type of source" + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" }, - "source_api_response": { + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" + }, + "image": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/ImageField" }, { "type": "null" } ], - "title": "Source Api Response", - "description": "The original API response from the source, as stringified JSON." + "default": null, + "description": "The image to encode.", + "field_kind": "input", + "input": "any", + "orig_required": true }, - "cover_image": { + "vae": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/VAEField" }, { "type": "null" } ], - "title": "Cover Image", - "description": "Url for image to preview model" + "default": null, + "description": "VAE", + "field_kind": "input", + "input": "connection", + "orig_required": true }, - "default_settings": { + "type": { + "const": "cogview4_i2l", + "default": "cogview4_i2l", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["type", "id"], + "tags": ["image", "latents", "vae", "i2l", "cogview4"], + "title": "Image to Latents - CogView4", + "type": "object", + "version": "1.0.0", + "output": { + "$ref": "#/components/schemas/LatentsOutput" + } + }, + "CogView4LatentsToImageInvocation": { + "category": "latents", + "class": "invocation", + "classification": "prototype", + "description": "Generates an image from 
latents.", + "node_pack": "invokeai", + "properties": { + "board": { "anyOf": [ { - "$ref": "#/components/schemas/ControlAdapterDefaultSettings" + "$ref": "#/components/schemas/BoardField" }, { "type": "null" } - ] - }, - "base": { - "type": "string", - "const": "flux", - "title": "Base", - "default": "flux" - }, - "type": { - "type": "string", - "const": "control_lora", - "title": "Type", - "default": "control_lora" - }, - "format": { - "type": "string", - "const": "lycoris", - "title": "Format", - "default": "lycoris" + ], + "default": null, + "description": "The board to save the image to", + "field_kind": "internal", + "input": "direct", + "orig_required": false, + "ui_hidden": false }, - "trigger_phrases": { + "metadata": { "anyOf": [ { - "items": { - "type": "string" - }, - "type": "array", - "uniqueItems": true + "$ref": "#/components/schemas/MetadataField" }, { "type": "null" } ], - "title": "Trigger Phrases" - } - }, - "type": "object", - "required": [ - "key", - "hash", - "path", - "file_size", - "name", - "description", - "source", - "source_type", - "source_api_response", - "cover_image", - "default_settings", - "base", - "type", - "format", - "trigger_phrases" - ], - "title": "ControlLoRA_LyCORIS_FLUX_Config", - "description": "Model config for Control LoRA models." - }, - "ControlNetInvocation": { - "category": "controlnet", - "class": "invocation", - "classification": "stable", - "description": "Collects ControlNet info to pass to other nodes", - "node_pack": "invokeai", - "properties": { + "default": null, + "description": "Optional metadata to be saved with the image", + "field_kind": "internal", + "input": "connection", + "orig_required": false, + "ui_hidden": false + }, "id": { "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", "field_kind": "node_attribute", @@ -13176,224 +14578,179 @@ "title": "Use Cache", "type": "boolean" }, - "image": { + "latents": { "anyOf": [ { - "$ref": "#/components/schemas/ImageField" + "$ref": "#/components/schemas/LatentsField" }, { "type": "null" } ], "default": null, - "description": "The control image", + "description": "Latents tensor", "field_kind": "input", - "input": "any", + "input": "connection", "orig_required": true }, - "control_model": { + "vae": { "anyOf": [ { - "$ref": "#/components/schemas/ModelIdentifierField" + "$ref": "#/components/schemas/VAEField" }, { "type": "null" } ], "default": null, - "description": "ControlNet model to load", - "field_kind": "input", - "input": "any", - "orig_required": true, - "ui_model_base": ["sd-1", "sd-2", "sdxl"], - "ui_model_type": ["controlnet"] - }, - "control_weight": { - "anyOf": [ - { - "type": "number" - }, - { - "items": { - "type": "number" - }, - "type": "array" - } - ], - "default": 1.0, - "description": "The weight given to the ControlNet", - "field_kind": "input", - "ge": -1, - "input": "any", - "le": 2, - "orig_default": 1.0, - "orig_required": false, - "title": "Control Weight" - }, - "begin_step_percent": { - "default": 0, - "description": "When the ControlNet is first applied (% of total steps)", - "field_kind": "input", - "input": "any", - "maximum": 1, - "minimum": 0, - "orig_default": 0, - "orig_required": false, - "title": "Begin Step Percent", - "type": "number" - }, - "end_step_percent": { - "default": 1, - "description": "When the ControlNet is last applied (% of total steps)", - "field_kind": "input", - "input": "any", - "maximum": 1, - "minimum": 0, - "orig_default": 1, - "orig_required": false, - "title": "End Step Percent", - "type": "number" - }, - "control_mode": { - "default": "balanced", - "description": "The control mode used", - "enum": ["balanced", "more_prompt", "more_control", "unbalanced"], - "field_kind": "input", - 
"input": "any", - "orig_default": "balanced", - "orig_required": false, - "title": "Control Mode", - "type": "string" - }, - "resize_mode": { - "default": "just_resize", - "description": "The resize mode used", - "enum": ["just_resize", "crop_resize", "fill_resize", "just_resize_simple"], + "description": "VAE", "field_kind": "input", - "input": "any", - "orig_default": "just_resize", - "orig_required": false, - "title": "Resize Mode", - "type": "string" + "input": "connection", + "orig_required": true }, "type": { - "const": "controlnet", - "default": "controlnet", + "const": "cogview4_l2i", + "default": "cogview4_l2i", "field_kind": "node_attribute", "title": "type", "type": "string" } }, "required": ["type", "id"], - "tags": ["controlnet"], - "title": "ControlNet - SD1.5, SD2, SDXL", + "tags": ["latents", "image", "vae", "l2i", "cogview4"], + "title": "Latents to Image - CogView4", "type": "object", - "version": "1.1.3", + "version": "1.0.0", "output": { - "$ref": "#/components/schemas/ControlOutput" + "$ref": "#/components/schemas/ImageOutput" } }, - "ControlNetMetadataField": { + "CogView4ModelLoaderInvocation": { + "category": "model", + "class": "invocation", + "classification": "prototype", + "description": "Loads a CogView4 base model, outputting its submodels.", + "node_pack": "invokeai", "properties": { - "image": { - "$ref": "#/components/schemas/ImageField", - "description": "The control image" + "id": { + "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" }, - "processed_image": { - "anyOf": [ - { - "$ref": "#/components/schemas/ImageField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "The control image, after processing." 
+ "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" }, - "control_model": { - "$ref": "#/components/schemas/ModelIdentifierField", - "description": "The ControlNet model to use" + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" }, - "control_weight": { - "anyOf": [ - { - "type": "number" - }, - { - "items": { - "type": "number" - }, - "type": "array" - } - ], - "default": 1, - "description": "The weight given to the ControlNet", - "title": "Control Weight" + "model": { + "$ref": "#/components/schemas/ModelIdentifierField", + "description": "CogView4 model (Transformer) to load", + "field_kind": "input", + "input": "direct", + "orig_required": true, + "ui_model_base": ["cogview4"], + "ui_model_type": ["main"] }, - "begin_step_percent": { - "default": 0, - "description": "When the ControlNet is first applied (% of total steps)", - "maximum": 1, - "minimum": 0, - "title": "Begin Step Percent", - "type": "number" + "type": { + "const": "cogview4_model_loader", + "default": "cogview4_model_loader", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["model", "type", "id"], + "tags": ["model", "cogview4"], + "title": "Main Model - CogView4", + "type": "object", + "version": "1.0.0", + "output": { + "$ref": "#/components/schemas/CogView4ModelLoaderOutput" + } + }, + "CogView4ModelLoaderOutput": { + "class": "output", + "description": "CogView4 base model loader output.", + "properties": { + "transformer": { + "$ref": "#/components/schemas/TransformerField", + "description": "Transformer", + "field_kind": "output", + "title": "Transformer", + "ui_hidden": false }, - 
"end_step_percent": { - "default": 1, - "description": "When the ControlNet is last applied (% of total steps)", - "maximum": 1, - "minimum": 0, - "title": "End Step Percent", - "type": "number" + "glm_encoder": { + "$ref": "#/components/schemas/GlmEncoderField", + "description": "GLM (THUDM) tokenizer and text encoder", + "field_kind": "output", + "title": "GLM Encoder", + "ui_hidden": false }, - "control_mode": { - "default": "balanced", - "description": "The control mode to use", - "enum": ["balanced", "more_prompt", "more_control", "unbalanced"], - "title": "Control Mode", - "type": "string" + "vae": { + "$ref": "#/components/schemas/VAEField", + "description": "VAE", + "field_kind": "output", + "title": "VAE", + "ui_hidden": false }, - "resize_mode": { - "default": "just_resize", - "description": "The resize mode to use", - "enum": ["just_resize", "crop_resize", "fill_resize", "just_resize_simple"], - "title": "Resize Mode", + "type": { + "const": "cogview4_model_loader_output", + "default": "cogview4_model_loader_output", + "field_kind": "node_attribute", + "title": "type", "type": "string" } }, - "required": ["image", "control_model"], - "title": "ControlNetMetadataField", + "required": ["output_meta", "transformer", "glm_encoder", "vae", "type", "type"], + "title": "CogView4ModelLoaderOutput", "type": "object" }, - "ControlNet_Checkpoint_FLUX_Config": { + "CogView4TextEncoderInvocation": { + "category": "conditioning", + "class": "invocation", + "classification": "prototype", + "description": "Encodes and preps a prompt for a cogview4 image.", + "node_pack": "invokeai", "properties": { - "key": { - "type": "string", - "title": "Key", - "description": "A unique key for this model." - }, - "hash": { - "type": "string", - "title": "Hash", - "description": "The hash of the model file(s)." - }, - "path": { - "type": "string", - "title": "Path", - "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." 
+ "id": { + "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" }, - "file_size": { - "type": "integer", - "title": "File Size", - "description": "The size of the model in bytes." + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" }, - "name": { - "type": "string", - "title": "Name", - "description": "Name of the model." + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" }, - "description": { + "prompt": { "anyOf": [ { "type": "string" @@ -13402,637 +14759,1034 @@ "type": "null" } ], - "title": "Description", - "description": "Model description" - }, - "source": { - "type": "string", - "title": "Source", - "description": "The original source of the model (path, URL or repo_id)." - }, - "source_type": { - "$ref": "#/components/schemas/ModelSourceType", - "description": "The type of source" + "default": null, + "description": "Text prompt to encode.", + "field_kind": "input", + "input": "any", + "orig_required": true, + "title": "Prompt", + "ui_component": "textarea" }, - "source_api_response": { + "glm_encoder": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/GlmEncoderField" }, { "type": "null" } ], - "title": "Source Api Response", - "description": "The original API response from the source, as stringified JSON." 
+ "default": null, + "description": "GLM (THUDM) tokenizer and text encoder", + "field_kind": "input", + "input": "connection", + "orig_required": true, + "title": "GLM Encoder" }, - "cover_image": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "Cover Image", - "description": "Url for image to preview model" + "type": { + "const": "cogview4_text_encoder", + "default": "cogview4_text_encoder", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["type", "id"], + "tags": ["prompt", "conditioning", "cogview4"], + "title": "Prompt - CogView4", + "type": "object", + "version": "1.0.0", + "output": { + "$ref": "#/components/schemas/CogView4ConditioningOutput" + } + }, + "CollectInvocation": { + "class": "invocation", + "classification": "stable", + "description": "Collects values into a collection", + "node_pack": "invokeai", + "properties": { + "id": { + "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" }, - "config_path": { + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" + }, + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" + }, + "item": { "anyOf": [ - { - "type": "string" - }, + {}, { "type": "null" } ], - "title": "Config Path", - "description": "Path to the config for this model, if any." 
+ "default": null, + "description": "The item to collect (all inputs must be of the same type)", + "field_kind": "input", + "input": "connection", + "orig_default": null, + "orig_required": false, + "title": "Collection Item", + "ui_type": "CollectionItemField" + }, + "collection": { + "default": [], + "description": "An optional collection to append to", + "field_kind": "input", + "input": "connection", + "items": {}, + "orig_default": [], + "orig_required": false, + "title": "Collection", + "type": "array", + "ui_type": "CollectionField" }, "type": { - "type": "string", - "const": "controlnet", - "title": "Type", - "default": "controlnet" + "const": "collect", + "default": "collect", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["type", "id"], + "title": "CollectInvocation", + "type": "object", + "version": "1.1.0", + "output": { + "$ref": "#/components/schemas/CollectInvocationOutput" + } + }, + "CollectInvocationOutput": { + "class": "output", + "properties": { + "collection": { + "description": "The collection of input items", + "field_kind": "output", + "items": {}, + "title": "Collection", + "type": "array", + "ui_hidden": false, + "ui_type": "CollectionField" }, - "format": { - "type": "string", - "const": "checkpoint", - "title": "Format", - "default": "checkpoint" + "type": { + "const": "collect_output", + "default": "collect_output", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["output_meta", "collection", "type", "type"], + "title": "CollectInvocationOutput", + "type": "object" + }, + "ColorCollectionOutput": { + "class": "output", + "description": "Base class for nodes that output a collection of colors", + "properties": { + "collection": { + "description": "The output colors", + "field_kind": "output", + "items": { + "$ref": "#/components/schemas/ColorField" + }, + "title": "Collection", + "type": "array", + "ui_hidden": false }, - "default_settings": 
{ + "type": { + "const": "color_collection_output", + "default": "color_collection_output", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["output_meta", "collection", "type", "type"], + "title": "ColorCollectionOutput", + "type": "object" + }, + "ColorCorrectInvocation": { + "category": "image", + "class": "invocation", + "classification": "stable", + "description": "Matches the color histogram of a base image to a reference image, optionally\nusing a mask to only color-correct certain regions of the base image.", + "node_pack": "invokeai", + "properties": { + "board": { "anyOf": [ { - "$ref": "#/components/schemas/ControlAdapterDefaultSettings" + "$ref": "#/components/schemas/BoardField" }, { "type": "null" } - ] - }, - "base": { - "type": "string", - "const": "flux", - "title": "Base", - "default": "flux" - } - }, - "type": "object", - "required": [ - "key", - "hash", - "path", - "file_size", - "name", - "description", - "source", - "source_type", - "source_api_response", - "cover_image", - "config_path", - "type", - "format", - "default_settings", - "base" - ], - "title": "ControlNet_Checkpoint_FLUX_Config" - }, - "ControlNet_Checkpoint_SD1_Config": { - "properties": { - "key": { - "type": "string", - "title": "Key", - "description": "A unique key for this model." - }, - "hash": { - "type": "string", - "title": "Hash", - "description": "The hash of the model file(s)." - }, - "path": { - "type": "string", - "title": "Path", - "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." - }, - "file_size": { - "type": "integer", - "title": "File Size", - "description": "The size of the model in bytes." - }, - "name": { - "type": "string", - "title": "Name", - "description": "Name of the model." 
+ ], + "default": null, + "description": "The board to save the image to", + "field_kind": "internal", + "input": "direct", + "orig_required": false, + "ui_hidden": false }, - "description": { + "metadata": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/MetadataField" }, { "type": "null" } ], - "title": "Description", - "description": "Model description" + "default": null, + "description": "Optional metadata to be saved with the image", + "field_kind": "internal", + "input": "connection", + "orig_required": false, + "ui_hidden": false }, - "source": { - "type": "string", - "title": "Source", - "description": "The original source of the model (path, URL or repo_id)." + "id": { + "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" }, - "source_type": { - "$ref": "#/components/schemas/ModelSourceType", - "description": "The type of source" + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" }, - "source_api_response": { + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" + }, + "base_image": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/ImageField" }, { "type": "null" } ], - "title": "Source Api Response", - "description": "The original API response from the source, as stringified JSON." 
+ "default": null, + "description": "The image to color-correct", + "field_kind": "input", + "input": "any", + "orig_required": true }, - "cover_image": { + "color_reference": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/ImageField" }, { "type": "null" } ], - "title": "Cover Image", - "description": "Url for image to preview model" + "default": null, + "description": "Reference image for color-correction", + "field_kind": "input", + "input": "any", + "orig_required": true }, - "config_path": { + "mask": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/ImageField" }, { "type": "null" } ], - "title": "Config Path", - "description": "Path to the config for this model, if any." - }, - "type": { - "type": "string", - "const": "controlnet", - "title": "Type", - "default": "controlnet" - }, - "format": { - "type": "string", - "const": "checkpoint", - "title": "Format", - "default": "checkpoint" + "default": null, + "description": "Optional mask to limit color correction area", + "field_kind": "input", + "input": "any", + "orig_default": null, + "orig_required": false }, - "default_settings": { - "anyOf": [ - { - "$ref": "#/components/schemas/ControlAdapterDefaultSettings" - }, - { - "type": "null" - } - ] + "colorspace": { + "default": "RGB", + "description": "Colorspace in which to apply histogram matching", + "enum": ["RGB", "YCbCr", "YCbCr-Chroma", "YCbCr-Luma"], + "field_kind": "input", + "input": "any", + "orig_default": "RGB", + "orig_required": false, + "title": "Color Space", + "type": "string" }, - "base": { - "type": "string", - "const": "sd-1", - "title": "Base", - "default": "sd-1" + "type": { + "const": "color_correct", + "default": "color_correct", + "field_kind": "node_attribute", + "title": "type", + "type": "string" } }, + "required": ["type", "id"], + "tags": ["image", "color"], + "title": "Color Correct", "type": "object", - "required": [ - "key", - "hash", - "path", - "file_size", - "name", - "description", - 
"source", - "source_type", - "source_api_response", - "cover_image", - "config_path", - "type", - "format", - "default_settings", - "base" - ], - "title": "ControlNet_Checkpoint_SD1_Config" + "version": "2.0.0", + "output": { + "$ref": "#/components/schemas/ImageOutput" + } }, - "ControlNet_Checkpoint_SD2_Config": { + "ColorField": { + "description": "A color primitive field", "properties": { - "key": { - "type": "string", - "title": "Key", - "description": "A unique key for this model." - }, - "hash": { - "type": "string", - "title": "Hash", - "description": "The hash of the model file(s)." + "r": { + "description": "The red component", + "maximum": 255, + "minimum": 0, + "title": "R", + "type": "integer" }, - "path": { - "type": "string", - "title": "Path", - "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." + "g": { + "description": "The green component", + "maximum": 255, + "minimum": 0, + "title": "G", + "type": "integer" }, - "file_size": { - "type": "integer", - "title": "File Size", - "description": "The size of the model in bytes." + "b": { + "description": "The blue component", + "maximum": 255, + "minimum": 0, + "title": "B", + "type": "integer" }, - "name": { - "type": "string", - "title": "Name", - "description": "Name of the model." + "a": { + "description": "The alpha component", + "maximum": 255, + "minimum": 0, + "title": "A", + "type": "integer" + } + }, + "required": ["r", "g", "b", "a"], + "title": "ColorField", + "type": "object" + }, + "ColorInvocation": { + "category": "primitives", + "class": "invocation", + "classification": "stable", + "description": "A color primitive value", + "node_pack": "invokeai", + "properties": { + "id": { + "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" }, - "description": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "Description", - "description": "Model description" + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" }, - "source": { - "type": "string", - "title": "Source", - "description": "The original source of the model (path, URL or repo_id)." + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" }, - "source_type": { - "$ref": "#/components/schemas/ModelSourceType", - "description": "The type of source" + "color": { + "$ref": "#/components/schemas/ColorField", + "default": { + "r": 0, + "g": 0, + "b": 0, + "a": 255 + }, + "description": "The color value", + "field_kind": "input", + "input": "any", + "orig_default": { + "a": 255, + "b": 0, + "g": 0, + "r": 0 + }, + "orig_required": false }, - "source_api_response": { + "type": { + "const": "color", + "default": "color", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["type", "id"], + "tags": ["primitives", "color"], + "title": "Color Primitive", + "type": "object", + "version": "1.0.1", + "output": { + "$ref": "#/components/schemas/ColorOutput" + } + }, + "ColorMapInvocation": { + "category": "controlnet", + "class": "invocation", + "classification": "stable", + "description": "Generates a color map from the provided image.", + "node_pack": "invokeai", + "properties": { + "board": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/BoardField" }, { "type": "null" } ], - "title": "Source 
Api Response", - "description": "The original API response from the source, as stringified JSON." + "default": null, + "description": "The board to save the image to", + "field_kind": "internal", + "input": "direct", + "orig_required": false, + "ui_hidden": false }, - "cover_image": { + "metadata": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/MetadataField" }, { "type": "null" } ], - "title": "Cover Image", - "description": "Url for image to preview model" + "default": null, + "description": "Optional metadata to be saved with the image", + "field_kind": "internal", + "input": "connection", + "orig_required": false, + "ui_hidden": false }, - "config_path": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "Config Path", - "description": "Path to the config for this model, if any." + "id": { + "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" }, - "type": { - "type": "string", - "const": "controlnet", - "title": "Type", - "default": "controlnet" + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" }, - "format": { - "type": "string", - "const": "checkpoint", - "title": "Format", - "default": "checkpoint" + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" }, - "default_settings": { + "image": { "anyOf": [ { - "$ref": "#/components/schemas/ControlAdapterDefaultSettings" + "$ref": "#/components/schemas/ImageField" }, { "type": "null" } - ] + ], + "default": null, + "description": "The image to process", + "field_kind": "input", + 
"input": "any", + "orig_required": true }, - "base": { - "type": "string", - "const": "sd-2", - "title": "Base", - "default": "sd-2" + "tile_size": { + "default": 64, + "description": "Tile size", + "field_kind": "input", + "input": "any", + "minimum": 1, + "orig_default": 64, + "orig_required": false, + "title": "Tile Size", + "type": "integer" + }, + "type": { + "const": "color_map", + "default": "color_map", + "field_kind": "node_attribute", + "title": "type", + "type": "string" } }, + "required": ["type", "id"], + "tags": ["controlnet"], + "title": "Color Map", "type": "object", - "required": [ - "key", - "hash", - "path", - "file_size", - "name", - "description", - "source", - "source_type", - "source_api_response", - "cover_image", - "config_path", - "type", - "format", - "default_settings", - "base" - ], - "title": "ControlNet_Checkpoint_SD2_Config" + "version": "1.0.0", + "output": { + "$ref": "#/components/schemas/ImageOutput" + } }, - "ControlNet_Checkpoint_SDXL_Config": { + "ColorOutput": { + "class": "output", + "description": "Base class for nodes that output a single color", "properties": { - "key": { - "type": "string", - "title": "Key", - "description": "A unique key for this model." - }, - "hash": { - "type": "string", - "title": "Hash", - "description": "The hash of the model file(s)." - }, - "path": { - "type": "string", - "title": "Path", - "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." + "color": { + "$ref": "#/components/schemas/ColorField", + "description": "The output color", + "field_kind": "output", + "ui_hidden": false }, - "file_size": { - "type": "integer", - "title": "File Size", - "description": "The size of the model in bytes." 
+ "type": { + "const": "color_output", + "default": "color_output", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["output_meta", "color", "type", "type"], + "title": "ColorOutput", + "type": "object" + }, + "CompelInvocation": { + "category": "conditioning", + "class": "invocation", + "classification": "stable", + "description": "Parse prompt using compel package to conditioning.", + "node_pack": "invokeai", + "properties": { + "id": { + "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" }, - "name": { - "type": "string", - "title": "Name", - "description": "Name of the model." + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" }, - "description": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "Description", - "description": "Model description" + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" }, - "source": { + "prompt": { + "default": "", + "description": "Prompt to be parsed by Compel to create a conditioning tensor", + "field_kind": "input", + "input": "any", + "orig_default": "", + "orig_required": false, + "title": "Prompt", "type": "string", - "title": "Source", - "description": "The original source of the model (path, URL or repo_id)." 
- }, - "source_type": { - "$ref": "#/components/schemas/ModelSourceType", - "description": "The type of source" - }, - "source_api_response": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "Source Api Response", - "description": "The original API response from the source, as stringified JSON." + "ui_component": "textarea" }, - "cover_image": { + "clip": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/CLIPField" }, { "type": "null" } ], - "title": "Cover Image", - "description": "Url for image to preview model" + "default": null, + "description": "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count", + "field_kind": "input", + "input": "any", + "orig_required": true, + "title": "CLIP" }, - "config_path": { + "mask": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/TensorField" }, { "type": "null" } ], - "title": "Config Path", - "description": "Path to the config for this model, if any." + "default": null, + "description": "A mask defining the region that this conditioning prompt applies to.", + "field_kind": "input", + "input": "any", + "orig_default": null, + "orig_required": false }, "type": { - "type": "string", - "const": "controlnet", - "title": "Type", - "default": "controlnet" - }, - "format": { - "type": "string", - "const": "checkpoint", - "title": "Format", - "default": "checkpoint" - }, - "default_settings": { - "anyOf": [ - { - "$ref": "#/components/schemas/ControlAdapterDefaultSettings" - }, - { - "type": "null" - } - ] - }, - "base": { - "type": "string", - "const": "sdxl", - "title": "Base", - "default": "sdxl" - } + "const": "compel", + "default": "compel", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } }, + "required": ["type", "id"], + "tags": ["prompt", "compel"], + "title": "Prompt - SD1.5", "type": "object", - "required": [ - "key", - "hash", - "path", - "file_size", - "name", - "description", - "source", - "source_type", - 
"source_api_response", - "cover_image", - "config_path", - "type", - "format", - "default_settings", - "base" - ], - "title": "ControlNet_Checkpoint_SDXL_Config" + "version": "1.2.1", + "output": { + "$ref": "#/components/schemas/ConditioningOutput" + } }, - "ControlNet_Checkpoint_ZImage_Config": { + "ConditioningCollectionInvocation": { + "category": "primitives", + "class": "invocation", + "classification": "stable", + "description": "A collection of conditioning tensor primitive values", + "node_pack": "invokeai", "properties": { - "key": { - "type": "string", - "title": "Key", - "description": "A unique key for this model." + "id": { + "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" }, - "hash": { - "type": "string", - "title": "Hash", - "description": "The hash of the model file(s)." + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" }, - "path": { - "type": "string", - "title": "Path", - "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" }, - "file_size": { - "type": "integer", - "title": "File Size", - "description": "The size of the model in bytes." 
+ "collection": { + "default": [], + "description": "The collection of conditioning tensors", + "field_kind": "input", + "input": "any", + "items": { + "$ref": "#/components/schemas/ConditioningField" + }, + "orig_default": [], + "orig_required": false, + "title": "Collection", + "type": "array" }, - "name": { - "type": "string", - "title": "Name", - "description": "Name of the model." + "type": { + "const": "conditioning_collection", + "default": "conditioning_collection", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["type", "id"], + "tags": ["primitives", "conditioning", "collection"], + "title": "Conditioning Collection Primitive", + "type": "object", + "version": "1.0.2", + "output": { + "$ref": "#/components/schemas/ConditioningCollectionOutput" + } + }, + "ConditioningCollectionOutput": { + "class": "output", + "description": "Base class for nodes that output a collection of conditioning tensors", + "properties": { + "collection": { + "description": "The output conditioning tensors", + "field_kind": "output", + "items": { + "$ref": "#/components/schemas/ConditioningField" + }, + "title": "Collection", + "type": "array", + "ui_hidden": false }, - "description": { + "type": { + "const": "conditioning_collection_output", + "default": "conditioning_collection_output", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["output_meta", "collection", "type", "type"], + "title": "ConditioningCollectionOutput", + "type": "object" + }, + "ConditioningField": { + "description": "A conditioning tensor primitive value", + "properties": { + "conditioning_name": { + "description": "The name of conditioning tensor", + "title": "Conditioning Name", + "type": "string" + }, + "mask": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/TensorField" }, { "type": "null" } ], - "title": "Description", - "description": "Model description" + "default": null, + "description": 
"The mask associated with this conditioning tensor. Excluded regions should be set to False, included regions should be set to True." + } + }, + "required": ["conditioning_name"], + "title": "ConditioningField", + "type": "object" + }, + "ConditioningInvocation": { + "category": "primitives", + "class": "invocation", + "classification": "stable", + "description": "A conditioning tensor primitive value", + "node_pack": "invokeai", + "properties": { + "id": { + "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" }, - "source": { - "type": "string", - "title": "Source", - "description": "The original source of the model (path, URL or repo_id)." + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" }, - "source_type": { - "$ref": "#/components/schemas/ModelSourceType", - "description": "The type of source" + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" }, - "source_api_response": { + "conditioning": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/ConditioningField" }, { "type": "null" } ], - "title": "Source Api Response", - "description": "The original API response from the source, as stringified JSON." 
+ "default": null, + "description": "Conditioning tensor", + "field_kind": "input", + "input": "connection", + "orig_required": true }, - "cover_image": { + "type": { + "const": "conditioning", + "default": "conditioning", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["type", "id"], + "tags": ["primitives", "conditioning"], + "title": "Conditioning Primitive", + "type": "object", + "version": "1.0.1", + "output": { + "$ref": "#/components/schemas/ConditioningOutput" + } + }, + "ConditioningOutput": { + "class": "output", + "description": "Base class for nodes that output a single conditioning tensor", + "properties": { + "conditioning": { + "$ref": "#/components/schemas/ConditioningField", + "description": "Conditioning tensor", + "field_kind": "output", + "ui_hidden": false + }, + "type": { + "const": "conditioning_output", + "default": "conditioning_output", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["output_meta", "conditioning", "type", "type"], + "title": "ConditioningOutput", + "type": "object" + }, + "ContentShuffleInvocation": { + "category": "controlnet", + "class": "invocation", + "classification": "stable", + "description": "Shuffles the image, similar to a 'liquify' filter.", + "node_pack": "invokeai", + "properties": { + "board": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/BoardField" }, { "type": "null" } ], - "title": "Cover Image", - "description": "Url for image to preview model" + "default": null, + "description": "The board to save the image to", + "field_kind": "internal", + "input": "direct", + "orig_required": false, + "ui_hidden": false }, - "config_path": { + "metadata": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/MetadataField" }, { "type": "null" } ], - "title": "Config Path", - "description": "Path to the config for this model, if any." 
+ "default": null, + "description": "Optional metadata to be saved with the image", + "field_kind": "internal", + "input": "connection", + "orig_required": false, + "ui_hidden": false }, - "type": { - "type": "string", - "const": "controlnet", - "title": "Type", - "default": "controlnet" + "id": { + "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" }, - "format": { - "type": "string", - "const": "checkpoint", - "title": "Format", - "default": "checkpoint" + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" }, - "base": { - "type": "string", - "const": "z-image", - "title": "Base", - "default": "z-image" + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" }, - "default_settings": { + "image": { "anyOf": [ { - "$ref": "#/components/schemas/ControlAdapterDefaultSettings" + "$ref": "#/components/schemas/ImageField" }, { "type": "null" } - ] + ], + "default": null, + "description": "The image to process", + "field_kind": "input", + "input": "any", + "orig_required": true + }, + "scale_factor": { + "default": 256, + "description": "The scale factor used for the shuffle", + "field_kind": "input", + "input": "any", + "minimum": 0, + "orig_default": 256, + "orig_required": false, + "title": "Scale Factor", + "type": "integer" + }, + "type": { + "const": "content_shuffle", + "default": "content_shuffle", + "field_kind": "node_attribute", + "title": "type", + "type": "string" } }, + "required": ["type", "id"], + "tags": ["controlnet", "normal"], + "title": "Content Shuffle", "type": "object", - 
"required": [ - "key", - "hash", - "path", - "file_size", - "name", - "description", - "source", - "source_type", - "source_api_response", - "cover_image", - "config_path", - "type", - "format", - "base", - "default_settings" - ], - "title": "ControlNet_Checkpoint_ZImage_Config", - "description": "Model config for Z-Image Control adapter models (Safetensors checkpoint).\n\nZ-Image Control models are standalone adapters containing only the control layers\n(control_layers, control_all_x_embedder, control_noise_refiner) that extend\nthe base Z-Image transformer with spatial conditioning capabilities.\n\nSupports: Canny, HED, Depth, Pose, MLSD.\nRecommended control_context_scale: 0.65-0.80." + "version": "1.0.0", + "output": { + "$ref": "#/components/schemas/ImageOutput" + } }, - "ControlNet_Diffusers_FLUX_Config": { + "ControlAdapterDefaultSettings": { + "properties": { + "preprocessor": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Preprocessor" + } + }, + "additionalProperties": false, + "type": "object", + "required": ["preprocessor"], + "title": "ControlAdapterDefaultSettings" + }, + "ControlField": { + "properties": { + "image": { + "$ref": "#/components/schemas/ImageField", + "description": "The control image" + }, + "control_model": { + "$ref": "#/components/schemas/ModelIdentifierField", + "description": "The ControlNet model to use" + }, + "control_weight": { + "anyOf": [ + { + "type": "number" + }, + { + "items": { + "type": "number" + }, + "type": "array" + } + ], + "default": 1, + "description": "The weight given to the ControlNet", + "title": "Control Weight" + }, + "begin_step_percent": { + "default": 0, + "description": "When the ControlNet is first applied (% of total steps)", + "maximum": 1, + "minimum": 0, + "title": "Begin Step Percent", + "type": "number" + }, + "end_step_percent": { + "default": 1, + "description": "When the ControlNet is last applied (% of total steps)", + "maximum": 1, + "minimum": 0, + 
"title": "End Step Percent", + "type": "number" + }, + "control_mode": { + "default": "balanced", + "description": "The control mode to use", + "enum": ["balanced", "more_prompt", "more_control", "unbalanced"], + "title": "Control Mode", + "type": "string" + }, + "resize_mode": { + "default": "just_resize", + "description": "The resize mode to use", + "enum": ["just_resize", "crop_resize", "fill_resize", "just_resize_simple"], + "title": "Resize Mode", + "type": "string" + } + }, + "required": ["image", "control_model"], + "title": "ControlField", + "type": "object" + }, + "ControlLoRAField": { + "properties": { + "lora": { + "$ref": "#/components/schemas/ModelIdentifierField", + "description": "Info to load lora model" + }, + "weight": { + "description": "Weight to apply to lora model", + "title": "Weight", + "type": "number" + }, + "img": { + "$ref": "#/components/schemas/ImageField", + "description": "Image to use in structural conditioning" + } + }, + "required": ["lora", "weight", "img"], + "title": "ControlLoRAField", + "type": "object" + }, + "ControlLoRA_LyCORIS_FLUX_Config": { "properties": { "key": { "type": "string", @@ -14104,22 +15858,6 @@ "title": "Cover Image", "description": "Url for image to preview model" }, - "format": { - "type": "string", - "const": "diffusers", - "title": "Format", - "default": "diffusers" - }, - "repo_variant": { - "$ref": "#/components/schemas/ModelRepoVariant", - "default": "" - }, - "type": { - "type": "string", - "const": "controlnet", - "title": "Type", - "default": "controlnet" - }, "default_settings": { "anyOf": [ { @@ -14135,6 +15873,33 @@ "const": "flux", "title": "Base", "default": "flux" + }, + "type": { + "type": "string", + "const": "control_lora", + "title": "Type", + "default": "control_lora" + }, + "format": { + "type": "string", + "const": "lycoris", + "title": "Format", + "default": "lycoris" + }, + "trigger_phrases": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array", + "uniqueItems": 
true + }, + { + "type": "null" + } + ], + "title": "Trigger Phrases" } }, "type": "object", @@ -14149,15 +15914,311 @@ "source_type", "source_api_response", "cover_image", - "format", - "repo_variant", - "type", "default_settings", - "base" - ], - "title": "ControlNet_Diffusers_FLUX_Config" + "base", + "type", + "format", + "trigger_phrases" + ], + "title": "ControlLoRA_LyCORIS_FLUX_Config", + "description": "Model config for Control LoRA models." }, - "ControlNet_Diffusers_SD1_Config": { + "ControlNetInvocation": { + "category": "controlnet", + "class": "invocation", + "classification": "stable", + "description": "Collects ControlNet info to pass to other nodes", + "node_pack": "invokeai", + "properties": { + "id": { + "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" + }, + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" + }, + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" + }, + "image": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The control image", + "field_kind": "input", + "input": "any", + "orig_required": true + }, + "control_model": { + "anyOf": [ + { + "$ref": "#/components/schemas/ModelIdentifierField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "ControlNet model to load", + "field_kind": "input", + "input": "any", + "orig_required": true, + "ui_model_base": ["sd-1", "sd-2", "sdxl"], + "ui_model_type": ["controlnet"] + }, + "control_weight": { + 
"anyOf": [ + { + "type": "number" + }, + { + "items": { + "type": "number" + }, + "type": "array" + } + ], + "default": 1.0, + "description": "The weight given to the ControlNet", + "field_kind": "input", + "ge": -1, + "input": "any", + "le": 2, + "orig_default": 1.0, + "orig_required": false, + "title": "Control Weight" + }, + "begin_step_percent": { + "default": 0, + "description": "When the ControlNet is first applied (% of total steps)", + "field_kind": "input", + "input": "any", + "maximum": 1, + "minimum": 0, + "orig_default": 0, + "orig_required": false, + "title": "Begin Step Percent", + "type": "number" + }, + "end_step_percent": { + "default": 1, + "description": "When the ControlNet is last applied (% of total steps)", + "field_kind": "input", + "input": "any", + "maximum": 1, + "minimum": 0, + "orig_default": 1, + "orig_required": false, + "title": "End Step Percent", + "type": "number" + }, + "control_mode": { + "default": "balanced", + "description": "The control mode used", + "enum": ["balanced", "more_prompt", "more_control", "unbalanced"], + "field_kind": "input", + "input": "any", + "orig_default": "balanced", + "orig_required": false, + "title": "Control Mode", + "type": "string" + }, + "resize_mode": { + "default": "just_resize", + "description": "The resize mode used", + "enum": ["just_resize", "crop_resize", "fill_resize", "just_resize_simple"], + "field_kind": "input", + "input": "any", + "orig_default": "just_resize", + "orig_required": false, + "title": "Resize Mode", + "type": "string" + }, + "type": { + "const": "controlnet", + "default": "controlnet", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["type", "id"], + "tags": ["controlnet"], + "title": "ControlNet - SD1.5, SD2, SDXL", + "type": "object", + "version": "1.1.3", + "output": { + "$ref": "#/components/schemas/ControlOutput" + } + }, + "ControlNetMetadataField": { + "properties": { + "image": { + "$ref": 
"#/components/schemas/ImageField", + "description": "The control image" + }, + "processed_image": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The control image, after processing." + }, + "control_model": { + "$ref": "#/components/schemas/ModelIdentifierField", + "description": "The ControlNet model to use" + }, + "control_weight": { + "anyOf": [ + { + "type": "number" + }, + { + "items": { + "type": "number" + }, + "type": "array" + } + ], + "default": 1, + "description": "The weight given to the ControlNet", + "title": "Control Weight" + }, + "begin_step_percent": { + "default": 0, + "description": "When the ControlNet is first applied (% of total steps)", + "maximum": 1, + "minimum": 0, + "title": "Begin Step Percent", + "type": "number" + }, + "end_step_percent": { + "default": 1, + "description": "When the ControlNet is last applied (% of total steps)", + "maximum": 1, + "minimum": 0, + "title": "End Step Percent", + "type": "number" + }, + "control_mode": { + "default": "balanced", + "description": "The control mode to use", + "enum": ["balanced", "more_prompt", "more_control", "unbalanced"], + "title": "Control Mode", + "type": "string" + }, + "resize_mode": { + "default": "just_resize", + "description": "The resize mode to use", + "enum": ["just_resize", "crop_resize", "fill_resize", "just_resize_simple"], + "title": "Resize Mode", + "type": "string" + } + }, + "required": ["image", "control_model"], + "title": "ControlNetMetadataField", + "type": "object" + }, + "ControlNetRecallParameter": { + "properties": { + "model_name": { + "type": "string", + "title": "Model Name", + "description": "The name of the ControlNet/T2I Adapter/Control LoRA model" + }, + "image_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Image Name", + "description": "The filename of the control image in outputs/images" + }, + "weight": { + "type": 
"number", + "maximum": 2.0, + "minimum": -1.0, + "title": "Weight", + "description": "The weight for the control adapter", + "default": 1.0 + }, + "begin_step_percent": { + "anyOf": [ + { + "type": "number", + "maximum": 1.0, + "minimum": 0.0 + }, + { + "type": "null" + } + ], + "title": "Begin Step Percent", + "description": "When the control adapter is first applied (% of total steps)" + }, + "end_step_percent": { + "anyOf": [ + { + "type": "number", + "maximum": 1.0, + "minimum": 0.0 + }, + { + "type": "null" + } + ], + "title": "End Step Percent", + "description": "When the control adapter is last applied (% of total steps)" + }, + "control_mode": { + "anyOf": [ + { + "type": "string", + "enum": ["balanced", "more_prompt", "more_control"] + }, + { + "type": "null" + } + ], + "title": "Control Mode", + "description": "The control mode (ControlNet only)" + } + }, + "type": "object", + "required": ["model_name"], + "title": "ControlNetRecallParameter", + "description": "ControlNet configuration for recall" + }, + "ControlNet_Checkpoint_FLUX_Config": { "properties": { "key": { "type": "string", @@ -14229,15 +16290,17 @@ "title": "Cover Image", "description": "Url for image to preview model" }, - "format": { - "type": "string", - "const": "diffusers", - "title": "Format", - "default": "diffusers" - }, - "repo_variant": { - "$ref": "#/components/schemas/ModelRepoVariant", - "default": "" + "config_path": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Config Path", + "description": "Path to the config for this model, if any." 
}, "type": { "type": "string", @@ -14245,6 +16308,12 @@ "title": "Type", "default": "controlnet" }, + "format": { + "type": "string", + "const": "checkpoint", + "title": "Format", + "default": "checkpoint" + }, "default_settings": { "anyOf": [ { @@ -14257,9 +16326,9 @@ }, "base": { "type": "string", - "const": "sd-1", + "const": "flux", "title": "Base", - "default": "sd-1" + "default": "flux" } }, "type": "object", @@ -14274,15 +16343,15 @@ "source_type", "source_api_response", "cover_image", - "format", - "repo_variant", + "config_path", "type", + "format", "default_settings", "base" ], - "title": "ControlNet_Diffusers_SD1_Config" + "title": "ControlNet_Checkpoint_FLUX_Config" }, - "ControlNet_Diffusers_SD2_Config": { + "ControlNet_Checkpoint_SD1_Config": { "properties": { "key": { "type": "string", @@ -14354,15 +16423,17 @@ "title": "Cover Image", "description": "Url for image to preview model" }, - "format": { - "type": "string", - "const": "diffusers", - "title": "Format", - "default": "diffusers" - }, - "repo_variant": { - "$ref": "#/components/schemas/ModelRepoVariant", - "default": "" + "config_path": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Config Path", + "description": "Path to the config for this model, if any." 
}, "type": { "type": "string", @@ -14370,6 +16441,12 @@ "title": "Type", "default": "controlnet" }, + "format": { + "type": "string", + "const": "checkpoint", + "title": "Format", + "default": "checkpoint" + }, "default_settings": { "anyOf": [ { @@ -14382,9 +16459,9 @@ }, "base": { "type": "string", - "const": "sd-2", + "const": "sd-1", "title": "Base", - "default": "sd-2" + "default": "sd-1" } }, "type": "object", @@ -14399,15 +16476,15 @@ "source_type", "source_api_response", "cover_image", - "format", - "repo_variant", + "config_path", "type", + "format", "default_settings", "base" ], - "title": "ControlNet_Diffusers_SD2_Config" + "title": "ControlNet_Checkpoint_SD1_Config" }, - "ControlNet_Diffusers_SDXL_Config": { + "ControlNet_Checkpoint_SD2_Config": { "properties": { "key": { "type": "string", @@ -14479,15 +16556,17 @@ "title": "Cover Image", "description": "Url for image to preview model" }, - "format": { - "type": "string", - "const": "diffusers", - "title": "Format", - "default": "diffusers" - }, - "repo_variant": { - "$ref": "#/components/schemas/ModelRepoVariant", - "default": "" + "config_path": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Config Path", + "description": "Path to the config for this model, if any." 
}, "type": { "type": "string", @@ -14495,6 +16574,12 @@ "title": "Type", "default": "controlnet" }, + "format": { + "type": "string", + "const": "checkpoint", + "title": "Format", + "default": "checkpoint" + }, "default_settings": { "anyOf": [ { @@ -14507,9 +16592,9 @@ }, "base": { "type": "string", - "const": "sdxl", + "const": "sd-2", "title": "Base", - "default": "sdxl" + "default": "sd-2" } }, "type": "object", @@ -14524,116 +16609,63 @@ "source_type", "source_api_response", "cover_image", - "format", - "repo_variant", + "config_path", "type", + "format", "default_settings", "base" ], - "title": "ControlNet_Diffusers_SDXL_Config" + "title": "ControlNet_Checkpoint_SD2_Config" }, - "ControlOutput": { - "class": "output", - "description": "node output for ControlNet info", + "ControlNet_Checkpoint_SDXL_Config": { "properties": { - "control": { - "$ref": "#/components/schemas/ControlField", - "description": "ControlNet(s) to apply", - "field_kind": "output", - "ui_hidden": false + "key": { + "type": "string", + "title": "Key", + "description": "A unique key for this model." }, - "type": { - "const": "control_output", - "default": "control_output", - "field_kind": "node_attribute", - "title": "type", - "type": "string" - } - }, - "required": ["output_meta", "control", "type", "type"], - "title": "ControlOutput", - "type": "object" - }, - "CoreMetadataInvocation": { - "additionalProperties": true, - "category": "metadata", - "class": "invocation", - "classification": "internal", - "description": "Used internally by Invoke to collect metadata for generations.", - "node_pack": "invokeai", - "properties": { - "id": { - "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", - "field_kind": "node_attribute", - "title": "Id", - "type": "string" + "hash": { + "type": "string", + "title": "Hash", + "description": "The hash of the model file(s)." 
}, - "is_intermediate": { - "default": false, - "description": "Whether or not this is an intermediate invocation.", - "field_kind": "node_attribute", - "input": "direct", - "orig_required": true, - "title": "Is Intermediate", - "type": "boolean", - "ui_hidden": false, - "ui_type": "IsIntermediate" + "path": { + "type": "string", + "title": "Path", + "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." }, - "use_cache": { - "default": true, - "description": "Whether or not to use the cache", - "field_kind": "node_attribute", - "title": "Use Cache", - "type": "boolean" + "file_size": { + "type": "integer", + "title": "File Size", + "description": "The size of the model in bytes." }, - "generation_mode": { + "name": { + "type": "string", + "title": "Name", + "description": "Name of the model." + }, + "description": { "anyOf": [ { - "enum": [ - "txt2img", - "img2img", - "inpaint", - "outpaint", - "sdxl_txt2img", - "sdxl_img2img", - "sdxl_inpaint", - "sdxl_outpaint", - "flux_txt2img", - "flux_img2img", - "flux_inpaint", - "flux_outpaint", - "flux2_txt2img", - "flux2_img2img", - "flux2_inpaint", - "flux2_outpaint", - "sd3_txt2img", - "sd3_img2img", - "sd3_inpaint", - "sd3_outpaint", - "cogview4_txt2img", - "cogview4_img2img", - "cogview4_inpaint", - "cogview4_outpaint", - "z_image_txt2img", - "z_image_img2img", - "z_image_inpaint", - "z_image_outpaint" - ], "type": "string" }, { "type": "null" } ], - "default": null, - "description": "The generation mode that output this image", - "field_kind": "input", - "input": "any", - "orig_default": null, - "orig_required": false, - "title": "Generation Mode" + "title": "Description", + "description": "Model description" }, - "positive_prompt": { + "source": { + "type": "string", + "title": "Source", + "description": "The original source of the model (path, URL or repo_id)." 
+ }, + "source_type": { + "$ref": "#/components/schemas/ModelSourceType", + "description": "The type of source" + }, + "source_api_response": { "anyOf": [ { "type": "string" @@ -14642,15 +16674,10 @@ "type": "null" } ], - "default": null, - "description": "The positive prompt parameter", - "field_kind": "input", - "input": "any", - "orig_default": null, - "orig_required": false, - "title": "Positive Prompt" + "title": "Source Api Response", + "description": "The original API response from the source, as stringified JSON." }, - "negative_prompt": { + "cover_image": { "anyOf": [ { "type": "string" @@ -14659,66 +16686,98 @@ "type": "null" } ], - "default": null, - "description": "The negative prompt parameter", - "field_kind": "input", - "input": "any", - "orig_default": null, - "orig_required": false, - "title": "Negative Prompt" + "title": "Cover Image", + "description": "Url for image to preview model" }, - "width": { + "config_path": { "anyOf": [ { - "type": "integer" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "The width parameter", - "field_kind": "input", - "input": "any", - "orig_default": null, - "orig_required": false, - "title": "Width" + "title": "Config Path", + "description": "Path to the config for this model, if any." 
}, - "height": { - "anyOf": [ - { - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "The height parameter", - "field_kind": "input", - "input": "any", - "orig_default": null, - "orig_required": false, - "title": "Height" + "type": { + "type": "string", + "const": "controlnet", + "title": "Type", + "default": "controlnet" }, - "seed": { + "format": { + "type": "string", + "const": "checkpoint", + "title": "Format", + "default": "checkpoint" + }, + "default_settings": { "anyOf": [ { - "type": "integer" + "$ref": "#/components/schemas/ControlAdapterDefaultSettings" }, { "type": "null" } - ], - "default": null, - "description": "The seed used for noise generation", - "field_kind": "input", - "input": "any", - "orig_default": null, - "orig_required": false, - "title": "Seed" + ] }, - "rand_device": { + "base": { + "type": "string", + "const": "sdxl", + "title": "Base", + "default": "sdxl" + } + }, + "type": "object", + "required": [ + "key", + "hash", + "path", + "file_size", + "name", + "description", + "source", + "source_type", + "source_api_response", + "cover_image", + "config_path", + "type", + "format", + "default_settings", + "base" + ], + "title": "ControlNet_Checkpoint_SDXL_Config" + }, + "ControlNet_Checkpoint_ZImage_Config": { + "properties": { + "key": { + "type": "string", + "title": "Key", + "description": "A unique key for this model." + }, + "hash": { + "type": "string", + "title": "Hash", + "description": "The hash of the model file(s)." + }, + "path": { + "type": "string", + "title": "Path", + "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." + }, + "file_size": { + "type": "integer", + "title": "File Size", + "description": "The size of the model in bytes." + }, + "name": { + "type": "string", + "title": "Name", + "description": "Name of the model." 
+ }, + "description": { "anyOf": [ { "type": "string" @@ -14727,66 +16786,43 @@ "type": "null" } ], - "default": null, - "description": "The device used for random number generation", - "field_kind": "input", - "input": "any", - "orig_default": null, - "orig_required": false, - "title": "Rand Device" + "title": "Description", + "description": "Model description" }, - "cfg_scale": { - "anyOf": [ - { - "type": "number" - }, - { - "type": "null" - } - ], - "default": null, - "description": "The classifier-free guidance scale parameter", - "field_kind": "input", - "input": "any", - "orig_default": null, - "orig_required": false, - "title": "Cfg Scale" + "source": { + "type": "string", + "title": "Source", + "description": "The original source of the model (path, URL or repo_id)." }, - "cfg_rescale_multiplier": { + "source_type": { + "$ref": "#/components/schemas/ModelSourceType", + "description": "The type of source" + }, + "source_api_response": { "anyOf": [ { - "type": "number" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "Rescale multiplier for CFG guidance, used for models trained with zero-terminal SNR", - "field_kind": "input", - "input": "any", - "orig_default": null, - "orig_required": false, - "title": "Cfg Rescale Multiplier" + "title": "Source Api Response", + "description": "The original API response from the source, as stringified JSON." 
}, - "steps": { + "cover_image": { "anyOf": [ { - "type": "integer" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "The number of steps used for inference", - "field_kind": "input", - "input": "any", - "orig_default": null, - "orig_required": false, - "title": "Steps" + "title": "Cover Image", + "description": "Url for image to preview model" }, - "scheduler": { + "config_path": { "anyOf": [ { "type": "string" @@ -14795,179 +16831,337 @@ "type": "null" } ], - "default": null, - "description": "The scheduler used for inference", - "field_kind": "input", - "input": "any", - "orig_default": null, - "orig_required": false, - "title": "Scheduler" + "title": "Config Path", + "description": "Path to the config for this model, if any." }, - "seamless_x": { + "type": { + "type": "string", + "const": "controlnet", + "title": "Type", + "default": "controlnet" + }, + "format": { + "type": "string", + "const": "checkpoint", + "title": "Format", + "default": "checkpoint" + }, + "base": { + "type": "string", + "const": "z-image", + "title": "Base", + "default": "z-image" + }, + "default_settings": { "anyOf": [ { - "type": "boolean" + "$ref": "#/components/schemas/ControlAdapterDefaultSettings" }, { "type": "null" } - ], - "default": null, - "description": "Whether seamless tiling was used on the X axis", - "field_kind": "input", - "input": "any", - "orig_default": null, - "orig_required": false, - "title": "Seamless X" + ] + } + }, + "type": "object", + "required": [ + "key", + "hash", + "path", + "file_size", + "name", + "description", + "source", + "source_type", + "source_api_response", + "cover_image", + "config_path", + "type", + "format", + "base", + "default_settings" + ], + "title": "ControlNet_Checkpoint_ZImage_Config", + "description": "Model config for Z-Image Control adapter models (Safetensors checkpoint).\n\nZ-Image Control models are standalone adapters containing only the control layers\n(control_layers, control_all_x_embedder, 
control_noise_refiner) that extend\nthe base Z-Image transformer with spatial conditioning capabilities.\n\nSupports: Canny, HED, Depth, Pose, MLSD.\nRecommended control_context_scale: 0.65-0.80." + }, + "ControlNet_Diffusers_FLUX_Config": { + "properties": { + "key": { + "type": "string", + "title": "Key", + "description": "A unique key for this model." }, - "seamless_y": { + "hash": { + "type": "string", + "title": "Hash", + "description": "The hash of the model file(s)." + }, + "path": { + "type": "string", + "title": "Path", + "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." + }, + "file_size": { + "type": "integer", + "title": "File Size", + "description": "The size of the model in bytes." + }, + "name": { + "type": "string", + "title": "Name", + "description": "Name of the model." + }, + "description": { "anyOf": [ { - "type": "boolean" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "Whether seamless tiling was used on the Y axis", - "field_kind": "input", - "input": "any", - "orig_default": null, - "orig_required": false, - "title": "Seamless Y" + "title": "Description", + "description": "Model description" }, - "clip_skip": { + "source": { + "type": "string", + "title": "Source", + "description": "The original source of the model (path, URL or repo_id)." + }, + "source_type": { + "$ref": "#/components/schemas/ModelSourceType", + "description": "The type of source" + }, + "source_api_response": { "anyOf": [ { - "type": "integer" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "The number of skipped CLIP layers", - "field_kind": "input", - "input": "any", - "orig_default": null, - "orig_required": false, - "title": "Clip Skip" + "title": "Source Api Response", + "description": "The original API response from the source, as stringified JSON." 
}, - "model": { + "cover_image": { "anyOf": [ { - "$ref": "#/components/schemas/ModelIdentifierField" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "The main model used for inference", - "field_kind": "input", - "input": "any", - "orig_default": null, - "orig_required": false + "title": "Cover Image", + "description": "Url for image to preview model" }, - "controlnets": { + "format": { + "type": "string", + "const": "diffusers", + "title": "Format", + "default": "diffusers" + }, + "repo_variant": { + "$ref": "#/components/schemas/ModelRepoVariant", + "default": "" + }, + "type": { + "type": "string", + "const": "controlnet", + "title": "Type", + "default": "controlnet" + }, + "default_settings": { "anyOf": [ { - "items": { - "$ref": "#/components/schemas/ControlNetMetadataField" - }, - "type": "array" + "$ref": "#/components/schemas/ControlAdapterDefaultSettings" }, { "type": "null" } - ], - "default": null, - "description": "The ControlNets used for inference", - "field_kind": "input", - "input": "any", - "orig_default": null, - "orig_required": false, - "title": "Controlnets" + ] }, - "ipAdapters": { + "base": { + "type": "string", + "const": "flux", + "title": "Base", + "default": "flux" + } + }, + "type": "object", + "required": [ + "key", + "hash", + "path", + "file_size", + "name", + "description", + "source", + "source_type", + "source_api_response", + "cover_image", + "format", + "repo_variant", + "type", + "default_settings", + "base" + ], + "title": "ControlNet_Diffusers_FLUX_Config" + }, + "ControlNet_Diffusers_SD1_Config": { + "properties": { + "key": { + "type": "string", + "title": "Key", + "description": "A unique key for this model." + }, + "hash": { + "type": "string", + "title": "Hash", + "description": "The hash of the model file(s)." + }, + "path": { + "type": "string", + "title": "Path", + "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." 
+ }, + "file_size": { + "type": "integer", + "title": "File Size", + "description": "The size of the model in bytes." + }, + "name": { + "type": "string", + "title": "Name", + "description": "Name of the model." + }, + "description": { "anyOf": [ { - "items": { - "$ref": "#/components/schemas/IPAdapterMetadataField" - }, - "type": "array" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "The IP Adapters used for inference", - "field_kind": "input", - "input": "any", - "orig_default": null, - "orig_required": false, - "title": "Ipadapters" + "title": "Description", + "description": "Model description" }, - "t2iAdapters": { + "source": { + "type": "string", + "title": "Source", + "description": "The original source of the model (path, URL or repo_id)." + }, + "source_type": { + "$ref": "#/components/schemas/ModelSourceType", + "description": "The type of source" + }, + "source_api_response": { "anyOf": [ { - "items": { - "$ref": "#/components/schemas/T2IAdapterMetadataField" - }, - "type": "array" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "The IP Adapters used for inference", - "field_kind": "input", - "input": "any", - "orig_default": null, - "orig_required": false, - "title": "T2Iadapters" + "title": "Source Api Response", + "description": "The original API response from the source, as stringified JSON." 
}, - "loras": { + "cover_image": { "anyOf": [ { - "items": { - "$ref": "#/components/schemas/LoRAMetadataField" - }, - "type": "array" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "The LoRAs used for inference", - "field_kind": "input", - "input": "any", - "orig_default": null, - "orig_required": false, - "title": "Loras" + "title": "Cover Image", + "description": "Url for image to preview model" }, - "strength": { + "format": { + "type": "string", + "const": "diffusers", + "title": "Format", + "default": "diffusers" + }, + "repo_variant": { + "$ref": "#/components/schemas/ModelRepoVariant", + "default": "" + }, + "type": { + "type": "string", + "const": "controlnet", + "title": "Type", + "default": "controlnet" + }, + "default_settings": { "anyOf": [ { - "type": "number" + "$ref": "#/components/schemas/ControlAdapterDefaultSettings" }, { "type": "null" } - ], - "default": null, - "description": "The strength used for latents-to-latents", - "field_kind": "input", - "input": "any", - "orig_default": null, - "orig_required": false, - "title": "Strength" + ] }, - "init_image": { + "base": { + "type": "string", + "const": "sd-1", + "title": "Base", + "default": "sd-1" + } + }, + "type": "object", + "required": [ + "key", + "hash", + "path", + "file_size", + "name", + "description", + "source", + "source_type", + "source_api_response", + "cover_image", + "format", + "repo_variant", + "type", + "default_settings", + "base" + ], + "title": "ControlNet_Diffusers_SD1_Config" + }, + "ControlNet_Diffusers_SD2_Config": { + "properties": { + "key": { + "type": "string", + "title": "Key", + "description": "A unique key for this model." + }, + "hash": { + "type": "string", + "title": "Hash", + "description": "The hash of the model file(s)." + }, + "path": { + "type": "string", + "title": "Path", + "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." 
+ }, + "file_size": { + "type": "integer", + "title": "File Size", + "description": "The size of the model in bytes." + }, + "name": { + "type": "string", + "title": "Name", + "description": "Name of the model." + }, + "description": { "anyOf": [ { "type": "string" @@ -14976,64 +17170,144 @@ "type": "null" } ], - "default": null, - "description": "The name of the initial image", - "field_kind": "input", - "input": "any", - "orig_default": null, - "orig_required": false, - "title": "Init Image" + "title": "Description", + "description": "Model description" }, - "vae": { + "source": { + "type": "string", + "title": "Source", + "description": "The original source of the model (path, URL or repo_id)." + }, + "source_type": { + "$ref": "#/components/schemas/ModelSourceType", + "description": "The type of source" + }, + "source_api_response": { "anyOf": [ { - "$ref": "#/components/schemas/ModelIdentifierField" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "The VAE used for decoding, if the main model's default was not used", - "field_kind": "input", - "input": "any", - "orig_default": null, - "orig_required": false + "title": "Source Api Response", + "description": "The original API response from the source, as stringified JSON." 
}, - "qwen3_encoder": { + "cover_image": { "anyOf": [ { - "$ref": "#/components/schemas/ModelIdentifierField" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "The Qwen3 text encoder model used for Z-Image inference", - "field_kind": "input", - "input": "any", - "orig_default": null, - "orig_required": false + "title": "Cover Image", + "description": "Url for image to preview model" }, - "hrf_enabled": { + "format": { + "type": "string", + "const": "diffusers", + "title": "Format", + "default": "diffusers" + }, + "repo_variant": { + "$ref": "#/components/schemas/ModelRepoVariant", + "default": "" + }, + "type": { + "type": "string", + "const": "controlnet", + "title": "Type", + "default": "controlnet" + }, + "default_settings": { "anyOf": [ { - "type": "boolean" + "$ref": "#/components/schemas/ControlAdapterDefaultSettings" + }, + { + "type": "null" + } + ] + }, + "base": { + "type": "string", + "const": "sd-2", + "title": "Base", + "default": "sd-2" + } + }, + "type": "object", + "required": [ + "key", + "hash", + "path", + "file_size", + "name", + "description", + "source", + "source_type", + "source_api_response", + "cover_image", + "format", + "repo_variant", + "type", + "default_settings", + "base" + ], + "title": "ControlNet_Diffusers_SD2_Config" + }, + "ControlNet_Diffusers_SDXL_Config": { + "properties": { + "key": { + "type": "string", + "title": "Key", + "description": "A unique key for this model." + }, + "hash": { + "type": "string", + "title": "Hash", + "description": "The hash of the model file(s)." + }, + "path": { + "type": "string", + "title": "Path", + "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." + }, + "file_size": { + "type": "integer", + "title": "File Size", + "description": "The size of the model in bytes." + }, + "name": { + "type": "string", + "title": "Name", + "description": "Name of the model." 
+ }, + "description": { + "anyOf": [ + { + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "Whether or not high resolution fix was enabled.", - "field_kind": "input", - "input": "any", - "orig_default": null, - "orig_required": false, - "title": "Hrf Enabled" + "title": "Description", + "description": "Model description" }, - "hrf_method": { + "source": { + "type": "string", + "title": "Source", + "description": "The original source of the model (path, URL or repo_id)." + }, + "source_type": { + "$ref": "#/components/schemas/ModelSourceType", + "description": "The type of source" + }, + "source_api_response": { "anyOf": [ { "type": "string" @@ -15042,32 +17316,184 @@ "type": "null" } ], - "default": null, - "description": "The high resolution fix upscale method.", - "field_kind": "input", - "input": "any", - "orig_default": null, - "orig_required": false, - "title": "Hrf Method" + "title": "Source Api Response", + "description": "The original API response from the source, as stringified JSON." 
}, - "hrf_strength": { + "cover_image": { "anyOf": [ { - "type": "number" + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Cover Image", + "description": "Url for image to preview model" + }, + "format": { + "type": "string", + "const": "diffusers", + "title": "Format", + "default": "diffusers" + }, + "repo_variant": { + "$ref": "#/components/schemas/ModelRepoVariant", + "default": "" + }, + "type": { + "type": "string", + "const": "controlnet", + "title": "Type", + "default": "controlnet" + }, + "default_settings": { + "anyOf": [ + { + "$ref": "#/components/schemas/ControlAdapterDefaultSettings" + }, + { + "type": "null" + } + ] + }, + "base": { + "type": "string", + "const": "sdxl", + "title": "Base", + "default": "sdxl" + } + }, + "type": "object", + "required": [ + "key", + "hash", + "path", + "file_size", + "name", + "description", + "source", + "source_type", + "source_api_response", + "cover_image", + "format", + "repo_variant", + "type", + "default_settings", + "base" + ], + "title": "ControlNet_Diffusers_SDXL_Config" + }, + "ControlOutput": { + "class": "output", + "description": "node output for ControlNet info", + "properties": { + "control": { + "$ref": "#/components/schemas/ControlField", + "description": "ControlNet(s) to apply", + "field_kind": "output", + "ui_hidden": false + }, + "type": { + "const": "control_output", + "default": "control_output", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["output_meta", "control", "type", "type"], + "title": "ControlOutput", + "type": "object" + }, + "CoreMetadataInvocation": { + "additionalProperties": true, + "category": "metadata", + "class": "invocation", + "classification": "internal", + "description": "Used internally by Invoke to collect metadata for generations.", + "node_pack": "invokeai", + "properties": { + "id": { + "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" + }, + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" + }, + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" + }, + "generation_mode": { + "anyOf": [ + { + "enum": [ + "txt2img", + "img2img", + "inpaint", + "outpaint", + "sdxl_txt2img", + "sdxl_img2img", + "sdxl_inpaint", + "sdxl_outpaint", + "flux_txt2img", + "flux_img2img", + "flux_inpaint", + "flux_outpaint", + "flux2_txt2img", + "flux2_img2img", + "flux2_inpaint", + "flux2_outpaint", + "sd3_txt2img", + "sd3_img2img", + "sd3_inpaint", + "sd3_outpaint", + "cogview4_txt2img", + "cogview4_img2img", + "cogview4_inpaint", + "cogview4_outpaint", + "z_image_txt2img", + "z_image_img2img", + "z_image_inpaint", + "z_image_outpaint", + "qwen_image_txt2img", + "qwen_image_img2img", + "qwen_image_inpaint", + "qwen_image_outpaint", + "anima_txt2img", + "anima_img2img", + "anima_inpaint", + "anima_outpaint" + ], + "type": "string" }, { "type": "null" } ], "default": null, - "description": "The high resolution fix img2img strength used in the upscale pass.", + "description": "The generation mode that output this image", "field_kind": "input", "input": "any", "orig_default": null, "orig_required": false, - "title": "Hrf Strength" + "title": "Generation Mode" }, - "positive_style_prompt": { + "positive_prompt": { "anyOf": [ { "type": "string" @@ -15077,14 +17503,14 @@ } ], "default": null, - "description": "The positive style prompt parameter", + "description": "The positive prompt parameter", "field_kind": "input", "input": "any", "orig_default": 
null, "orig_required": false, - "title": "Positive Style Prompt" + "title": "Positive Prompt" }, - "negative_style_prompt": { + "negative_prompt": { "anyOf": [ { "type": "string" @@ -15094,47 +17520,48 @@ } ], "default": null, - "description": "The negative style prompt parameter", + "description": "The negative prompt parameter", "field_kind": "input", "input": "any", "orig_default": null, "orig_required": false, - "title": "Negative Style Prompt" + "title": "Negative Prompt" }, - "refiner_model": { + "width": { "anyOf": [ { - "$ref": "#/components/schemas/ModelIdentifierField" + "type": "integer" }, { "type": "null" } ], "default": null, - "description": "The SDXL Refiner model used", + "description": "The width parameter", "field_kind": "input", "input": "any", "orig_default": null, - "orig_required": false + "orig_required": false, + "title": "Width" }, - "refiner_cfg_scale": { + "height": { "anyOf": [ { - "type": "number" + "type": "integer" }, { "type": "null" } ], "default": null, - "description": "The classifier-free guidance scale parameter used for the refiner", + "description": "The height parameter", "field_kind": "input", "input": "any", "orig_default": null, "orig_required": false, - "title": "Refiner Cfg Scale" + "title": "Height" }, - "refiner_steps": { + "seed": { "anyOf": [ { "type": "integer" @@ -15144,14 +17571,14 @@ } ], "default": null, - "description": "The number of steps used for the refiner", + "description": "The seed used for noise generation", "field_kind": "input", "input": "any", "orig_default": null, "orig_required": false, - "title": "Refiner Steps" + "title": "Seed" }, - "refiner_scheduler": { + "rand_device": { "anyOf": [ { "type": "string" @@ -15161,14 +17588,14 @@ } ], "default": null, - "description": "The scheduler used for the refiner", + "description": "The device used for random number generation", "field_kind": "input", "input": "any", "orig_default": null, "orig_required": false, - "title": "Refiner Scheduler" + "title": 
"Rand Device" }, - "refiner_positive_aesthetic_score": { + "cfg_scale": { "anyOf": [ { "type": "number" @@ -15178,14 +17605,14 @@ } ], "default": null, - "description": "The aesthetic score used for the refiner", + "description": "The classifier-free guidance scale parameter", "field_kind": "input", "input": "any", "orig_default": null, "orig_required": false, - "title": "Refiner Positive Aesthetic Score" + "title": "Cfg Scale" }, - "refiner_negative_aesthetic_score": { + "cfg_rescale_multiplier": { "anyOf": [ { "type": "number" @@ -15195,131 +17622,564 @@ } ], "default": null, - "description": "The aesthetic score used for the refiner", + "description": "Rescale multiplier for CFG guidance, used for models trained with zero-terminal SNR", "field_kind": "input", "input": "any", "orig_default": null, "orig_required": false, - "title": "Refiner Negative Aesthetic Score" + "title": "Cfg Rescale Multiplier" }, - "refiner_start": { + "steps": { "anyOf": [ { - "type": "number" + "type": "integer" }, { "type": "null" } ], "default": null, - "description": "The start value used for refiner denoising", + "description": "The number of steps used for inference", "field_kind": "input", "input": "any", "orig_default": null, "orig_required": false, - "title": "Refiner Start" - }, - "type": { - "const": "core_metadata", - "default": "core_metadata", - "field_kind": "node_attribute", - "title": "type", - "type": "string" - } - }, - "required": ["type", "id"], - "tags": ["metadata"], - "title": "Core Metadata", - "type": "object", - "version": "2.1.0", - "output": { - "$ref": "#/components/schemas/MetadataOutput" - } - }, - "CreateDenoiseMaskInvocation": { - "category": "latents", - "class": "invocation", - "classification": "stable", - "description": "Creates mask for denoising model run.", - "node_pack": "invokeai", - "properties": { - "id": { - "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", - "field_kind": "node_attribute", - "title": "Id", - "type": "string" - }, - "is_intermediate": { - "default": false, - "description": "Whether or not this is an intermediate invocation.", - "field_kind": "node_attribute", - "input": "direct", - "orig_required": true, - "title": "Is Intermediate", - "type": "boolean", - "ui_hidden": false, - "ui_type": "IsIntermediate" - }, - "use_cache": { - "default": true, - "description": "Whether or not to use the cache", - "field_kind": "node_attribute", - "title": "Use Cache", - "type": "boolean" + "title": "Steps" }, - "vae": { + "scheduler": { "anyOf": [ { - "$ref": "#/components/schemas/VAEField" + "type": "string" }, { "type": "null" } ], "default": null, - "description": "VAE", + "description": "The scheduler used for inference", "field_kind": "input", - "input": "connection", - "orig_required": true, - "ui_order": 0 + "input": "any", + "orig_default": null, + "orig_required": false, + "title": "Scheduler" }, - "image": { + "seamless_x": { "anyOf": [ { - "$ref": "#/components/schemas/ImageField" + "type": "boolean" }, { "type": "null" } ], "default": null, - "description": "Image which will be masked", + "description": "Whether seamless tiling was used on the X axis", "field_kind": "input", "input": "any", "orig_default": null, "orig_required": false, - "ui_order": 1 + "title": "Seamless X" }, - "mask": { + "seamless_y": { "anyOf": [ { - "$ref": "#/components/schemas/ImageField" + "type": "boolean" }, { "type": "null" } ], "default": null, - "description": "The mask to use when pasting", + "description": "Whether seamless tiling was used on the Y axis", "field_kind": "input", "input": "any", - "orig_required": true, - "ui_order": 2 + "orig_default": null, + "orig_required": false, + "title": "Seamless Y" }, - "tiled": { - "default": false, - "description": "Processing using overlapping tiles (reduce memory consumption)", - "field_kind": "input", + "clip_skip": { + 
"anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The number of skipped CLIP layers", + "field_kind": "input", + "input": "any", + "orig_default": null, + "orig_required": false, + "title": "Clip Skip" + }, + "model": { + "anyOf": [ + { + "$ref": "#/components/schemas/ModelIdentifierField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The main model used for inference", + "field_kind": "input", + "input": "any", + "orig_default": null, + "orig_required": false + }, + "controlnets": { + "anyOf": [ + { + "items": { + "$ref": "#/components/schemas/ControlNetMetadataField" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The ControlNets used for inference", + "field_kind": "input", + "input": "any", + "orig_default": null, + "orig_required": false, + "title": "Controlnets" + }, + "ipAdapters": { + "anyOf": [ + { + "items": { + "$ref": "#/components/schemas/IPAdapterMetadataField" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The IP Adapters used for inference", + "field_kind": "input", + "input": "any", + "orig_default": null, + "orig_required": false, + "title": "Ipadapters" + }, + "t2iAdapters": { + "anyOf": [ + { + "items": { + "$ref": "#/components/schemas/T2IAdapterMetadataField" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The IP Adapters used for inference", + "field_kind": "input", + "input": "any", + "orig_default": null, + "orig_required": false, + "title": "T2Iadapters" + }, + "loras": { + "anyOf": [ + { + "items": { + "$ref": "#/components/schemas/LoRAMetadataField" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The LoRAs used for inference", + "field_kind": "input", + "input": "any", + "orig_default": null, + "orig_required": false, + "title": "Loras" + }, + "strength": { + "anyOf": [ 
+ { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The strength used for latents-to-latents", + "field_kind": "input", + "input": "any", + "orig_default": null, + "orig_required": false, + "title": "Strength" + }, + "init_image": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The name of the initial image", + "field_kind": "input", + "input": "any", + "orig_default": null, + "orig_required": false, + "title": "Init Image" + }, + "vae": { + "anyOf": [ + { + "$ref": "#/components/schemas/ModelIdentifierField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The VAE used for decoding, if the main model's default was not used", + "field_kind": "input", + "input": "any", + "orig_default": null, + "orig_required": false + }, + "qwen3_encoder": { + "anyOf": [ + { + "$ref": "#/components/schemas/ModelIdentifierField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The Qwen3 text encoder model used for Z-Image inference", + "field_kind": "input", + "input": "any", + "orig_default": null, + "orig_required": false + }, + "hrf_enabled": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Whether or not high resolution fix was enabled.", + "field_kind": "input", + "input": "any", + "orig_default": null, + "orig_required": false, + "title": "Hrf Enabled" + }, + "hrf_method": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The high resolution fix upscale method.", + "field_kind": "input", + "input": "any", + "orig_default": null, + "orig_required": false, + "title": "Hrf Method" + }, + "hrf_strength": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The high resolution fix img2img strength used in the upscale pass.", + "field_kind": "input", + 
"input": "any", + "orig_default": null, + "orig_required": false, + "title": "Hrf Strength" + }, + "positive_style_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The positive style prompt parameter", + "field_kind": "input", + "input": "any", + "orig_default": null, + "orig_required": false, + "title": "Positive Style Prompt" + }, + "negative_style_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The negative style prompt parameter", + "field_kind": "input", + "input": "any", + "orig_default": null, + "orig_required": false, + "title": "Negative Style Prompt" + }, + "refiner_model": { + "anyOf": [ + { + "$ref": "#/components/schemas/ModelIdentifierField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The SDXL Refiner model used", + "field_kind": "input", + "input": "any", + "orig_default": null, + "orig_required": false + }, + "refiner_cfg_scale": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The classifier-free guidance scale parameter used for the refiner", + "field_kind": "input", + "input": "any", + "orig_default": null, + "orig_required": false, + "title": "Refiner Cfg Scale" + }, + "refiner_steps": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The number of steps used for the refiner", + "field_kind": "input", + "input": "any", + "orig_default": null, + "orig_required": false, + "title": "Refiner Steps" + }, + "refiner_scheduler": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The scheduler used for the refiner", + "field_kind": "input", + "input": "any", + "orig_default": null, + "orig_required": false, + "title": "Refiner Scheduler" + }, + "refiner_positive_aesthetic_score": { + "anyOf": [ + { + "type": 
"number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The aesthetic score used for the refiner", + "field_kind": "input", + "input": "any", + "orig_default": null, + "orig_required": false, + "title": "Refiner Positive Aesthetic Score" + }, + "refiner_negative_aesthetic_score": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The aesthetic score used for the refiner", + "field_kind": "input", + "input": "any", + "orig_default": null, + "orig_required": false, + "title": "Refiner Negative Aesthetic Score" + }, + "refiner_start": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The start value used for refiner denoising", + "field_kind": "input", + "input": "any", + "orig_default": null, + "orig_required": false, + "title": "Refiner Start" + }, + "type": { + "const": "core_metadata", + "default": "core_metadata", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["type", "id"], + "tags": ["metadata"], + "title": "Core Metadata", + "type": "object", + "version": "2.1.0", + "output": { + "$ref": "#/components/schemas/MetadataOutput" + } + }, + "CreateDenoiseMaskInvocation": { + "category": "latents", + "class": "invocation", + "classification": "stable", + "description": "Creates mask for denoising model run.", + "node_pack": "invokeai", + "properties": { + "id": { + "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" + }, + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" + }, + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" + }, + "vae": { + "anyOf": [ + { + "$ref": "#/components/schemas/VAEField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "VAE", + "field_kind": "input", + "input": "connection", + "orig_required": true, + "ui_order": 0 + }, + "image": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Image which will be masked", + "field_kind": "input", + "input": "any", + "orig_default": null, + "orig_required": false, + "ui_order": 1 + }, + "mask": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The mask to use when pasting", + "field_kind": "input", + "input": "any", + "orig_required": true, + "ui_order": 2 + }, + "tiled": { + "default": false, + "description": "Processing using overlapping tiles (reduce memory consumption)", + "field_kind": "input", "input": "any", "orig_default": false, "orig_required": false, @@ -16009,28 +18869,101 @@ "$ref": "#/components/schemas/ImageOutput" } }, - "DeleteAllExceptCurrentResult": { - "properties": { - "deleted": { - "type": "integer", - "title": "Deleted", - "description": "Number of queue items deleted" - } - }, - "type": "object", - "required": ["deleted"], - "title": "DeleteAllExceptCurrentResult", - "description": "Result of deleting all except current" - }, 
- "DeleteBoardResult": { + "DecodeInvisibleWatermarkInvocation": { + "category": "image", + "class": "invocation", + "classification": "stable", + "description": "Decode an invisible watermark from an image.", + "node_pack": "invokeai", "properties": { - "board_id": { - "type": "string", - "title": "Board Id", - "description": "The id of the board that was deleted." + "id": { + "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" }, - "deleted_board_images": { - "items": { + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" + }, + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" + }, + "image": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The image to decode the watermark from", + "field_kind": "input", + "input": "any", + "orig_required": true + }, + "length": { + "default": 8, + "description": "The expected watermark length in bytes", + "field_kind": "input", + "input": "any", + "orig_default": 8, + "orig_required": false, + "title": "Length", + "type": "integer" + }, + "type": { + "const": "decode_watermark", + "default": "decode_watermark", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["type", "id"], + "tags": ["image", "watermark"], + "title": "Decode Invisible Watermark", + "type": "object", + "version": "1.0.0", + "output": { + "$ref": "#/components/schemas/StringOutput" + } + }, + "DeleteAllExceptCurrentResult": { + "properties": { + 
"deleted": { + "type": "integer", + "title": "Deleted", + "description": "Number of queue items deleted" + } + }, + "type": "object", + "required": ["deleted"], + "title": "DeleteAllExceptCurrentResult", + "description": "Result of deleting all except current" + }, + "DeleteBoardResult": { + "properties": { + "board_id": { + "type": "string", + "title": "Board Id", + "description": "The id of the board that was deleted." + }, + "deleted_board_images": { + "items": { "type": "string" }, "type": "array", @@ -19450,16 +22383,16 @@ "tags": ["image", "flux", "flux2", "klein", "denoise"], "title": "FLUX2 Denoise", "type": "object", - "version": "1.3.0", + "version": "1.4.0", "output": { "$ref": "#/components/schemas/LatentsOutput" } }, - "Flux2KleinModelLoaderInvocation": { + "Flux2KleinLoRACollectionLoader": { "category": "model", "class": "invocation", "classification": "prototype", - "description": "Loads a Flux2 Klein model, outputting its submodels.\n\nFlux2 Klein uses Qwen3 as the text encoder instead of CLIP+T5.\nIt uses a 32-channel VAE (AutoencoderKLFlux2) instead of the 16-channel FLUX.1 VAE.\n\nWhen using a Diffusers format model, both VAE and Qwen3 encoder are extracted\nautomatically from the main model. 
You can override with standalone models:\n- Transformer: Always from Flux2 Klein main model\n- VAE: From main model (Diffusers) or standalone VAE\n- Qwen3 Encoder: From main model (Diffusers) or standalone Qwen3 model", + "description": "Applies a collection of LoRAs to a FLUX.2 Klein transformer and/or Qwen3 text encoder.", "node_pack": "invokeai", "properties": { "id": { @@ -19486,54 +22419,112 @@ "title": "Use Cache", "type": "boolean" }, - "model": { - "$ref": "#/components/schemas/ModelIdentifierField", - "description": "Flux model (Transformer) to load", + "loras": { + "anyOf": [ + { + "$ref": "#/components/schemas/LoRAField" + }, + { + "items": { + "$ref": "#/components/schemas/LoRAField" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "LoRA models and weights. May be a single LoRA or collection.", "field_kind": "input", - "input": "direct", - "orig_required": true, - "title": "Transformer", - "ui_model_base": ["flux2"], - "ui_model_type": ["main"] + "input": "any", + "orig_default": null, + "orig_required": false, + "title": "LoRAs" }, - "vae_model": { + "transformer": { "anyOf": [ { - "$ref": "#/components/schemas/ModelIdentifierField" + "$ref": "#/components/schemas/TransformerField" }, { "type": "null" } ], "default": null, - "description": "Standalone VAE model. Flux2 Klein uses the same VAE as FLUX (16-channel). If not provided, VAE will be loaded from the Qwen3 Source model.", + "description": "Transformer", "field_kind": "input", - "input": "direct", + "input": "connection", "orig_default": null, "orig_required": false, - "title": "VAE", - "ui_model_base": ["flux", "flux2"], - "ui_model_type": ["vae"] + "title": "Transformer" }, - "qwen3_encoder_model": { + "qwen3_encoder": { "anyOf": [ { - "$ref": "#/components/schemas/ModelIdentifierField" + "$ref": "#/components/schemas/Qwen3EncoderField" }, { "type": "null" } ], "default": null, - "description": "Standalone Qwen3 Encoder model. 
If not provided, encoder will be loaded from the Qwen3 Source model.", + "description": "Qwen3 tokenizer and text encoder", "field_kind": "input", - "input": "direct", + "input": "connection", "orig_default": null, "orig_required": false, - "title": "Qwen3 Encoder", - "ui_model_type": ["qwen3_encoder"] + "title": "Qwen3 Encoder" }, - "qwen3_source_model": { + "type": { + "const": "flux2_klein_lora_collection_loader", + "default": "flux2_klein_lora_collection_loader", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["type", "id"], + "tags": ["lora", "model", "flux", "klein", "flux2"], + "title": "Apply LoRA Collection - Flux2 Klein", + "type": "object", + "version": "1.0.0", + "output": { + "$ref": "#/components/schemas/Flux2KleinLoRALoaderOutput" + } + }, + "Flux2KleinLoRALoaderInvocation": { + "category": "model", + "class": "invocation", + "classification": "prototype", + "description": "Apply a LoRA model to a FLUX.2 Klein transformer and/or Qwen3 text encoder.", + "node_pack": "invokeai", + "properties": { + "id": { + "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" + }, + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" + }, + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" + }, + "lora": { "anyOf": [ { "$ref": "#/components/schemas/ModelIdentifierField" @@ -19543,94 +22534,297 @@ } ], "default": null, - "description": "Diffusers Flux2 Klein model to extract VAE and/or Qwen3 encoder from. 
Use this if you don't have separate VAE/Qwen3 models. Ignored if both VAE and Qwen3 Encoder are provided separately.", + "description": "LoRA model to load", "field_kind": "input", - "input": "direct", - "orig_default": null, - "orig_required": false, - "title": "Qwen3 Source (Diffusers)", + "input": "any", + "orig_required": true, + "title": "LoRA", "ui_model_base": ["flux2"], - "ui_model_format": ["diffusers"], - "ui_model_type": ["main"] + "ui_model_type": ["lora"] }, - "max_seq_len": { - "default": 512, - "description": "Max sequence length for the Qwen3 encoder.", - "enum": [256, 512], + "weight": { + "default": 0.75, + "description": "The weight at which the LoRA is applied to each model", "field_kind": "input", "input": "any", - "orig_default": 512, + "orig_default": 0.75, "orig_required": false, - "title": "Max Seq Length", - "type": "integer" + "title": "Weight", + "type": "number" + }, + "transformer": { + "anyOf": [ + { + "$ref": "#/components/schemas/TransformerField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Transformer", + "field_kind": "input", + "input": "connection", + "orig_default": null, + "orig_required": false, + "title": "Transformer" + }, + "qwen3_encoder": { + "anyOf": [ + { + "$ref": "#/components/schemas/Qwen3EncoderField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Qwen3 tokenizer and text encoder", + "field_kind": "input", + "input": "connection", + "orig_default": null, + "orig_required": false, + "title": "Qwen3 Encoder" }, "type": { - "const": "flux2_klein_model_loader", - "default": "flux2_klein_model_loader", + "const": "flux2_klein_lora_loader", + "default": "flux2_klein_lora_loader", "field_kind": "node_attribute", "title": "type", "type": "string" } }, - "required": ["model", "type", "id"], - "tags": ["model", "flux", "klein", "qwen3"], - "title": "Main Model - Flux2 Klein", + "required": ["type", "id"], + "tags": ["lora", "model", "flux", "klein", "flux2"], + "title": 
"Apply LoRA - Flux2 Klein", "type": "object", "version": "1.0.0", "output": { - "$ref": "#/components/schemas/Flux2KleinModelLoaderOutput" + "$ref": "#/components/schemas/Flux2KleinLoRALoaderOutput" } }, - "Flux2KleinModelLoaderOutput": { + "Flux2KleinLoRALoaderOutput": { "class": "output", - "description": "Flux2 Klein model loader output.", + "description": "FLUX.2 Klein LoRA Loader Output", "properties": { "transformer": { - "$ref": "#/components/schemas/TransformerField", + "anyOf": [ + { + "$ref": "#/components/schemas/TransformerField" + }, + { + "type": "null" + } + ], + "default": null, "description": "Transformer", "field_kind": "output", "title": "Transformer", "ui_hidden": false }, "qwen3_encoder": { - "$ref": "#/components/schemas/Qwen3EncoderField", + "anyOf": [ + { + "$ref": "#/components/schemas/Qwen3EncoderField" + }, + { + "type": "null" + } + ], + "default": null, "description": "Qwen3 tokenizer and text encoder", "field_kind": "output", "title": "Qwen3 Encoder", "ui_hidden": false }, - "vae": { - "$ref": "#/components/schemas/VAEField", - "description": "VAE", - "field_kind": "output", - "title": "VAE", - "ui_hidden": false - }, - "max_seq_len": { - "description": "The max sequence length for the Qwen3 encoder.", - "enum": [256, 512], - "field_kind": "output", - "title": "Max Seq Length", - "type": "integer", - "ui_hidden": false - }, "type": { - "const": "flux2_klein_model_loader_output", - "default": "flux2_klein_model_loader_output", + "const": "flux2_klein_lora_loader_output", + "default": "flux2_klein_lora_loader_output", "field_kind": "node_attribute", "title": "type", "type": "string" } }, - "required": ["output_meta", "transformer", "qwen3_encoder", "vae", "max_seq_len", "type", "type"], - "title": "Flux2KleinModelLoaderOutput", + "required": ["output_meta", "transformer", "qwen3_encoder", "type", "type"], + "title": "Flux2KleinLoRALoaderOutput", "type": "object" }, - "Flux2KleinTextEncoderInvocation": { - "category": "conditioning", + 
"Flux2KleinModelLoaderInvocation": { + "category": "model", "class": "invocation", "classification": "prototype", - "description": "Encodes and preps a prompt for Flux2 Klein image generation.\n\nFlux2 Klein uses Qwen3 as the text encoder, extracting hidden states from\nlayers (9, 18, 27) and stacking them for richer text representations.\nThis matches the diffusers Flux2KleinPipeline implementation exactly.", + "description": "Loads a Flux2 Klein model, outputting its submodels.\n\nFlux2 Klein uses Qwen3 as the text encoder instead of CLIP+T5.\nIt uses a 32-channel VAE (AutoencoderKLFlux2) instead of the 16-channel FLUX.1 VAE.\n\nWhen using a Diffusers format model, both VAE and Qwen3 encoder are extracted\nautomatically from the main model. You can override with standalone models:\n- Transformer: Always from Flux2 Klein main model\n- VAE: From main model (Diffusers) or standalone VAE\n- Qwen3 Encoder: From main model (Diffusers) or standalone Qwen3 model", + "node_pack": "invokeai", + "properties": { + "id": { + "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" + }, + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" + }, + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" + }, + "model": { + "$ref": "#/components/schemas/ModelIdentifierField", + "description": "Flux model (Transformer) to load", + "field_kind": "input", + "input": "direct", + "orig_required": true, + "title": "Transformer", + "ui_model_base": ["flux2"], + "ui_model_type": ["main"] + }, + "vae_model": { + "anyOf": [ + { + "$ref": "#/components/schemas/ModelIdentifierField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Standalone VAE model. Flux2 Klein uses the same VAE as FLUX (16-channel). If not provided, VAE will be loaded from the Qwen3 Source model.", + "field_kind": "input", + "input": "direct", + "orig_default": null, + "orig_required": false, + "title": "VAE", + "ui_model_base": ["flux", "flux2"], + "ui_model_type": ["vae"] + }, + "qwen3_encoder_model": { + "anyOf": [ + { + "$ref": "#/components/schemas/ModelIdentifierField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Standalone Qwen3 Encoder model. 
If not provided, encoder will be loaded from the Qwen3 Source model.", + "field_kind": "input", + "input": "direct", + "orig_default": null, + "orig_required": false, + "title": "Qwen3 Encoder", + "ui_model_type": ["qwen3_encoder"] + }, + "qwen3_source_model": { + "anyOf": [ + { + "$ref": "#/components/schemas/ModelIdentifierField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Diffusers Flux2 Klein model to extract VAE and/or Qwen3 encoder from. Use this if you don't have separate VAE/Qwen3 models. Ignored if both VAE and Qwen3 Encoder are provided separately.", + "field_kind": "input", + "input": "direct", + "orig_default": null, + "orig_required": false, + "title": "Qwen3 Source (Diffusers)", + "ui_model_base": ["flux2"], + "ui_model_format": ["diffusers"], + "ui_model_type": ["main"] + }, + "max_seq_len": { + "default": 512, + "description": "Max sequence length for the Qwen3 encoder.", + "enum": [256, 512], + "field_kind": "input", + "input": "any", + "orig_default": 512, + "orig_required": false, + "title": "Max Seq Length", + "type": "integer" + }, + "type": { + "const": "flux2_klein_model_loader", + "default": "flux2_klein_model_loader", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["model", "type", "id"], + "tags": ["model", "flux", "klein", "qwen3"], + "title": "Main Model - Flux2 Klein", + "type": "object", + "version": "1.0.0", + "output": { + "$ref": "#/components/schemas/Flux2KleinModelLoaderOutput" + } + }, + "Flux2KleinModelLoaderOutput": { + "class": "output", + "description": "Flux2 Klein model loader output.", + "properties": { + "transformer": { + "$ref": "#/components/schemas/TransformerField", + "description": "Transformer", + "field_kind": "output", + "title": "Transformer", + "ui_hidden": false + }, + "qwen3_encoder": { + "$ref": "#/components/schemas/Qwen3EncoderField", + "description": "Qwen3 tokenizer and text encoder", + "field_kind": "output", + "title": "Qwen3 
Encoder", + "ui_hidden": false + }, + "vae": { + "$ref": "#/components/schemas/VAEField", + "description": "VAE", + "field_kind": "output", + "title": "VAE", + "ui_hidden": false + }, + "max_seq_len": { + "description": "The max sequence length for the Qwen3 encoder.", + "enum": [256, 512], + "field_kind": "output", + "title": "Max Seq Length", + "type": "integer", + "ui_hidden": false + }, + "type": { + "const": "flux2_klein_model_loader_output", + "default": "flux2_klein_model_loader_output", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["output_meta", "transformer", "qwen3_encoder", "vae", "max_seq_len", "type", "type"], + "title": "Flux2KleinModelLoaderOutput", + "type": "object" + }, + "Flux2KleinTextEncoderInvocation": { + "category": "conditioning", + "class": "invocation", + "classification": "prototype", + "description": "Encodes and preps a prompt for Flux2 Klein image generation.\n\nFlux2 Klein uses Qwen3 as the text encoder, extracting hidden states from\nlayers (9, 18, 27) and stacking them for richer text representations.\nThis matches the diffusers Flux2KleinPipeline implementation exactly.", "node_pack": "invokeai", "properties": { "id": { @@ -19729,7 +22923,7 @@ "tags": ["prompt", "conditioning", "flux", "klein", "qwen3"], "title": "Prompt - Flux2 Klein", "type": "object", - "version": "1.1.0", + "version": "1.1.1", "output": { "$ref": "#/components/schemas/FluxConditioningOutput" } @@ -19924,7 +23118,7 @@ }, "Flux2VariantType": { "type": "string", - "enum": ["klein_4b", "klein_9b", "klein_9b_base"], + "enum": ["klein_4b", "klein_4b_base", "klein_9b", "klein_9b_base"], "title": "Flux2VariantType", "description": "FLUX.2 model variants." 
}, @@ -22828,6 +26022,19 @@ "$ref": "#/components/schemas/UNetOutput" } }, + "GeneratePasswordResponse": { + "properties": { + "password": { + "type": "string", + "title": "Password", + "description": "Generated strong password" + } + }, + "type": "object", + "required": ["password"], + "title": "GeneratePasswordResponse", + "description": "Response containing a generated password." + }, "GetMaskBoundingBoxInvocation": { "category": "mask", "class": "invocation", @@ -22979,6 +26186,27 @@ { "$ref": "#/components/schemas/AlphaMaskToTensorInvocation" }, + { + "$ref": "#/components/schemas/AnimaDenoiseInvocation" + }, + { + "$ref": "#/components/schemas/AnimaImageToLatentsInvocation" + }, + { + "$ref": "#/components/schemas/AnimaLatentsToImageInvocation" + }, + { + "$ref": "#/components/schemas/AnimaLoRACollectionLoader" + }, + { + "$ref": "#/components/schemas/AnimaLoRALoaderInvocation" + }, + { + "$ref": "#/components/schemas/AnimaModelLoaderInvocation" + }, + { + "$ref": "#/components/schemas/AnimaTextEncoderInvocation" + }, { "$ref": "#/components/schemas/ApplyMaskTensorToImageInvocation" }, @@ -23018,6 +26246,9 @@ { "$ref": "#/components/schemas/CannyEdgeDetectionInvocation" }, + { + "$ref": "#/components/schemas/CanvasOutputInvocation" + }, { "$ref": "#/components/schemas/CanvasPasteBackInvocation" }, @@ -23090,6 +26321,9 @@ { "$ref": "#/components/schemas/DWOpenposeDetectionInvocation" }, + { + "$ref": "#/components/schemas/DecodeInvisibleWatermarkInvocation" + }, { "$ref": "#/components/schemas/DenoiseLatentsInvocation" }, @@ -23147,6 +26381,12 @@ { "$ref": "#/components/schemas/Flux2DenoiseInvocation" }, + { + "$ref": "#/components/schemas/Flux2KleinLoRACollectionLoader" + }, + { + "$ref": "#/components/schemas/Flux2KleinLoRALoaderInvocation" + }, { "$ref": "#/components/schemas/Flux2KleinModelLoaderInvocation" }, @@ -23222,6 +26462,9 @@ { "$ref": "#/components/schemas/IdealSizeInvocation" }, + { + "$ref": "#/components/schemas/IfInvocation" + }, { "$ref": 
"#/components/schemas/ImageBatchInvocation" }, @@ -23501,6 +26744,27 @@ { "$ref": "#/components/schemas/PromptsFromFileInvocation" }, + { + "$ref": "#/components/schemas/QwenImageDenoiseInvocation" + }, + { + "$ref": "#/components/schemas/QwenImageImageToLatentsInvocation" + }, + { + "$ref": "#/components/schemas/QwenImageLatentsToImageInvocation" + }, + { + "$ref": "#/components/schemas/QwenImageLoRACollectionLoader" + }, + { + "$ref": "#/components/schemas/QwenImageLoRALoaderInvocation" + }, + { + "$ref": "#/components/schemas/QwenImageModelLoaderInvocation" + }, + { + "$ref": "#/components/schemas/QwenImageTextEncoderInvocation" + }, { "$ref": "#/components/schemas/RandomFloatInvocation" }, @@ -23673,7 +26937,8 @@ } }, "type": "object", - "title": "Graph" + "title": "Graph", + "description": "A validated invocation graph made of nodes and typed edges." }, "GraphExecutionState": { "properties": { @@ -23710,6 +26975,15 @@ "results": { "additionalProperties": { "oneOf": [ + { + "$ref": "#/components/schemas/AnimaConditioningOutput" + }, + { + "$ref": "#/components/schemas/AnimaLoRALoaderOutput" + }, + { + "$ref": "#/components/schemas/AnimaModelLoaderOutput" + }, { "$ref": "#/components/schemas/BooleanCollectionOutput" }, @@ -23773,6 +27047,9 @@ { "$ref": "#/components/schemas/FloatOutput" }, + { + "$ref": "#/components/schemas/Flux2KleinLoRALoaderOutput" + }, { "$ref": "#/components/schemas/Flux2KleinModelLoaderOutput" }, @@ -23812,6 +27089,9 @@ { "$ref": "#/components/schemas/IdealSizeOutput" }, + { + "$ref": "#/components/schemas/IfInvocationOutput" + }, { "$ref": "#/components/schemas/ImageCollectionOutput" }, @@ -23896,6 +27176,15 @@ { "$ref": "#/components/schemas/PromptTemplateOutput" }, + { + "$ref": "#/components/schemas/QwenImageConditioningOutput" + }, + { + "$ref": "#/components/schemas/QwenImageLoRALoaderOutput" + }, + { + "$ref": "#/components/schemas/QwenImageModelLoaderOutput" + }, { "$ref": "#/components/schemas/SD3ConditioningOutput" }, @@ 
-24019,7 +27308,7 @@ "source_prepared_mapping" ], "title": "GraphExecutionState", - "description": "Tracks the state of a graph execution" + "description": "Tracks source-graph expansion, execution progress, and runtime results." }, "GroundingDinoInvocation": { "category": "image", @@ -24830,6 +28119,93 @@ "title": "IPAdapterOutput", "type": "object" }, + "IPAdapterRecallParameter": { + "properties": { + "model_name": { + "type": "string", + "title": "Model Name", + "description": "The name of the IP Adapter model" + }, + "image_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Image Name", + "description": "The filename of the reference image in outputs/images" + }, + "weight": { + "type": "number", + "maximum": 2.0, + "minimum": -1.0, + "title": "Weight", + "description": "The weight for the IP Adapter", + "default": 1.0 + }, + "begin_step_percent": { + "anyOf": [ + { + "type": "number", + "maximum": 1.0, + "minimum": 0.0 + }, + { + "type": "null" + } + ], + "title": "Begin Step Percent", + "description": "When the IP Adapter is first applied (% of total steps)" + }, + "end_step_percent": { + "anyOf": [ + { + "type": "number", + "maximum": 1.0, + "minimum": 0.0 + }, + { + "type": "null" + } + ], + "title": "End Step Percent", + "description": "When the IP Adapter is last applied (% of total steps)" + }, + "method": { + "anyOf": [ + { + "type": "string", + "enum": ["full", "style", "composition"] + }, + { + "type": "null" + } + ], + "title": "Method", + "description": "The IP Adapter method" + }, + "image_influence": { + "anyOf": [ + { + "type": "string", + "enum": ["lowest", "low", "medium", "high", "highest"] + }, + { + "type": "null" + } + ], + "title": "Image Influence", + "description": "FLUX Redux image influence (if model is flux_redux)" + } + }, + "type": "object", + "required": ["model_name"], + "title": "IPAdapterRecallParameter", + "description": "IP Adapter configuration for recall" + }, 
"IPAdapter_Checkpoint_FLUX_Config": { "properties": { "key": { @@ -25730,6 +29106,125 @@ "title": "IdealSizeOutput", "type": "object" }, + "IfInvocation": { + "category": "logic", + "class": "invocation", + "classification": "stable", + "description": "Selects between two optional inputs based on a boolean condition.", + "node_pack": "invokeai", + "properties": { + "id": { + "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" + }, + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" + }, + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" + }, + "condition": { + "default": false, + "description": "The condition used to select an input", + "field_kind": "input", + "input": "any", + "orig_default": false, + "orig_required": false, + "title": "Condition", + "type": "boolean" + }, + "true_input": { + "anyOf": [ + {}, + { + "type": "null" + } + ], + "default": null, + "description": "Selected when the condition is true", + "field_kind": "input", + "input": "any", + "orig_default": null, + "orig_required": false, + "title": "True Input", + "ui_type": "AnyField" + }, + "false_input": { + "anyOf": [ + {}, + { + "type": "null" + } + ], + "default": null, + "description": "Selected when the condition is false", + "field_kind": "input", + "input": "any", + "orig_default": null, + "orig_required": false, + "title": "False Input", + "ui_type": "AnyField" + }, + "type": { + "const": "if", + "default": "if", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": 
["type", "id"], + "tags": ["logic", "conditional"], + "title": "If", + "type": "object", + "version": "1.0.0", + "output": { + "$ref": "#/components/schemas/IfInvocationOutput" + } + }, + "IfInvocationOutput": { + "class": "output", + "properties": { + "value": { + "anyOf": [ + {}, + { + "type": "null" + } + ], + "default": null, + "description": "The selected value", + "field_kind": "output", + "title": "Output", + "ui_hidden": false, + "ui_type": "AnyField" + }, + "type": { + "const": "if_output", + "default": "if_output", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["output_meta", "value", "type", "type"], + "title": "IfInvocationOutput", + "type": "object" + }, "ImageBatchInvocation": { "category": "primitives", "class": "invocation", @@ -29919,6 +33414,12 @@ "description": "The destination of the queue item", "title": "Destination" }, + "user_id": { + "default": "system", + "description": "The ID of the user who created the queue item", + "title": "User Id", + "type": "string" + }, "session_id": { "description": "The ID of the session (aka graph execution state)", "title": "Session Id", @@ -29933,6 +33434,27 @@ { "$ref": "#/components/schemas/AlphaMaskToTensorInvocation" }, + { + "$ref": "#/components/schemas/AnimaDenoiseInvocation" + }, + { + "$ref": "#/components/schemas/AnimaImageToLatentsInvocation" + }, + { + "$ref": "#/components/schemas/AnimaLatentsToImageInvocation" + }, + { + "$ref": "#/components/schemas/AnimaLoRACollectionLoader" + }, + { + "$ref": "#/components/schemas/AnimaLoRALoaderInvocation" + }, + { + "$ref": "#/components/schemas/AnimaModelLoaderInvocation" + }, + { + "$ref": "#/components/schemas/AnimaTextEncoderInvocation" + }, { "$ref": "#/components/schemas/ApplyMaskTensorToImageInvocation" }, @@ -29972,6 +33494,9 @@ { "$ref": "#/components/schemas/CannyEdgeDetectionInvocation" }, + { + "$ref": "#/components/schemas/CanvasOutputInvocation" + }, { "$ref": 
"#/components/schemas/CanvasPasteBackInvocation" }, @@ -30044,6 +33569,9 @@ { "$ref": "#/components/schemas/DWOpenposeDetectionInvocation" }, + { + "$ref": "#/components/schemas/DecodeInvisibleWatermarkInvocation" + }, { "$ref": "#/components/schemas/DenoiseLatentsInvocation" }, @@ -30101,6 +33629,12 @@ { "$ref": "#/components/schemas/Flux2DenoiseInvocation" }, + { + "$ref": "#/components/schemas/Flux2KleinLoRACollectionLoader" + }, + { + "$ref": "#/components/schemas/Flux2KleinLoRALoaderInvocation" + }, { "$ref": "#/components/schemas/Flux2KleinModelLoaderInvocation" }, @@ -30176,6 +33710,9 @@ { "$ref": "#/components/schemas/IdealSizeInvocation" }, + { + "$ref": "#/components/schemas/IfInvocation" + }, { "$ref": "#/components/schemas/ImageBatchInvocation" }, @@ -30455,6 +33992,27 @@ { "$ref": "#/components/schemas/PromptsFromFileInvocation" }, + { + "$ref": "#/components/schemas/QwenImageDenoiseInvocation" + }, + { + "$ref": "#/components/schemas/QwenImageImageToLatentsInvocation" + }, + { + "$ref": "#/components/schemas/QwenImageLatentsToImageInvocation" + }, + { + "$ref": "#/components/schemas/QwenImageLoRACollectionLoader" + }, + { + "$ref": "#/components/schemas/QwenImageLoRALoaderInvocation" + }, + { + "$ref": "#/components/schemas/QwenImageModelLoaderInvocation" + }, + { + "$ref": "#/components/schemas/QwenImageTextEncoderInvocation" + }, { "$ref": "#/components/schemas/RandomFloatInvocation" }, @@ -30622,6 +34180,15 @@ "result": { "description": "The result of the invocation", "oneOf": [ + { + "$ref": "#/components/schemas/AnimaConditioningOutput" + }, + { + "$ref": "#/components/schemas/AnimaLoRALoaderOutput" + }, + { + "$ref": "#/components/schemas/AnimaModelLoaderOutput" + }, { "$ref": "#/components/schemas/BooleanCollectionOutput" }, @@ -30685,6 +34252,9 @@ { "$ref": "#/components/schemas/FloatOutput" }, + { + "$ref": "#/components/schemas/Flux2KleinLoRALoaderOutput" + }, { "$ref": "#/components/schemas/Flux2KleinModelLoaderOutput" }, @@ -30724,6 
+34294,9 @@ { "$ref": "#/components/schemas/IdealSizeOutput" }, + { + "$ref": "#/components/schemas/IfInvocationOutput" + }, { "$ref": "#/components/schemas/ImageCollectionOutput" }, @@ -30808,6 +34381,15 @@ { "$ref": "#/components/schemas/PromptTemplateOutput" }, + { + "$ref": "#/components/schemas/QwenImageConditioningOutput" + }, + { + "$ref": "#/components/schemas/QwenImageLoRALoaderOutput" + }, + { + "$ref": "#/components/schemas/QwenImageModelLoaderOutput" + }, { "$ref": "#/components/schemas/SD3ConditioningOutput" }, @@ -30879,6 +34461,7 @@ "batch_id", "origin", "destination", + "user_id", "session_id", "invocation", "invocation_source_id", @@ -30936,6 +34519,12 @@ "description": "The destination of the queue item", "title": "Destination" }, + "user_id": { + "default": "system", + "description": "The ID of the user who created the queue item", + "title": "User Id", + "type": "string" + }, "session_id": { "description": "The ID of the session (aka graph execution state)", "title": "Session Id", @@ -30950,6 +34539,27 @@ { "$ref": "#/components/schemas/AlphaMaskToTensorInvocation" }, + { + "$ref": "#/components/schemas/AnimaDenoiseInvocation" + }, + { + "$ref": "#/components/schemas/AnimaImageToLatentsInvocation" + }, + { + "$ref": "#/components/schemas/AnimaLatentsToImageInvocation" + }, + { + "$ref": "#/components/schemas/AnimaLoRACollectionLoader" + }, + { + "$ref": "#/components/schemas/AnimaLoRALoaderInvocation" + }, + { + "$ref": "#/components/schemas/AnimaModelLoaderInvocation" + }, + { + "$ref": "#/components/schemas/AnimaTextEncoderInvocation" + }, { "$ref": "#/components/schemas/ApplyMaskTensorToImageInvocation" }, @@ -30989,6 +34599,9 @@ { "$ref": "#/components/schemas/CannyEdgeDetectionInvocation" }, + { + "$ref": "#/components/schemas/CanvasOutputInvocation" + }, { "$ref": "#/components/schemas/CanvasPasteBackInvocation" }, @@ -31061,6 +34674,9 @@ { "$ref": "#/components/schemas/DWOpenposeDetectionInvocation" }, + { + "$ref": 
"#/components/schemas/DecodeInvisibleWatermarkInvocation" + }, { "$ref": "#/components/schemas/DenoiseLatentsInvocation" }, @@ -31118,6 +34734,12 @@ { "$ref": "#/components/schemas/Flux2DenoiseInvocation" }, + { + "$ref": "#/components/schemas/Flux2KleinLoRACollectionLoader" + }, + { + "$ref": "#/components/schemas/Flux2KleinLoRALoaderInvocation" + }, { "$ref": "#/components/schemas/Flux2KleinModelLoaderInvocation" }, @@ -31193,6 +34815,9 @@ { "$ref": "#/components/schemas/IdealSizeInvocation" }, + { + "$ref": "#/components/schemas/IfInvocation" + }, { "$ref": "#/components/schemas/ImageBatchInvocation" }, @@ -31472,6 +35097,27 @@ { "$ref": "#/components/schemas/PromptsFromFileInvocation" }, + { + "$ref": "#/components/schemas/QwenImageDenoiseInvocation" + }, + { + "$ref": "#/components/schemas/QwenImageImageToLatentsInvocation" + }, + { + "$ref": "#/components/schemas/QwenImageLatentsToImageInvocation" + }, + { + "$ref": "#/components/schemas/QwenImageLoRACollectionLoader" + }, + { + "$ref": "#/components/schemas/QwenImageLoRALoaderInvocation" + }, + { + "$ref": "#/components/schemas/QwenImageModelLoaderInvocation" + }, + { + "$ref": "#/components/schemas/QwenImageTextEncoderInvocation" + }, { "$ref": "#/components/schemas/RandomFloatInvocation" }, @@ -31659,6 +35305,7 @@ "batch_id", "origin", "destination", + "user_id", "session_id", "invocation", "invocation_source_id", @@ -31678,6 +35325,27 @@ "alpha_mask_to_tensor": { "$ref": "#/components/schemas/MaskOutput" }, + "anima_denoise": { + "$ref": "#/components/schemas/LatentsOutput" + }, + "anima_i2l": { + "$ref": "#/components/schemas/LatentsOutput" + }, + "anima_l2i": { + "$ref": "#/components/schemas/ImageOutput" + }, + "anima_lora_collection_loader": { + "$ref": "#/components/schemas/AnimaLoRALoaderOutput" + }, + "anima_lora_loader": { + "$ref": "#/components/schemas/AnimaLoRALoaderOutput" + }, + "anima_model_loader": { + "$ref": "#/components/schemas/AnimaModelLoaderOutput" + }, + "anima_text_encoder": { + 
"$ref": "#/components/schemas/AnimaConditioningOutput" + }, "apply_mask_to_image": { "$ref": "#/components/schemas/ImageOutput" }, @@ -31708,6 +35376,9 @@ "canny_edge_detection": { "$ref": "#/components/schemas/ImageOutput" }, + "canvas_output": { + "$ref": "#/components/schemas/ImageOutput" + }, "canvas_paste_back": { "$ref": "#/components/schemas/ImageOutput" }, @@ -31777,6 +35448,9 @@ "cv_inpaint": { "$ref": "#/components/schemas/ImageOutput" }, + "decode_watermark": { + "$ref": "#/components/schemas/StringOutput" + }, "denoise_latents": { "$ref": "#/components/schemas/LatentsOutput" }, @@ -31834,6 +35508,12 @@ "flux2_denoise": { "$ref": "#/components/schemas/LatentsOutput" }, + "flux2_klein_lora_collection_loader": { + "$ref": "#/components/schemas/Flux2KleinLoRALoaderOutput" + }, + "flux2_klein_lora_loader": { + "$ref": "#/components/schemas/Flux2KleinLoRALoaderOutput" + }, "flux2_klein_model_loader": { "$ref": "#/components/schemas/Flux2KleinModelLoaderOutput" }, @@ -31912,6 +35592,9 @@ "ideal_size": { "$ref": "#/components/schemas/IdealSizeOutput" }, + "if": { + "$ref": "#/components/schemas/IfInvocationOutput" + }, "image": { "$ref": "#/components/schemas/ImageOutput" }, @@ -32200,6 +35883,27 @@ "prompt_template": { "$ref": "#/components/schemas/PromptTemplateOutput" }, + "qwen_image_denoise": { + "$ref": "#/components/schemas/LatentsOutput" + }, + "qwen_image_i2l": { + "$ref": "#/components/schemas/LatentsOutput" + }, + "qwen_image_l2i": { + "$ref": "#/components/schemas/ImageOutput" + }, + "qwen_image_lora_collection_loader": { + "$ref": "#/components/schemas/QwenImageLoRALoaderOutput" + }, + "qwen_image_lora_loader": { + "$ref": "#/components/schemas/QwenImageLoRALoaderOutput" + }, + "qwen_image_model_loader": { + "$ref": "#/components/schemas/QwenImageModelLoaderOutput" + }, + "qwen_image_text_encoder": { + "$ref": "#/components/schemas/QwenImageConditioningOutput" + }, "rand_float": { "$ref": "#/components/schemas/FloatOutput" }, @@ -32358,234 
+36062,253 @@ } }, "required": [ - "integer_batch", - "img_ilerp", + "z_image_denoise", + "dynamic_prompt", + "boolean", + "sdxl_lora_collection_loader", + "collect", + "conditioning", + "mediapipe_face_detection", + "color_map", "image_panel_layout", - "t2i_adapter", - "metadata_to_model", + "img_paste", + "z_image_control", + "z_image_text_encoder", + "sd3_text_encoder", + "metadata_to_loras", + "img_pad_crop", + "color", + "pbr_maps", + "sd3_i2l", + "metadata_to_sdxl_model", + "cv_inpaint", + "float_to_int", + "heuristic_resize", + "latents_collection", + "scheduler", + "z_image_i2l", + "compel", + "flux_lora_collection_loader", + "flux2_klein_text_encoder", + "flux_denoise", + "l2i", + "z_image_l2i", + "crop_latents", + "image_batch", "flux2_klein_model_loader", - "bounding_box", - "integer_generator", - "flux_text_encoder", - "img_resize", - "z_image_seed_variance_enhancer", - "metadata_to_integer", - "apply_tensor_mask_to_image", - "metadata_to_bool_collection", - "vae_loader", + "metadata_to_integer_collection", + "depth_anything_depth_estimation", + "metadata_field_extractor", + "mask_edge", "float_batch", - "paste_image_into_bounding_box", - "unsharp_mask", - "invokeai_ealightness", - "flux_denoise", - "color_map", - "face_identifier", - "float_math", - "round_float", - "z_image_denoise_meta", - "flux_controlnet", - "metadata_to_float", - "cogview4_text_encoder", - "calculate_image_tiles_even_split", - "lineart_edge_detection", + "img_mul", + "metadata", + "llava_onevision_vllm", + "metadata_to_string_collection", + "random_range", + "infill_cv2", + "esrgan", + "tomask", + "anima_model_loader", + "flux_text_encoder", + "content_shuffle", + "grounding_dino", + "spandrel_image_to_image", + "sdxl_compel_prompt", + "prompt_template", + "infill_rgba", "img_noise", - "color", + "lineart_edge_detection", + "string_collection", + "anima_lora_loader", + "apply_mask_to_image", + "img_blur", + "create_denoise_mask", + "metadata_item_linked", + "img_lerp", + 
"qwen_image_lora_collection_loader", "i2l", - "metadata_item", - "metadata_to_string_collection", "integer", + "decode_watermark", + "infill_tile", + "lora_loader", + "metadata_to_lora_collection", + "flux2_vae_decode", + "denoise_latents_meta", + "mlsd_detection", + "integer_generator", + "flux2_vae_encode", + "rand_float", + "img_resize", + "metadata_to_float_collection", + "pair_tile_image", + "anima_l2i", "clip_skip", - "cogview4_i2l", - "image_mask_to_tensor", + "range_of_size", + "create_gradient_mask", + "paste_image_into_bounding_box", + "float", + "canny_edge_detection", + "calculate_image_tiles_min_overlap", + "cogview4_model_loader", + "img_scale", "dw_openpose_detection", - "crop_latents", - "img_blur", - "invokeai_img_blend", - "grounding_dino", - "core_metadata", - "metadata_to_t2i_adapters", - "z_image_lora_loader", - "sd3_denoise", - "lineart_anime_edge_detection", - "lblend", - "image_collection", - "metadata_to_loras", - "flux_fill", - "cogview4_l2i", + "flux_control_lora_loader", + "string_split_neg", "sdxl_refiner_model_loader", - "mediapipe_face_detection", - "infill_tile", - "alpha_mask_to_tensor", - "create_denoise_mask", - "noise", - "collect", - "calculate_image_tiles_min_overlap", - "string_join", - "div", - "invert_tensor_mask", - "crop_image_to_bounding_box", + "qwen_image_lora_loader", + "z_image_lora_loader", + "string", + "tensor_mask_to_image", "latents", - "z_image_text_encoder", - "metadata", - "flux2_klein_text_encoder", + "denoise_latents", + "z_image_denoise_meta", + "metadata_to_vae", + "metadata_to_controlnets", + "cogview4_denoise", + "flux2_klein_lora_collection_loader", + "qwen_image_model_loader", + "flux2_denoise", + "sdxl_lora_loader", + "metadata_to_bool", + "flux_lora_loader", + "rectangle_mask", + "img_ilerp", + "add", + "sd3_denoise", "img_channel_offset", + "t2i_adapter", + "string_join", + "boolean_collection", "metadata_to_scheduler", - "dynamic_prompt", - "seamless", - "spandrel_image_to_image", + "show_image", + 
"integer_collection", "string_join_three", - "depth_anything_depth_estimation", - "cv_inpaint", - "float_collection", - "invokeai_img_dilate_erode", - "lresize", + "alpha_mask_to_tensor", + "metadata_to_string", + "img_watermark", + "metadata_to_ip_adapters", + "flux2_klein_lora_loader", + "mul", + "blank_image", + "prompt_from_file", + "float_range", + "flux_redux", + "invokeai_img_enhance", + "freeu", + "anima_denoise", + "invokeai_img_blend", + "canvas_paste_back", + "mask_from_id", + "canvas_v2_mask_and_crop", + "crop_image_to_bounding_box", + "main_model_loader", + "canvas_output", + "ideal_size", + "merge_metadata", + "lineart_anime_edge_detection", + "mask_combine", "iterate", - "flux2_vae_decode", - "metadata_to_sdxl_model", - "apply_mask_to_image", - "img_chan", - "esrgan", - "string_split", - "lscale", - "image_generator", + "metadata_to_t2i_adapters", + "unsharp_mask", + "qwen_image_i2l", + "qwen_image_text_encoder", + "flux_controlnet", + "string_generator", + "metadata_from_image", + "z_image_seed_variance_enhancer", + "invokeai_img_composite", + "metadata_item", + "model_identifier", + "integer_math", + "lora_selector", + "string_batch", + "lresize", + "string_replace", + "invokeai_img_dilate_erode", + "color_correct", + "round_float", + "core_metadata", "img_channel_multiply", - "img_pad_crop", - "show_image", + "image_collection", + "lscale", + "conditioning_collection", + "flux_vae_encode", + "invokeai_ealightness", + "lblend", + "rand_int", + "flux_denoise_meta", + "img_hue_adjust", "range", - "calculate_image_tiles", - "get_image_mask_bounding_box", - "sdxl_model_loader", - "metadata_to_integer_collection", - "prompt_from_file", - "rectangle_mask", - "canny_edge_detection", - "heuristic_resize", - "blank_image", - "latents_collection", - "flux_model_loader", - "denoise_latents_meta", + "float_math", + "calculate_image_tiles_even_split", + "segment_anything", + "flux_ip_adapter", + "save_image", + "metadata_to_model", "z_image_model_loader", - 
"rand_float", - "l2i", - "img_conv", - "controlnet", - "flux_redux", - "boolean_collection", "flux_kontext", - "metadata_to_ip_adapters", - "metadata_to_string", - "cogview4_model_loader", - "z_image_l2i", - "add", - "lora_collection_loader", - "string_batch", - "img_nsfw", - "content_shuffle", - "conditioning_collection", - "string", - "metadata_to_float_collection", - "random_range", - "flux2_denoise", + "flux_fill", + "img_chan", + "seamless", + "metadata_to_sdlx_loras", + "div", + "image_generator", + "anima_text_encoder", "tile_to_properties", - "mask_edge", - "ideal_size", - "sdxl_refiner_compel_prompt", - "string_split_neg", - "sub", - "sd3_i2l", - "string_generator", - "img_mul", + "flux_kontext_image_prep", + "face_mask_detection", "invokeai_img_val_thresholds", - "pidi_edge_detection", - "flux_lora_loader", - "metadata_to_lora_collection", - "canvas_paste_back", - "tomask", - "string_collection", - "metadata_to_sdlx_loras", - "compel", - "sd3_model_loader", - "denoise_latents", - "flux_kontext_image_prep", - "mlsd_detection", - "pair_tile_image", - "flux2_vae_encode", - "flux_denoise_meta", - "float_to_int", - "img_lerp", - "flux_vae_encode", - "mul", - "z_image_denoise", + "float_collection", + "controlnet", + "face_off", + "get_image_mask_bounding_box", + "float_generator", + "ip_adapter", + "cogview4_i2l", + "tiled_multi_diffusion_denoise_latents", + "metadata_to_bool_collection", + "qwen_image_denoise", + "sdxl_model_loader", + "calculate_image_tiles", + "invert_tensor_mask", + "infill_patchmatch", + "face_identifier", "img_crop", - "invokeai_img_composite", - "prompt_template", - "sdxl_compel_prompt", - "metadata_to_bool", - "sdxl_lora_loader", - "lora_loader", - "metadata_from_image", - "string_replace", - "infill_lama", - "img_paste", - "invokeai_img_enhance", + "vae_loader", "hed_edge_detection", - "merge_metadata", - "float_range", - "range_of_size", - "integer_math", - "sd3_text_encoder", - "model_identifier", - "infill_patchmatch", - 
"img_watermark", - "scheduler", - "flux_control_lora_loader", - "spandrel_image_to_image_autoscale", - "image_batch", - "image", + "infill_lama", + "noise", + "anima_i2l", "flux_vae_decode", - "float", - "cogview4_denoise", - "segment_anything", - "mask_from_id", - "img_hue_adjust", - "normal_map", - "flux_lora_collection_loader", - "z_image_lora_collection_loader", - "metadata_item_linked", - "freeu", - "face_off", - "z_image_control", - "lora_selector", - "llava_onevision_vllm", - "metadata_field_extractor", - "metadata_to_vae", - "mask_combine", - "infill_rgba", - "img_scale", - "conditioning", - "boolean", - "rand_int", - "sd3_l2i", + "cogview4_text_encoder", + "sd3_model_loader", + "flux_model_loader", + "bounding_box", + "pidi_edge_detection", + "integer_batch", + "apply_tensor_mask_to_image", + "image", "expand_mask_with_fade", - "create_gradient_mask", - "color_correct", - "metadata_to_controlnets", - "float_generator", - "save_image", + "sub", + "spandrel_image_to_image_autoscale", + "normal_map", "invokeai_img_hue_adjust_plus", - "flux_ip_adapter", - "tiled_multi_diffusion_denoise_latents", - "sdxl_lora_collection_loader", - "z_image_i2l", - "infill_cv2", - "ip_adapter", - "canvas_v2_mask_and_crop", - "pbr_maps", - "integer_collection", + "metadata_to_integer", "merge_tiles_to_image", - "face_mask_detection", - "tensor_mask_to_image", - "main_model_loader" + "img_nsfw", + "string_split", + "metadata_to_float", + "lora_collection_loader", + "image_mask_to_tensor", + "qwen_image_l2i", + "z_image_lora_collection_loader", + "img_conv", + "sdxl_refiner_compel_prompt", + "cogview4_l2i", + "if", + "sd3_l2i", + "anima_lora_collection_loader" ] }, "InvocationProgressEvent": { @@ -32637,6 +36360,12 @@ "description": "The destination of the queue item", "title": "Destination" }, + "user_id": { + "default": "system", + "description": "The ID of the user who created the queue item", + "title": "User Id", + "type": "string" + }, "session_id": { "description": "The ID 
of the session (aka graph execution state)", "title": "Session Id", @@ -32651,6 +36380,27 @@ { "$ref": "#/components/schemas/AlphaMaskToTensorInvocation" }, + { + "$ref": "#/components/schemas/AnimaDenoiseInvocation" + }, + { + "$ref": "#/components/schemas/AnimaImageToLatentsInvocation" + }, + { + "$ref": "#/components/schemas/AnimaLatentsToImageInvocation" + }, + { + "$ref": "#/components/schemas/AnimaLoRACollectionLoader" + }, + { + "$ref": "#/components/schemas/AnimaLoRALoaderInvocation" + }, + { + "$ref": "#/components/schemas/AnimaModelLoaderInvocation" + }, + { + "$ref": "#/components/schemas/AnimaTextEncoderInvocation" + }, { "$ref": "#/components/schemas/ApplyMaskTensorToImageInvocation" }, @@ -32690,6 +36440,9 @@ { "$ref": "#/components/schemas/CannyEdgeDetectionInvocation" }, + { + "$ref": "#/components/schemas/CanvasOutputInvocation" + }, { "$ref": "#/components/schemas/CanvasPasteBackInvocation" }, @@ -32762,6 +36515,9 @@ { "$ref": "#/components/schemas/DWOpenposeDetectionInvocation" }, + { + "$ref": "#/components/schemas/DecodeInvisibleWatermarkInvocation" + }, { "$ref": "#/components/schemas/DenoiseLatentsInvocation" }, @@ -32819,6 +36575,12 @@ { "$ref": "#/components/schemas/Flux2DenoiseInvocation" }, + { + "$ref": "#/components/schemas/Flux2KleinLoRACollectionLoader" + }, + { + "$ref": "#/components/schemas/Flux2KleinLoRALoaderInvocation" + }, { "$ref": "#/components/schemas/Flux2KleinModelLoaderInvocation" }, @@ -32894,6 +36656,9 @@ { "$ref": "#/components/schemas/IdealSizeInvocation" }, + { + "$ref": "#/components/schemas/IfInvocation" + }, { "$ref": "#/components/schemas/ImageBatchInvocation" }, @@ -33173,6 +36938,27 @@ { "$ref": "#/components/schemas/PromptsFromFileInvocation" }, + { + "$ref": "#/components/schemas/QwenImageDenoiseInvocation" + }, + { + "$ref": "#/components/schemas/QwenImageImageToLatentsInvocation" + }, + { + "$ref": "#/components/schemas/QwenImageLatentsToImageInvocation" + }, + { + "$ref": 
"#/components/schemas/QwenImageLoRACollectionLoader" + }, + { + "$ref": "#/components/schemas/QwenImageLoRALoaderInvocation" + }, + { + "$ref": "#/components/schemas/QwenImageModelLoaderInvocation" + }, + { + "$ref": "#/components/schemas/QwenImageTextEncoderInvocation" + }, { "$ref": "#/components/schemas/RandomFloatInvocation" }, @@ -33377,6 +37163,7 @@ "batch_id", "origin", "destination", + "user_id", "session_id", "invocation", "invocation_source_id", @@ -33436,6 +37223,12 @@ "description": "The destination of the queue item", "title": "Destination" }, + "user_id": { + "default": "system", + "description": "The ID of the user who created the queue item", + "title": "User Id", + "type": "string" + }, "session_id": { "description": "The ID of the session (aka graph execution state)", "title": "Session Id", @@ -33450,6 +37243,27 @@ { "$ref": "#/components/schemas/AlphaMaskToTensorInvocation" }, + { + "$ref": "#/components/schemas/AnimaDenoiseInvocation" + }, + { + "$ref": "#/components/schemas/AnimaImageToLatentsInvocation" + }, + { + "$ref": "#/components/schemas/AnimaLatentsToImageInvocation" + }, + { + "$ref": "#/components/schemas/AnimaLoRACollectionLoader" + }, + { + "$ref": "#/components/schemas/AnimaLoRALoaderInvocation" + }, + { + "$ref": "#/components/schemas/AnimaModelLoaderInvocation" + }, + { + "$ref": "#/components/schemas/AnimaTextEncoderInvocation" + }, { "$ref": "#/components/schemas/ApplyMaskTensorToImageInvocation" }, @@ -33489,6 +37303,9 @@ { "$ref": "#/components/schemas/CannyEdgeDetectionInvocation" }, + { + "$ref": "#/components/schemas/CanvasOutputInvocation" + }, { "$ref": "#/components/schemas/CanvasPasteBackInvocation" }, @@ -33561,6 +37378,9 @@ { "$ref": "#/components/schemas/DWOpenposeDetectionInvocation" }, + { + "$ref": "#/components/schemas/DecodeInvisibleWatermarkInvocation" + }, { "$ref": "#/components/schemas/DenoiseLatentsInvocation" }, @@ -33618,6 +37438,12 @@ { "$ref": "#/components/schemas/Flux2DenoiseInvocation" }, + { + 
"$ref": "#/components/schemas/Flux2KleinLoRACollectionLoader" + }, + { + "$ref": "#/components/schemas/Flux2KleinLoRALoaderInvocation" + }, { "$ref": "#/components/schemas/Flux2KleinModelLoaderInvocation" }, @@ -33693,6 +37519,9 @@ { "$ref": "#/components/schemas/IdealSizeInvocation" }, + { + "$ref": "#/components/schemas/IfInvocation" + }, { "$ref": "#/components/schemas/ImageBatchInvocation" }, @@ -33972,6 +37801,27 @@ { "$ref": "#/components/schemas/PromptsFromFileInvocation" }, + { + "$ref": "#/components/schemas/QwenImageDenoiseInvocation" + }, + { + "$ref": "#/components/schemas/QwenImageImageToLatentsInvocation" + }, + { + "$ref": "#/components/schemas/QwenImageLatentsToImageInvocation" + }, + { + "$ref": "#/components/schemas/QwenImageLoRACollectionLoader" + }, + { + "$ref": "#/components/schemas/QwenImageLoRALoaderInvocation" + }, + { + "$ref": "#/components/schemas/QwenImageModelLoaderInvocation" + }, + { + "$ref": "#/components/schemas/QwenImageTextEncoderInvocation" + }, { "$ref": "#/components/schemas/RandomFloatInvocation" }, @@ -34144,6 +37994,7 @@ "batch_id", "origin", "destination", + "user_id", "session_id", "invocation", "invocation_source_id" @@ -34643,12 +38494,24 @@ "title": "Allow Unknown Models", "description": "Allow installation of models that we are unable to identify. If enabled, models will be marked as `unknown` in the database, and will not have any metadata associated with them. If disabled, unknown models will be rejected during installation.", "default": true + }, + "multiuser": { + "type": "boolean", + "title": "Multiuser", + "description": "Enable multiuser support. When disabled, the application runs in single-user mode using a default system account with administrator privileges. When enabled, requires user authentication and authorization.", + "default": false + }, + "strict_password_checking": { + "type": "boolean", + "title": "Strict Password Checking", + "description": "Enforce strict password requirements. 
When True, passwords must contain uppercase, lowercase, and numbers. When False (default), any password is accepted but its strength (weak/moderate/strong) is reported to the user.", + "default": false } }, "additionalProperties": false, "type": "object", "title": "InvokeAIAppConfig", - "description": "Invoke's global app configuration.\n\nTypically, you won't need to interact with this class directly. Instead, use the `get_config` function from `invokeai.app.services.config` to get a singleton config object.\n\nAttributes:\n host: IP address to bind to. Use `0.0.0.0` to serve to your local network.\n port: Port to bind to.\n allow_origins: Allowed CORS origins.\n allow_credentials: Allow CORS credentials.\n allow_methods: Methods allowed for CORS.\n allow_headers: Headers allowed for CORS.\n ssl_certfile: SSL certificate file for HTTPS. See https://www.uvicorn.org/settings/#https.\n ssl_keyfile: SSL key file for HTTPS. See https://www.uvicorn.org/settings/#https.\n log_tokenization: Enable logging of parsed prompt tokens.\n patchmatch: Enable patchmatch inpaint code.\n models_dir: Path to the models directory.\n convert_cache_dir: Path to the converted models cache directory (DEPRECATED, but do not delete because it is needed for migration from previous versions).\n download_cache_dir: Path to the directory that contains dynamically downloaded models.\n legacy_conf_dir: Path to directory of legacy checkpoint config files.\n db_dir: Path to InvokeAI databases directory.\n outputs_dir: Path to directory for outputs.\n custom_nodes_dir: Path to directory for custom nodes.\n style_presets_dir: Path to directory for style presets.\n workflow_thumbnails_dir: Path to directory for workflow thumbnails.\n log_handlers: Log handler. Valid options are \"console\", \"file=\", \"syslog=path|address:host:port\", \"http=\".\n log_format: Log format. Use \"plain\" for text-only, \"color\" for colorized output, \"legacy\" for 2.3-style logging and \"syslog\" for syslog-style.
Valid values: `plain`, `color`, `syslog`, `legacy`\n log_level: Emit logging messages at this level or higher.
Valid values: `debug`, `info`, `warning`, `error`, `critical`\n log_sql: Log SQL queries. `log_level` must be `debug` for this to do anything. Extremely verbose.\n log_level_network: Log level for network-related messages. 'info' and 'debug' are very verbose.
Valid values: `debug`, `info`, `warning`, `error`, `critical`\n use_memory_db: Use in-memory database. Useful for development.\n dev_reload: Automatically reload when Python sources are changed. Does not reload node definitions.\n profile_graphs: Enable graph profiling using `cProfile`.\n profile_prefix: An optional prefix for profile output files.\n profiles_dir: Path to profiles output directory.\n max_cache_ram_gb: The maximum amount of CPU RAM to use for model caching in GB. If unset, the limit will be configured based on the available RAM. In most cases, it is recommended to leave this unset.\n max_cache_vram_gb: The amount of VRAM to use for model caching in GB. If unset, the limit will be configured based on the available VRAM and the device_working_mem_gb. In most cases, it is recommended to leave this unset.\n log_memory_usage: If True, a memory snapshot will be captured before and after every model cache operation, and the result will be logged (at debug level). There is a time cost to capturing the memory snapshots, so it is recommended to only enable this feature if you are actively inspecting the model cache's behaviour.\n model_cache_keep_alive_min: How long to keep models in cache after last use, in minutes. A value of 0 (the default) means models are kept in cache indefinitely. If no model generations occur within the timeout period, the model cache is cleared using the same logic as the 'Clear Model Cache' button.\n device_working_mem_gb: The amount of working memory to keep available on the compute device (in GB). Has no effect if running on CPU. If you are experiencing OOM errors, try increasing this value.\n enable_partial_loading: Enable partial loading of models. This enables models to run with reduced VRAM requirements (at the cost of slower speed) by streaming the model from RAM to VRAM as its used. 
In some edge cases, partial loading can cause models to run more slowly if they were previously being fully loaded into VRAM.\n keep_ram_copy_of_weights: Whether to keep a full RAM copy of a model's weights when the model is loaded in VRAM. Keeping a RAM copy increases average RAM usage, but speeds up model switching and LoRA patching (assuming there is sufficient RAM). Set this to False if RAM pressure is consistently high.\n ram: DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_ram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable.\n vram: DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_vram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable.\n lazy_offload: DEPRECATED: This setting is no longer used. Lazy-offloading is enabled by default. This config setting will be removed once the new model cache behavior is stable.\n pytorch_cuda_alloc_conf: Configure the Torch CUDA memory allocator. This will impact peak reserved VRAM usage and performance. Setting to \"backend:cudaMallocAsync\" works well on many systems. The optimal configuration is highly dependent on the system configuration (device type, VRAM, CUDA driver version, etc.), so must be tuned experimentally.\n device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.
Valid values: `auto`, `cpu`, `cuda`, `mps`, `cuda:N` (where N is a device number)\n precision: Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.
Valid values: `auto`, `float16`, `bfloat16`, `float32`\n sequential_guidance: Whether to calculate guidance in serial instead of in parallel, lowering memory requirements.\n attention_type: Attention type.
Valid values: `auto`, `normal`, `xformers`, `sliced`, `torch-sdp`\n attention_slice_size: Slice size, valid when attention_type==\"sliced\".
Valid values: `auto`, `balanced`, `max`, `1`, `2`, `3`, `4`, `5`, `6`, `7`, `8`\n force_tiled_decode: Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty).\n pil_compress_level: The compress_level setting of PIL.Image.save(), used for PNG encoding. All settings are lossless. 0 = no compression, 1 = fastest with slightly larger filesize, 9 = slowest with smallest filesize. 1 is typically the best setting.\n max_queue_size: Maximum number of items in the session queue.\n clear_queue_on_startup: Empties session queue on startup.\n allow_nodes: List of nodes to allow. Omit to allow all.\n deny_nodes: List of nodes to deny. Omit to deny none.\n node_cache_size: How many cached nodes to keep in memory.\n hashing_algorithm: Model hashing algorthim for model installs. 'blake3_multi' is best for SSDs. 'blake3_single' is best for spinning disk HDDs. 'random' disables hashing, instead assigning a UUID to models. Useful when using a memory db to reduce model installation time, or if you don't care about storing stable hashes for models. Alternatively, any other hashlib algorithm is accepted, though these are not nearly as performant as blake3.
Valid values: `blake3_multi`, `blake3_single`, `random`, `md5`, `sha1`, `sha224`, `sha256`, `sha384`, `sha512`, `blake2b`, `blake2s`, `sha3_224`, `sha3_256`, `sha3_384`, `sha3_512`, `shake_128`, `shake_256`\n remote_api_tokens: List of regular expression and token pairs used when downloading models from URLs. The download URL is tested against the regex, and if it matches, the token is provided in as a Bearer token.\n scan_models_on_startup: Scan the models directory on startup, registering orphaned models. This is typically only used in conjunction with `use_memory_db` for testing purposes.\n unsafe_disable_picklescan: UNSAFE. Disable the picklescan security check during model installation. Recommended only for development and testing purposes. This will allow arbitrary code execution during model installation, so should never be used in production.\n allow_unknown_models: Allow installation of models that we are unable to identify. If enabled, models will be marked as `unknown` in the database, and will not have any metadata associated with them. If disabled, unknown models will be rejected during installation." + "description": "Invoke's global app configuration.\n\nTypically, you won't need to interact with this class directly. Instead, use the `get_config` function from `invokeai.app.services.config` to get a singleton config object.\n\nAttributes:\n host: IP address to bind to. Use `0.0.0.0` to serve to your local network.\n port: Port to bind to.\n allow_origins: Allowed CORS origins.\n allow_credentials: Allow CORS credentials.\n allow_methods: Methods allowed for CORS.\n allow_headers: Headers allowed for CORS.\n ssl_certfile: SSL certificate file for HTTPS. See https://www.uvicorn.org/settings/#https.\n ssl_keyfile: SSL key file for HTTPS. 
See https://www.uvicorn.org/settings/#https.\n log_tokenization: Enable logging of parsed prompt tokens.\n patchmatch: Enable patchmatch inpaint code.\n models_dir: Path to the models directory.\n convert_cache_dir: Path to the converted models cache directory (DEPRECATED, but do not delete because it is needed for migration from previous versions).\n download_cache_dir: Path to the directory that contains dynamically downloaded models.\n legacy_conf_dir: Path to directory of legacy checkpoint config files.\n db_dir: Path to InvokeAI databases directory.\n outputs_dir: Path to directory for outputs.\n custom_nodes_dir: Path to directory for custom nodes.\n style_presets_dir: Path to directory for style presets.\n workflow_thumbnails_dir: Path to directory for workflow thumbnails.\n log_handlers: Log handler. Valid options are \"console\", \"file=\", \"syslog=path|address:host:port\", \"http=\".\n log_format: Log format. Use \"plain\" for text-only, \"color\" for colorized output, \"legacy\" for 2.3-style logging and \"syslog\" for syslog-style.
Valid values: `plain`, `color`, `syslog`, `legacy`\n log_level: Emit logging messages at this level or higher.
Valid values: `debug`, `info`, `warning`, `error`, `critical`\n log_sql: Log SQL queries. `log_level` must be `debug` for this to do anything. Extremely verbose.\n log_level_network: Log level for network-related messages. 'info' and 'debug' are very verbose.
Valid values: `debug`, `info`, `warning`, `error`, `critical`\n use_memory_db: Use in-memory database. Useful for development.\n dev_reload: Automatically reload when Python sources are changed. Does not reload node definitions.\n profile_graphs: Enable graph profiling using `cProfile`.\n profile_prefix: An optional prefix for profile output files.\n profiles_dir: Path to profiles output directory.\n max_cache_ram_gb: The maximum amount of CPU RAM to use for model caching in GB. If unset, the limit will be configured based on the available RAM. In most cases, it is recommended to leave this unset.\n max_cache_vram_gb: The amount of VRAM to use for model caching in GB. If unset, the limit will be configured based on the available VRAM and the device_working_mem_gb. In most cases, it is recommended to leave this unset.\n log_memory_usage: If True, a memory snapshot will be captured before and after every model cache operation, and the result will be logged (at debug level). There is a time cost to capturing the memory snapshots, so it is recommended to only enable this feature if you are actively inspecting the model cache's behaviour.\n model_cache_keep_alive_min: How long to keep models in cache after last use, in minutes. A value of 0 (the default) means models are kept in cache indefinitely. If no model generations occur within the timeout period, the model cache is cleared using the same logic as the 'Clear Model Cache' button.\n device_working_mem_gb: The amount of working memory to keep available on the compute device (in GB). Has no effect if running on CPU. If you are experiencing OOM errors, try increasing this value.\n enable_partial_loading: Enable partial loading of models. This enables models to run with reduced VRAM requirements (at the cost of slower speed) by streaming the model from RAM to VRAM as its used. 
In some edge cases, partial loading can cause models to run more slowly if they were previously being fully loaded into VRAM.\n keep_ram_copy_of_weights: Whether to keep a full RAM copy of a model's weights when the model is loaded in VRAM. Keeping a RAM copy increases average RAM usage, but speeds up model switching and LoRA patching (assuming there is sufficient RAM). Set this to False if RAM pressure is consistently high.\n ram: DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_ram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable.\n vram: DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_vram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable.\n lazy_offload: DEPRECATED: This setting is no longer used. Lazy-offloading is enabled by default. This config setting will be removed once the new model cache behavior is stable.\n pytorch_cuda_alloc_conf: Configure the Torch CUDA memory allocator. This will impact peak reserved VRAM usage and performance. Setting to \"backend:cudaMallocAsync\" works well on many systems. The optimal configuration is highly dependent on the system configuration (device type, VRAM, CUDA driver version, etc.), so must be tuned experimentally.\n device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.
Valid values: `auto`, `cpu`, `cuda`, `mps`, `cuda:N` (where N is a device number)\n precision: Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.
Valid values: `auto`, `float16`, `bfloat16`, `float32`\n sequential_guidance: Whether to calculate guidance in serial instead of in parallel, lowering memory requirements.\n attention_type: Attention type.
Valid values: `auto`, `normal`, `xformers`, `sliced`, `torch-sdp`\n attention_slice_size: Slice size, valid when attention_type==\"sliced\".
Valid values: `auto`, `balanced`, `max`, `1`, `2`, `3`, `4`, `5`, `6`, `7`, `8`\n force_tiled_decode: Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty).\n pil_compress_level: The compress_level setting of PIL.Image.save(), used for PNG encoding. All settings are lossless. 0 = no compression, 1 = fastest with slightly larger filesize, 9 = slowest with smallest filesize. 1 is typically the best setting.\n max_queue_size: Maximum number of items in the session queue.\n clear_queue_on_startup: Empties session queue on startup.\n allow_nodes: List of nodes to allow. Omit to allow all.\n deny_nodes: List of nodes to deny. Omit to deny none.\n node_cache_size: How many cached nodes to keep in memory.\n hashing_algorithm: Model hashing algorthim for model installs. 'blake3_multi' is best for SSDs. 'blake3_single' is best for spinning disk HDDs. 'random' disables hashing, instead assigning a UUID to models. Useful when using a memory db to reduce model installation time, or if you don't care about storing stable hashes for models. Alternatively, any other hashlib algorithm is accepted, though these are not nearly as performant as blake3.
Valid values: `blake3_multi`, `blake3_single`, `random`, `md5`, `sha1`, `sha224`, `sha256`, `sha384`, `sha512`, `blake2b`, `blake2s`, `sha3_224`, `sha3_256`, `sha3_384`, `sha3_512`, `shake_128`, `shake_256`\n remote_api_tokens: List of regular expression and token pairs used when downloading models from URLs. The download URL is tested against the regex, and if it matches, the token is provided in as a Bearer token.\n scan_models_on_startup: Scan the models directory on startup, registering orphaned models. This is typically only used in conjunction with `use_memory_db` for testing purposes.\n unsafe_disable_picklescan: UNSAFE. Disable the picklescan security check during model installation. Recommended only for development and testing purposes. This will allow arbitrary code execution during model installation, so should never be used in production.\n allow_unknown_models: Allow installation of models that we are unable to identify. If enabled, models will be marked as `unknown` in the database, and will not have any metadata associated with them. If disabled, unknown models will be rejected during installation.\n multiuser: Enable multiuser support. When disabled, the application runs in single-user mode using a default system account with administrator privileges. When enabled, requires user authentication and authorization.\n strict_password_checking: Enforce strict password requirements. When True, passwords must contain uppercase, lowercase, and numbers. When False (default), any password is accepted but its strength (weak/moderate/strong) is reported to the user." 
}, "InvokeAIAppConfigWithSetFields": { "properties": { @@ -37091,6 +40954,33 @@ "title": "LoRAMetadataField", "type": "object" }, + "LoRARecallParameter": { + "properties": { + "model_name": { + "type": "string", + "title": "Model Name", + "description": "The name of the LoRA model" + }, + "weight": { + "type": "number", + "maximum": 10.0, + "minimum": -10.0, + "title": "Weight", + "description": "The weight for the LoRA", + "default": 0.75 + }, + "is_enabled": { + "type": "boolean", + "title": "Is Enabled", + "description": "Whether the LoRA is enabled", + "default": true + } + }, + "type": "object", + "required": ["model_name"], + "title": "LoRARecallParameter", + "description": "LoRA configuration for recall" + }, "LoRASelectorInvocation": { "category": "model", "class": "invocation", @@ -37327,7 +41217,7 @@ ], "title": "LoRA_Diffusers_FLUX_Config" }, - "LoRA_Diffusers_SD1_Config": { + "LoRA_Diffusers_Flux2_Config": { "properties": { "key": { "type": "string", @@ -37440,9 +41330,19 @@ }, "base": { "type": "string", - "const": "sd-1", + "const": "flux2", "title": "Base", - "default": "sd-1" + "default": "flux2" + }, + "variant": { + "anyOf": [ + { + "$ref": "#/components/schemas/Flux2VariantType" + }, + { + "type": "null" + } + ] } }, "type": "object", @@ -37461,11 +41361,13 @@ "trigger_phrases", "default_settings", "format", - "base" + "base", + "variant" ], - "title": "LoRA_Diffusers_SD1_Config" + "title": "LoRA_Diffusers_Flux2_Config", + "description": "Model config for FLUX.2 (Klein) LoRA models in Diffusers format." 
}, - "LoRA_Diffusers_SD2_Config": { + "LoRA_Diffusers_SD1_Config": { "properties": { "key": { "type": "string", @@ -37578,9 +41480,9 @@ }, "base": { "type": "string", - "const": "sd-2", + "const": "sd-1", "title": "Base", - "default": "sd-2" + "default": "sd-1" } }, "type": "object", @@ -37601,9 +41503,9 @@ "format", "base" ], - "title": "LoRA_Diffusers_SD2_Config" + "title": "LoRA_Diffusers_SD1_Config" }, - "LoRA_Diffusers_SDXL_Config": { + "LoRA_Diffusers_SD2_Config": { "properties": { "key": { "type": "string", @@ -37716,9 +41618,9 @@ }, "base": { "type": "string", - "const": "sdxl", + "const": "sd-2", "title": "Base", - "default": "sdxl" + "default": "sd-2" } }, "type": "object", @@ -37739,9 +41641,9 @@ "format", "base" ], - "title": "LoRA_Diffusers_SDXL_Config" + "title": "LoRA_Diffusers_SD2_Config" }, - "LoRA_Diffusers_ZImage_Config": { + "LoRA_Diffusers_SDXL_Config": { "properties": { "key": { "type": "string", @@ -37854,9 +41756,9 @@ }, "base": { "type": "string", - "const": "z-image", + "const": "sdxl", "title": "Base", - "default": "z-image" + "default": "sdxl" } }, "type": "object", @@ -37877,10 +41779,9 @@ "format", "base" ], - "title": "LoRA_Diffusers_ZImage_Config", - "description": "Model config for Z-Image LoRA models in Diffusers format." 
+ "title": "LoRA_Diffusers_SDXL_Config" }, - "LoRA_LyCORIS_FLUX_Config": { + "LoRA_Diffusers_ZImage_Config": { "properties": { "key": { "type": "string", @@ -37987,153 +41888,25 @@ }, "format": { "type": "string", - "const": "lycoris", + "const": "diffusers", "title": "Format", - "default": "lycoris" + "default": "diffusers" }, "base": { "type": "string", - "const": "flux", + "const": "z-image", "title": "Base", - "default": "flux" - } - }, - "type": "object", - "required": [ - "key", - "hash", - "path", - "file_size", - "name", - "description", - "source", - "source_type", - "source_api_response", - "cover_image", - "type", - "trigger_phrases", - "default_settings", - "format", - "base" - ], - "title": "LoRA_LyCORIS_FLUX_Config" - }, - "LoRA_LyCORIS_SD1_Config": { - "properties": { - "key": { - "type": "string", - "title": "Key", - "description": "A unique key for this model." - }, - "hash": { - "type": "string", - "title": "Hash", - "description": "The hash of the model file(s)." - }, - "path": { - "type": "string", - "title": "Path", - "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." - }, - "file_size": { - "type": "integer", - "title": "File Size", - "description": "The size of the model in bytes." - }, - "name": { - "type": "string", - "title": "Name", - "description": "Name of the model." - }, - "description": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "Description", - "description": "Model description" - }, - "source": { - "type": "string", - "title": "Source", - "description": "The original source of the model (path, URL or repo_id)." 
- }, - "source_type": { - "$ref": "#/components/schemas/ModelSourceType", - "description": "The type of source" - }, - "source_api_response": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "Source Api Response", - "description": "The original API response from the source, as stringified JSON." - }, - "cover_image": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "Cover Image", - "description": "Url for image to preview model" - }, - "type": { - "type": "string", - "const": "lora", - "title": "Type", - "default": "lora" - }, - "trigger_phrases": { - "anyOf": [ - { - "items": { - "type": "string" - }, - "type": "array", - "uniqueItems": true - }, - { - "type": "null" - } - ], - "title": "Trigger Phrases", - "description": "Set of trigger phrases for this model" + "default": "z-image" }, - "default_settings": { + "variant": { "anyOf": [ { - "$ref": "#/components/schemas/LoraModelDefaultSettings" + "$ref": "#/components/schemas/ZImageVariantType" }, { "type": "null" } - ], - "description": "Default settings for this model" - }, - "format": { - "type": "string", - "const": "lycoris", - "title": "Format", - "default": "lycoris" - }, - "base": { - "type": "string", - "const": "sd-1", - "title": "Base", - "default": "sd-1" + ] } }, "type": "object", @@ -38152,11 +41925,13 @@ "trigger_phrases", "default_settings", "format", - "base" + "base", + "variant" ], - "title": "LoRA_LyCORIS_SD1_Config" + "title": "LoRA_Diffusers_ZImage_Config", + "description": "Model config for Z-Image LoRA models in Diffusers format." 
}, - "LoRA_LyCORIS_SD2_Config": { + "LoRA_LyCORIS_Anima_Config": { "properties": { "key": { "type": "string", @@ -38269,9 +42044,9 @@ }, "base": { "type": "string", - "const": "sd-2", + "const": "anima", "title": "Base", - "default": "sd-2" + "default": "anima" } }, "type": "object", @@ -38292,9 +42067,10 @@ "format", "base" ], - "title": "LoRA_LyCORIS_SD2_Config" + "title": "LoRA_LyCORIS_Anima_Config", + "description": "Model config for Anima LoRA models in LyCORIS format." }, - "LoRA_LyCORIS_SDXL_Config": { + "LoRA_LyCORIS_FLUX_Config": { "properties": { "key": { "type": "string", @@ -38407,9 +42183,9 @@ }, "base": { "type": "string", - "const": "sdxl", + "const": "flux", "title": "Base", - "default": "sdxl" + "default": "flux" } }, "type": "object", @@ -38430,9 +42206,9 @@ "format", "base" ], - "title": "LoRA_LyCORIS_SDXL_Config" + "title": "LoRA_LyCORIS_FLUX_Config" }, - "LoRA_LyCORIS_ZImage_Config": { + "LoRA_LyCORIS_Flux2_Config": { "properties": { "key": { "type": "string", @@ -38545,148 +42321,19 @@ }, "base": { "type": "string", - "const": "z-image", + "const": "flux2", "title": "Base", - "default": "z-image" - } - }, - "type": "object", - "required": [ - "key", - "hash", - "path", - "file_size", - "name", - "description", - "source", - "source_type", - "source_api_response", - "cover_image", - "type", - "trigger_phrases", - "default_settings", - "format", - "base" - ], - "title": "LoRA_LyCORIS_ZImage_Config", - "description": "Model config for Z-Image LoRA models in LyCORIS format." - }, - "LoRA_OMI_FLUX_Config": { - "properties": { - "key": { - "type": "string", - "title": "Key", - "description": "A unique key for this model." - }, - "hash": { - "type": "string", - "title": "Hash", - "description": "The hash of the model file(s)." - }, - "path": { - "type": "string", - "title": "Path", - "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." 
- }, - "file_size": { - "type": "integer", - "title": "File Size", - "description": "The size of the model in bytes." - }, - "name": { - "type": "string", - "title": "Name", - "description": "Name of the model." - }, - "description": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "Description", - "description": "Model description" - }, - "source": { - "type": "string", - "title": "Source", - "description": "The original source of the model (path, URL or repo_id)." - }, - "source_type": { - "$ref": "#/components/schemas/ModelSourceType", - "description": "The type of source" - }, - "source_api_response": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "Source Api Response", - "description": "The original API response from the source, as stringified JSON." - }, - "cover_image": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "Cover Image", - "description": "Url for image to preview model" - }, - "type": { - "type": "string", - "const": "lora", - "title": "Type", - "default": "lora" - }, - "trigger_phrases": { - "anyOf": [ - { - "items": { - "type": "string" - }, - "type": "array", - "uniqueItems": true - }, - { - "type": "null" - } - ], - "title": "Trigger Phrases", - "description": "Set of trigger phrases for this model" + "default": "flux2" }, - "default_settings": { + "variant": { "anyOf": [ { - "$ref": "#/components/schemas/LoraModelDefaultSettings" + "$ref": "#/components/schemas/Flux2VariantType" }, { "type": "null" } - ], - "description": "Default settings for this model" - }, - "format": { - "type": "string", - "const": "omi", - "title": "Format", - "default": "omi" - }, - "base": { - "type": "string", - "const": "flux", - "title": "Base", - "default": "flux" + ] } }, "type": "object", @@ -38705,11 +42352,13 @@ "trigger_phrases", "default_settings", "format", - "base" + "base", + "variant" ], - "title": "LoRA_OMI_FLUX_Config" + "title": 
"LoRA_LyCORIS_Flux2_Config", + "description": "Model config for FLUX.2 (Klein) LoRA models in LyCORIS format." }, - "LoRA_OMI_SDXL_Config": { + "LoRA_LyCORIS_QwenImage_Config": { "properties": { "key": { "type": "string", @@ -38816,15 +42465,15 @@ }, "format": { "type": "string", - "const": "omi", + "const": "lycoris", "title": "Format", - "default": "omi" + "default": "lycoris" }, "base": { "type": "string", - "const": "sdxl", + "const": "qwen-image", "title": "Base", - "default": "sdxl" + "default": "qwen-image" } }, "type": "object", @@ -38845,297 +42494,313 @@ "format", "base" ], - "title": "LoRA_OMI_SDXL_Config" + "title": "LoRA_LyCORIS_QwenImage_Config", + "description": "Model config for Qwen Image Edit LoRA models in LyCORIS format." }, - "LocalModelSource": { + "LoRA_LyCORIS_SD1_Config": { "properties": { + "key": { + "type": "string", + "title": "Key", + "description": "A unique key for this model." + }, + "hash": { + "type": "string", + "title": "Hash", + "description": "The hash of the model file(s)." + }, "path": { + "type": "string", + "title": "Path", + "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." + }, + "file_size": { + "type": "integer", + "title": "File Size", + "description": "The size of the model in bytes." + }, + "name": { + "type": "string", + "title": "Name", + "description": "Name of the model." + }, + "description": { "anyOf": [ { "type": "string" }, { - "type": "string", - "format": "path" + "type": "null" } ], - "title": "Path" + "title": "Description", + "description": "Model description" }, - "inplace": { + "source": { + "type": "string", + "title": "Source", + "description": "The original source of the model (path, URL or repo_id)." 
+ }, + "source_type": { + "$ref": "#/components/schemas/ModelSourceType", + "description": "The type of source" + }, + "source_api_response": { "anyOf": [ { - "type": "boolean" + "type": "string" }, { "type": "null" } ], - "title": "Inplace", - "default": false + "title": "Source Api Response", + "description": "The original API response from the source, as stringified JSON." }, - "type": { - "type": "string", - "const": "local", - "title": "Type", - "default": "local" - } - }, - "type": "object", - "required": ["path"], - "title": "LocalModelSource", - "description": "A local file or directory path." - }, - "LogLevel": { - "type": "integer", - "enum": [0, 10, 20, 30, 40, 50], - "title": "LogLevel" - }, - "LoraModelDefaultSettings": { - "properties": { - "weight": { + "cover_image": { "anyOf": [ { - "type": "number", - "maximum": 2.0, - "minimum": -1.0 + "type": "string" }, { "type": "null" } ], - "title": "Weight", - "description": "Default weight for this model" - } - }, - "additionalProperties": false, - "type": "object", - "title": "LoraModelDefaultSettings" - }, - "MDControlListOutput": { - "class": "output", - "properties": { - "control_list": { + "title": "Cover Image", + "description": "Url for image to preview model" + }, + "type": { + "type": "string", + "const": "lora", + "title": "Type", + "default": "lora" + }, + "trigger_phrases": { "anyOf": [ - { - "$ref": "#/components/schemas/ControlField" - }, { "items": { - "$ref": "#/components/schemas/ControlField" + "type": "string" }, - "type": "array" + "type": "array", + "uniqueItems": true }, { "type": "null" } ], - "description": "ControlNet(s) to apply", - "field_kind": "output", - "title": "ControlNet-List", - "ui_hidden": false + "title": "Trigger Phrases", + "description": "Set of trigger phrases for this model" }, - "type": { - "const": "md_control_list_output", - "default": "md_control_list_output", - "field_kind": "node_attribute", - "title": "type", - "type": "string" - } - }, - "required": 
["output_meta", "control_list", "type", "type"], - "title": "MDControlListOutput", - "type": "object" - }, - "MDIPAdapterListOutput": { - "class": "output", - "properties": { - "ip_adapter_list": { + "default_settings": { "anyOf": [ { - "$ref": "#/components/schemas/IPAdapterField" - }, - { - "items": { - "$ref": "#/components/schemas/IPAdapterField" - }, - "type": "array" + "$ref": "#/components/schemas/LoraModelDefaultSettings" }, { "type": "null" } ], - "description": "IP-Adapter to apply", - "field_kind": "output", - "title": "IP-Adapter-List", - "ui_hidden": false + "description": "Default settings for this model" }, - "type": { - "const": "md_ip_adapter_list_output", - "default": "md_ip_adapter_list_output", - "field_kind": "node_attribute", - "title": "type", - "type": "string" + "format": { + "type": "string", + "const": "lycoris", + "title": "Format", + "default": "lycoris" + }, + "base": { + "type": "string", + "const": "sd-1", + "title": "Base", + "default": "sd-1" } }, - "required": ["output_meta", "ip_adapter_list", "type", "type"], - "title": "MDIPAdapterListOutput", - "type": "object" + "type": "object", + "required": [ + "key", + "hash", + "path", + "file_size", + "name", + "description", + "source", + "source_type", + "source_api_response", + "cover_image", + "type", + "trigger_phrases", + "default_settings", + "format", + "base" + ], + "title": "LoRA_LyCORIS_SD1_Config" }, - "MDT2IAdapterListOutput": { - "class": "output", + "LoRA_LyCORIS_SD2_Config": { "properties": { - "t2i_adapter_list": { + "key": { + "type": "string", + "title": "Key", + "description": "A unique key for this model." + }, + "hash": { + "type": "string", + "title": "Hash", + "description": "The hash of the model file(s)." + }, + "path": { + "type": "string", + "title": "Path", + "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." 
+ }, + "file_size": { + "type": "integer", + "title": "File Size", + "description": "The size of the model in bytes." + }, + "name": { + "type": "string", + "title": "Name", + "description": "Name of the model." + }, + "description": { "anyOf": [ { - "$ref": "#/components/schemas/T2IAdapterField" - }, - { - "items": { - "$ref": "#/components/schemas/T2IAdapterField" - }, - "type": "array" + "type": "string" }, { "type": "null" } ], - "description": "T2I-Adapter(s) to apply", - "field_kind": "output", - "title": "T2I Adapter-List", - "ui_hidden": false + "title": "Description", + "description": "Model description" }, - "type": { - "const": "md_ip_adapters_output", - "default": "md_ip_adapters_output", - "field_kind": "node_attribute", - "title": "type", - "type": "string" - } - }, - "required": ["output_meta", "t2i_adapter_list", "type", "type"], - "title": "MDT2IAdapterListOutput", - "type": "object" - }, - "MLSDDetectionInvocation": { - "category": "controlnet", - "class": "invocation", - "classification": "stable", - "description": "Generates an line segment map using MLSD.", - "node_pack": "invokeai", - "properties": { - "board": { + "source": { + "type": "string", + "title": "Source", + "description": "The original source of the model (path, URL or repo_id)." + }, + "source_type": { + "$ref": "#/components/schemas/ModelSourceType", + "description": "The type of source" + }, + "source_api_response": { "anyOf": [ { - "$ref": "#/components/schemas/BoardField" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "The board to save the image to", - "field_kind": "internal", - "input": "direct", - "orig_required": false, - "ui_hidden": false + "title": "Source Api Response", + "description": "The original API response from the source, as stringified JSON." 
}, - "metadata": { + "cover_image": { "anyOf": [ { - "$ref": "#/components/schemas/MetadataField" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "Optional metadata to be saved with the image", - "field_kind": "internal", - "input": "connection", - "orig_required": false, - "ui_hidden": false - }, - "id": { - "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", - "field_kind": "node_attribute", - "title": "Id", - "type": "string" - }, - "is_intermediate": { - "default": false, - "description": "Whether or not this is an intermediate invocation.", - "field_kind": "node_attribute", - "input": "direct", - "orig_required": true, - "title": "Is Intermediate", - "type": "boolean", - "ui_hidden": false, - "ui_type": "IsIntermediate" + "title": "Cover Image", + "description": "Url for image to preview model" }, - "use_cache": { - "default": true, - "description": "Whether or not to use the cache", - "field_kind": "node_attribute", - "title": "Use Cache", - "type": "boolean" + "type": { + "type": "string", + "const": "lora", + "title": "Type", + "default": "lora" }, - "image": { + "trigger_phrases": { "anyOf": [ { - "$ref": "#/components/schemas/ImageField" + "items": { + "type": "string" + }, + "type": "array", + "uniqueItems": true }, { "type": "null" } ], - "default": null, - "description": "The image to process", - "field_kind": "input", - "input": "any", - "orig_required": true + "title": "Trigger Phrases", + "description": "Set of trigger phrases for this model" }, - "score_threshold": { - "default": 0.1, - "description": "The threshold used to score points when determining line segments", - "field_kind": "input", - "input": "any", - "minimum": 0, - "orig_default": 0.1, - "orig_required": false, - "title": "Score Threshold", - "type": "number" + "default_settings": { + "anyOf": [ + { + "$ref": "#/components/schemas/LoraModelDefaultSettings" + }, + { + "type": "null" + } + ], + 
"description": "Default settings for this model" }, - "distance_threshold": { - "default": 20.0, - "description": "Threshold for including a line segment - lines shorter than this distance will be discarded", - "field_kind": "input", - "input": "any", - "minimum": 0, - "orig_default": 20.0, - "orig_required": false, - "title": "Distance Threshold", - "type": "number" + "format": { + "type": "string", + "const": "lycoris", + "title": "Format", + "default": "lycoris" }, - "type": { - "const": "mlsd_detection", - "default": "mlsd_detection", - "field_kind": "node_attribute", - "title": "type", - "type": "string" + "base": { + "type": "string", + "const": "sd-2", + "title": "Base", + "default": "sd-2" } }, - "required": ["type", "id"], - "tags": ["controlnet", "mlsd", "edge"], - "title": "MLSD Detection", "type": "object", - "version": "1.0.0", - "output": { - "$ref": "#/components/schemas/ImageOutput" - } + "required": [ + "key", + "hash", + "path", + "file_size", + "name", + "description", + "source", + "source_type", + "source_api_response", + "cover_image", + "type", + "trigger_phrases", + "default_settings", + "format", + "base" + ], + "title": "LoRA_LyCORIS_SD2_Config" }, - "MainModelDefaultSettings": { + "LoRA_LyCORIS_SDXL_Config": { "properties": { - "vae": { + "key": { + "type": "string", + "title": "Key", + "description": "A unique key for this model." + }, + "hash": { + "type": "string", + "title": "Hash", + "description": "The hash of the model file(s)." + }, + "path": { + "type": "string", + "title": "Path", + "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." + }, + "file_size": { + "type": "integer", + "title": "File Size", + "description": "The size of the model in bytes." + }, + "name": { + "type": "string", + "title": "Name", + "description": "Name of the model." 
+ }, + "description": { "anyOf": [ { "type": "string" @@ -39144,230 +42809,109 @@ "type": "null" } ], - "title": "Vae", - "description": "Default VAE for this model (model key)" + "title": "Description", + "description": "Model description" }, - "vae_precision": { + "source": { + "type": "string", + "title": "Source", + "description": "The original source of the model (path, URL or repo_id)." + }, + "source_type": { + "$ref": "#/components/schemas/ModelSourceType", + "description": "The type of source" + }, + "source_api_response": { "anyOf": [ { - "type": "string", - "enum": ["fp16", "fp32"] + "type": "string" }, { "type": "null" } ], - "title": "Vae Precision", - "description": "Default VAE precision for this model" + "title": "Source Api Response", + "description": "The original API response from the source, as stringified JSON." }, - "scheduler": { + "cover_image": { "anyOf": [ { - "type": "string", - "enum": [ - "ddim", - "ddpm", - "deis", - "deis_k", - "lms", - "lms_k", - "pndm", - "heun", - "heun_k", - "euler", - "euler_k", - "euler_a", - "kdpm_2", - "kdpm_2_k", - "kdpm_2_a", - "kdpm_2_a_k", - "dpmpp_2s", - "dpmpp_2s_k", - "dpmpp_2m", - "dpmpp_2m_k", - "dpmpp_2m_sde", - "dpmpp_2m_sde_k", - "dpmpp_3m", - "dpmpp_3m_k", - "dpmpp_sde", - "dpmpp_sde_k", - "unipc", - "unipc_k", - "lcm", - "tcd" - ] + "type": "string" }, { "type": "null" } ], - "title": "Scheduler", - "description": "Default scheduler for this model" + "title": "Cover Image", + "description": "Url for image to preview model" }, - "steps": { + "type": { + "type": "string", + "const": "lora", + "title": "Type", + "default": "lora" + }, + "trigger_phrases": { "anyOf": [ { - "type": "integer", - "exclusiveMinimum": 0.0 + "items": { + "type": "string" + }, + "type": "array", + "uniqueItems": true }, { "type": "null" } ], - "title": "Steps", - "description": "Default number of steps for this model" + "title": "Trigger Phrases", + "description": "Set of trigger phrases for this model" }, - "cfg_scale": { 
+ "default_settings": { "anyOf": [ { - "type": "number", - "minimum": 1.0 + "$ref": "#/components/schemas/LoraModelDefaultSettings" }, { "type": "null" } ], - "title": "Cfg Scale", - "description": "Default CFG Scale for this model" - }, - "cfg_rescale_multiplier": { - "anyOf": [ - { - "type": "number", - "exclusiveMaximum": 1.0, - "minimum": 0.0 - }, - { - "type": "null" - } - ], - "title": "Cfg Rescale Multiplier", - "description": "Default CFG Rescale Multiplier for this model" - }, - "width": { - "anyOf": [ - { - "type": "integer", - "multipleOf": 8.0, - "minimum": 64.0 - }, - { - "type": "null" - } - ], - "title": "Width", - "description": "Default width for this model" - }, - "height": { - "anyOf": [ - { - "type": "integer", - "multipleOf": 8.0, - "minimum": 64.0 - }, - { - "type": "null" - } - ], - "title": "Height", - "description": "Default height for this model" - }, - "guidance": { - "anyOf": [ - { - "type": "number", - "minimum": 1.0 - }, - { - "type": "null" - } - ], - "title": "Guidance", - "description": "Default Guidance for this model" - }, - "cpu_only": { - "anyOf": [ - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "title": "Cpu Only", - "description": "Whether this model should run on CPU only" - } - }, - "additionalProperties": false, - "type": "object", - "title": "MainModelDefaultSettings" - }, - "MainModelLoaderInvocation": { - "category": "model", - "class": "invocation", - "classification": "stable", - "description": "Loads a main model, outputting its submodels.", - "node_pack": "invokeai", - "properties": { - "id": { - "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", - "field_kind": "node_attribute", - "title": "Id", - "type": "string" - }, - "is_intermediate": { - "default": false, - "description": "Whether or not this is an intermediate invocation.", - "field_kind": "node_attribute", - "input": "direct", - "orig_required": true, - "title": "Is Intermediate", - "type": "boolean", - "ui_hidden": false, - "ui_type": "IsIntermediate" - }, - "use_cache": { - "default": true, - "description": "Whether or not to use the cache", - "field_kind": "node_attribute", - "title": "Use Cache", - "type": "boolean" + "description": "Default settings for this model" }, - "model": { - "anyOf": [ - { - "$ref": "#/components/schemas/ModelIdentifierField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Main model (UNet, VAE, CLIP) to load", - "field_kind": "input", - "input": "any", - "orig_required": true, - "ui_model_base": ["sd-1", "sd-2"], - "ui_model_type": ["main"] + "format": { + "type": "string", + "const": "lycoris", + "title": "Format", + "default": "lycoris" }, - "type": { - "const": "main_model_loader", - "default": "main_model_loader", - "field_kind": "node_attribute", - "title": "type", - "type": "string" + "base": { + "type": "string", + "const": "sdxl", + "title": "Base", + "default": "sdxl" } }, - "required": ["type", "id"], - "tags": ["model"], - "title": "Main Model - SD1.5, SD2", "type": "object", - "version": "1.0.4", - "output": { - "$ref": "#/components/schemas/ModelLoaderOutput" - } + "required": [ + "key", + "hash", + "path", + "file_size", + "name", + "description", + "source", + "source_type", + "source_api_response", + "cover_image", + "type", + "trigger_phrases", + "default_settings", + "format", + "base" + ], + "title": "LoRA_LyCORIS_SDXL_Config" }, - "Main_BnBNF4_FLUX_Config": { + "LoRA_LyCORIS_ZImage_Config": { "properties": { "key": { "type": "string", @@ -39441,9 +42985,9 @@ }, "type": { "type": "string", - "const": "main", + "const": 
"lora", "title": "Type", - "default": "main" + "default": "lora" }, "trigger_phrases": { "anyOf": [ @@ -39464,7 +43008,7 @@ "default_settings": { "anyOf": [ { - "$ref": "#/components/schemas/MainModelDefaultSettings" + "$ref": "#/components/schemas/LoraModelDefaultSettings" }, { "type": "null" @@ -39472,32 +43016,27 @@ ], "description": "Default settings for this model" }, - "config_path": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "Config Path", - "description": "Path to the config for this model, if any." + "format": { + "type": "string", + "const": "lycoris", + "title": "Format", + "default": "lycoris" }, "base": { "type": "string", - "const": "flux", + "const": "z-image", "title": "Base", - "default": "flux" - }, - "format": { - "type": "string", - "const": "bnb_quantized_nf4b", - "title": "Format", - "default": "bnb_quantized_nf4b" + "default": "z-image" }, "variant": { - "$ref": "#/components/schemas/FluxVariantType" + "anyOf": [ + { + "$ref": "#/components/schemas/ZImageVariantType" + }, + { + "type": "null" + } + ] } }, "type": "object", @@ -39515,15 +43054,14 @@ "type", "trigger_phrases", "default_settings", - "config_path", - "base", "format", + "base", "variant" ], - "title": "Main_BnBNF4_FLUX_Config", - "description": "Model config for main checkpoint models." + "title": "LoRA_LyCORIS_ZImage_Config", + "description": "Model config for Z-Image LoRA models in LyCORIS format." 
}, - "Main_Checkpoint_FLUX_Config": { + "LoRA_OMI_FLUX_Config": { "properties": { "key": { "type": "string", @@ -39597,9 +43135,9 @@ }, "type": { "type": "string", - "const": "main", + "const": "lora", "title": "Type", - "default": "main" + "default": "lora" }, "trigger_phrases": { "anyOf": [ @@ -39620,7 +43158,7 @@ "default_settings": { "anyOf": [ { - "$ref": "#/components/schemas/MainModelDefaultSettings" + "$ref": "#/components/schemas/LoraModelDefaultSettings" }, { "type": "null" @@ -39628,32 +43166,17 @@ ], "description": "Default settings for this model" }, - "config_path": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "Config Path", - "description": "Path to the config for this model, if any." - }, "format": { "type": "string", - "const": "checkpoint", + "const": "omi", "title": "Format", - "default": "checkpoint" + "default": "omi" }, "base": { "type": "string", "const": "flux", "title": "Base", "default": "flux" - }, - "variant": { - "$ref": "#/components/schemas/FluxVariantType" } }, "type": "object", @@ -39671,15 +43194,12 @@ "type", "trigger_phrases", "default_settings", - "config_path", "format", - "base", - "variant" + "base" ], - "title": "Main_Checkpoint_FLUX_Config", - "description": "Model config for main checkpoint models." 
+ "title": "LoRA_OMI_FLUX_Config" }, - "Main_Checkpoint_Flux2_Config": { + "LoRA_OMI_SDXL_Config": { "properties": { "key": { "type": "string", @@ -39753,9 +43273,9 @@ }, "type": { "type": "string", - "const": "main", + "const": "lora", "title": "Type", - "default": "main" + "default": "lora" }, "trigger_phrases": { "anyOf": [ @@ -39776,7 +43296,7 @@ "default_settings": { "anyOf": [ { - "$ref": "#/components/schemas/MainModelDefaultSettings" + "$ref": "#/components/schemas/LoraModelDefaultSettings" }, { "type": "null" @@ -39784,32 +43304,17 @@ ], "description": "Default settings for this model" }, - "config_path": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "Config Path", - "description": "Path to the config for this model, if any." - }, "format": { "type": "string", - "const": "checkpoint", + "const": "omi", "title": "Format", - "default": "checkpoint" + "default": "omi" }, "base": { "type": "string", - "const": "flux2", + "const": "sdxl", "title": "Base", - "default": "flux2" - }, - "variant": { - "$ref": "#/components/schemas/Flux2VariantType" + "default": "sdxl" } }, "type": "object", @@ -39827,174 +43332,591 @@ "type", "trigger_phrases", "default_settings", - "config_path", "format", - "base", - "variant" + "base" ], - "title": "Main_Checkpoint_Flux2_Config", - "description": "Model config for FLUX.2 checkpoint models (e.g. Klein)." + "title": "LoRA_OMI_SDXL_Config" }, - "Main_Checkpoint_SD1_Config": { + "LocalModelSource": { "properties": { - "key": { - "type": "string", - "title": "Key", - "description": "A unique key for this model." - }, - "hash": { - "type": "string", - "title": "Hash", - "description": "The hash of the model file(s)." - }, "path": { - "type": "string", - "title": "Path", - "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." - }, - "file_size": { - "type": "integer", - "title": "File Size", - "description": "The size of the model in bytes." 
- }, - "name": { - "type": "string", - "title": "Name", - "description": "Name of the model." - }, - "description": { "anyOf": [ { "type": "string" }, + { + "type": "string", + "format": "path" + } + ], + "title": "Path" + }, + "inplace": { + "anyOf": [ + { + "type": "boolean" + }, { "type": "null" } ], - "title": "Description", - "description": "Model description" + "title": "Inplace", + "default": false }, - "source": { + "type": { "type": "string", - "title": "Source", - "description": "The original source of the model (path, URL or repo_id)." + "const": "local", + "title": "Type", + "default": "local" + } + }, + "type": "object", + "required": ["path"], + "title": "LocalModelSource", + "description": "A local file or directory path." + }, + "LogLevel": { + "type": "integer", + "enum": [0, 10, 20, 30, 40, 50], + "title": "LogLevel" + }, + "LoginRequest": { + "properties": { + "email": { + "type": "string", + "title": "Email", + "description": "User email address" }, - "source_type": { - "$ref": "#/components/schemas/ModelSourceType", - "description": "The type of source" + "password": { + "type": "string", + "title": "Password", + "description": "User password" }, - "source_api_response": { + "remember_me": { + "type": "boolean", + "title": "Remember Me", + "description": "Whether to extend session duration", + "default": false + } + }, + "type": "object", + "required": ["email", "password"], + "title": "LoginRequest", + "description": "Request body for user login." + }, + "LoginResponse": { + "properties": { + "token": { + "type": "string", + "title": "Token", + "description": "JWT access token" + }, + "user": { + "$ref": "#/components/schemas/UserDTO", + "description": "User information" + }, + "expires_in": { + "type": "integer", + "title": "Expires In", + "description": "Token expiration time in seconds" + } + }, + "type": "object", + "required": ["token", "user", "expires_in"], + "title": "LoginResponse", + "description": "Response from successful login." 
+ }, + "LogoutResponse": { + "properties": { + "success": { + "type": "boolean", + "title": "Success", + "description": "Whether logout was successful" + } + }, + "type": "object", + "required": ["success"], + "title": "LogoutResponse", + "description": "Response from logout." + }, + "LoraModelDefaultSettings": { + "properties": { + "weight": { "anyOf": [ { - "type": "string" + "type": "number", + "maximum": 2.0, + "minimum": -1.0 }, { "type": "null" } ], - "title": "Source Api Response", - "description": "The original API response from the source, as stringified JSON." - }, - "cover_image": { + "title": "Weight", + "description": "Default weight for this model" + } + }, + "additionalProperties": false, + "type": "object", + "title": "LoraModelDefaultSettings" + }, + "MDControlListOutput": { + "class": "output", + "properties": { + "control_list": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/ControlField" + }, + { + "items": { + "$ref": "#/components/schemas/ControlField" + }, + "type": "array" }, { "type": "null" } ], - "title": "Cover Image", - "description": "Url for image to preview model" + "description": "ControlNet(s) to apply", + "field_kind": "output", + "title": "ControlNet-List", + "ui_hidden": false }, "type": { - "type": "string", - "const": "main", - "title": "Type", - "default": "main" - }, - "trigger_phrases": { + "const": "md_control_list_output", + "default": "md_control_list_output", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["output_meta", "control_list", "type", "type"], + "title": "MDControlListOutput", + "type": "object" + }, + "MDIPAdapterListOutput": { + "class": "output", + "properties": { + "ip_adapter_list": { "anyOf": [ + { + "$ref": "#/components/schemas/IPAdapterField" + }, { "items": { - "type": "string" + "$ref": "#/components/schemas/IPAdapterField" }, - "type": "array", - "uniqueItems": true + "type": "array" }, { "type": "null" } ], - "title": "Trigger 
Phrases", - "description": "Set of trigger phrases for this model" + "description": "IP-Adapter to apply", + "field_kind": "output", + "title": "IP-Adapter-List", + "ui_hidden": false }, - "default_settings": { + "type": { + "const": "md_ip_adapter_list_output", + "default": "md_ip_adapter_list_output", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["output_meta", "ip_adapter_list", "type", "type"], + "title": "MDIPAdapterListOutput", + "type": "object" + }, + "MDT2IAdapterListOutput": { + "class": "output", + "properties": { + "t2i_adapter_list": { "anyOf": [ { - "$ref": "#/components/schemas/MainModelDefaultSettings" + "$ref": "#/components/schemas/T2IAdapterField" + }, + { + "items": { + "$ref": "#/components/schemas/T2IAdapterField" + }, + "type": "array" }, { "type": "null" } ], - "description": "Default settings for this model" + "description": "T2I-Adapter(s) to apply", + "field_kind": "output", + "title": "T2I Adapter-List", + "ui_hidden": false }, - "config_path": { + "type": { + "const": "md_ip_adapters_output", + "default": "md_ip_adapters_output", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["output_meta", "t2i_adapter_list", "type", "type"], + "title": "MDT2IAdapterListOutput", + "type": "object" + }, + "MLSDDetectionInvocation": { + "category": "controlnet", + "class": "invocation", + "classification": "stable", + "description": "Generates an line segment map using MLSD.", + "node_pack": "invokeai", + "properties": { + "board": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/BoardField" }, { "type": "null" } ], - "title": "Config Path", - "description": "Path to the config for this model, if any." 
+ "default": null, + "description": "The board to save the image to", + "field_kind": "internal", + "input": "direct", + "orig_required": false, + "ui_hidden": false }, - "format": { - "type": "string", - "const": "checkpoint", - "title": "Format", - "default": "checkpoint" + "metadata": { + "anyOf": [ + { + "$ref": "#/components/schemas/MetadataField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional metadata to be saved with the image", + "field_kind": "internal", + "input": "connection", + "orig_required": false, + "ui_hidden": false }, - "prediction_type": { - "$ref": "#/components/schemas/SchedulerPredictionType" + "id": { + "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" }, - "variant": { - "$ref": "#/components/schemas/ModelVariantType" + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" }, - "base": { - "type": "string", - "const": "sd-1", - "title": "Base", - "default": "sd-1" + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" + }, + "image": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The image to process", + "field_kind": "input", + "input": "any", + "orig_required": true + }, + "score_threshold": { + "default": 0.1, + "description": "The threshold used to score points when determining line segments", + "field_kind": "input", + "input": "any", + "minimum": 0, + "orig_default": 0.1, + "orig_required": false, + "title": "Score Threshold", + "type": 
"number" + }, + "distance_threshold": { + "default": 20.0, + "description": "Threshold for including a line segment - lines shorter than this distance will be discarded", + "field_kind": "input", + "input": "any", + "minimum": 0, + "orig_default": 20.0, + "orig_required": false, + "title": "Distance Threshold", + "type": "number" + }, + "type": { + "const": "mlsd_detection", + "default": "mlsd_detection", + "field_kind": "node_attribute", + "title": "type", + "type": "string" } }, + "required": ["type", "id"], + "tags": ["controlnet", "mlsd", "edge"], + "title": "MLSD Detection", "type": "object", - "required": [ - "key", - "hash", - "path", - "file_size", - "name", - "description", - "source", - "source_type", - "source_api_response", - "cover_image", - "type", - "trigger_phrases", - "default_settings", - "config_path", - "format", - "prediction_type", - "variant", - "base" - ], - "title": "Main_Checkpoint_SD1_Config" + "version": "1.0.0", + "output": { + "$ref": "#/components/schemas/ImageOutput" + } }, - "Main_Checkpoint_SD2_Config": { + "MainModelDefaultSettings": { + "properties": { + "vae": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Vae", + "description": "Default VAE for this model (model key)" + }, + "vae_precision": { + "anyOf": [ + { + "type": "string", + "enum": ["fp16", "fp32"] + }, + { + "type": "null" + } + ], + "title": "Vae Precision", + "description": "Default VAE precision for this model" + }, + "scheduler": { + "anyOf": [ + { + "type": "string", + "enum": [ + "ddim", + "ddpm", + "deis", + "deis_k", + "lms", + "lms_k", + "pndm", + "heun", + "heun_k", + "euler", + "euler_k", + "euler_a", + "kdpm_2", + "kdpm_2_k", + "kdpm_2_a", + "kdpm_2_a_k", + "dpmpp_2s", + "dpmpp_2s_k", + "dpmpp_2m", + "dpmpp_2m_k", + "dpmpp_2m_sde", + "dpmpp_2m_sde_k", + "dpmpp_3m", + "dpmpp_3m_k", + "dpmpp_sde", + "dpmpp_sde_k", + "unipc", + "unipc_k", + "lcm", + "tcd" + ] + }, + { + "type": "null" + } + ], + "title": "Scheduler", + 
"description": "Default scheduler for this model" + }, + "steps": { + "anyOf": [ + { + "type": "integer", + "exclusiveMinimum": 0.0 + }, + { + "type": "null" + } + ], + "title": "Steps", + "description": "Default number of steps for this model" + }, + "cfg_scale": { + "anyOf": [ + { + "type": "number", + "minimum": 1.0 + }, + { + "type": "null" + } + ], + "title": "Cfg Scale", + "description": "Default CFG Scale for this model" + }, + "cfg_rescale_multiplier": { + "anyOf": [ + { + "type": "number", + "exclusiveMaximum": 1.0, + "minimum": 0.0 + }, + { + "type": "null" + } + ], + "title": "Cfg Rescale Multiplier", + "description": "Default CFG Rescale Multiplier for this model" + }, + "width": { + "anyOf": [ + { + "type": "integer", + "multipleOf": 8.0, + "minimum": 64.0 + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "Default width for this model" + }, + "height": { + "anyOf": [ + { + "type": "integer", + "multipleOf": 8.0, + "minimum": 64.0 + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "Default height for this model" + }, + "guidance": { + "anyOf": [ + { + "type": "number", + "minimum": 1.0 + }, + { + "type": "null" + } + ], + "title": "Guidance", + "description": "Default Guidance for this model" + }, + "cpu_only": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "title": "Cpu Only", + "description": "Whether this model should run on CPU only" + } + }, + "additionalProperties": false, + "type": "object", + "title": "MainModelDefaultSettings" + }, + "MainModelLoaderInvocation": { + "category": "model", + "class": "invocation", + "classification": "stable", + "description": "Loads a main model, outputting its submodels.", + "node_pack": "invokeai", + "properties": { + "id": { + "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" + }, + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" + }, + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" + }, + "model": { + "anyOf": [ + { + "$ref": "#/components/schemas/ModelIdentifierField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Main model (UNet, VAE, CLIP) to load", + "field_kind": "input", + "input": "any", + "orig_required": true, + "ui_model_base": ["sd-1", "sd-2"], + "ui_model_type": ["main"] + }, + "type": { + "const": "main_model_loader", + "default": "main_model_loader", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["type", "id"], + "tags": ["model"], + "title": "Main Model - SD1.5, SD2", + "type": "object", + "version": "1.0.4", + "output": { + "$ref": "#/components/schemas/ModelLoaderOutput" + } + }, + "Main_BnBNF4_FLUX_Config": { "properties": { "key": { "type": "string", @@ -40111,23 +44033,20 @@ "title": "Config Path", "description": "Path to the config for this model, if any." 
}, + "base": { + "type": "string", + "const": "flux", + "title": "Base", + "default": "flux" + }, "format": { "type": "string", - "const": "checkpoint", + "const": "bnb_quantized_nf4b", "title": "Format", - "default": "checkpoint" - }, - "prediction_type": { - "$ref": "#/components/schemas/SchedulerPredictionType" + "default": "bnb_quantized_nf4b" }, "variant": { - "$ref": "#/components/schemas/ModelVariantType" - }, - "base": { - "type": "string", - "const": "sd-2", - "title": "Base", - "default": "sd-2" + "$ref": "#/components/schemas/FluxVariantType" } }, "type": "object", @@ -40146,14 +44065,14 @@ "trigger_phrases", "default_settings", "config_path", + "base", "format", - "prediction_type", - "variant", - "base" + "variant" ], - "title": "Main_Checkpoint_SD2_Config" + "title": "Main_BnBNF4_FLUX_Config", + "description": "Model config for main checkpoint models." }, - "Main_Checkpoint_SDXLRefiner_Config": { + "Main_Checkpoint_Anima_Config": { "properties": { "key": { "type": "string", @@ -40270,23 +44189,17 @@ "title": "Config Path", "description": "Path to the config for this model, if any." 
}, + "base": { + "type": "string", + "const": "anima", + "title": "Base", + "default": "anima" + }, "format": { "type": "string", "const": "checkpoint", "title": "Format", "default": "checkpoint" - }, - "prediction_type": { - "$ref": "#/components/schemas/SchedulerPredictionType" - }, - "variant": { - "$ref": "#/components/schemas/ModelVariantType" - }, - "base": { - "type": "string", - "const": "sdxl-refiner", - "title": "Base", - "default": "sdxl-refiner" } }, "type": "object", @@ -40305,14 +44218,13 @@ "trigger_phrases", "default_settings", "config_path", - "format", - "prediction_type", - "variant", - "base" + "base", + "format" ], - "title": "Main_Checkpoint_SDXLRefiner_Config" + "title": "Main_Checkpoint_Anima_Config", + "description": "Model config for Anima single-file checkpoint models (safetensors).\n\nAnima is built on NVIDIA Cosmos Predict2 DiT with a custom LLM Adapter\nthat bridges Qwen3 0.6B text encoder outputs to the DiT." }, - "Main_Checkpoint_SDXL_Config": { + "Main_Checkpoint_FLUX_Config": { "properties": { "key": { "type": "string", @@ -40435,17 +44347,14 @@ "title": "Format", "default": "checkpoint" }, - "prediction_type": { - "$ref": "#/components/schemas/SchedulerPredictionType" - }, - "variant": { - "$ref": "#/components/schemas/ModelVariantType" - }, "base": { "type": "string", - "const": "sdxl", + "const": "flux", "title": "Base", - "default": "sdxl" + "default": "flux" + }, + "variant": { + "$ref": "#/components/schemas/FluxVariantType" } }, "type": "object", @@ -40465,13 +44374,13 @@ "default_settings", "config_path", "format", - "prediction_type", - "variant", - "base" + "base", + "variant" ], - "title": "Main_Checkpoint_SDXL_Config" + "title": "Main_Checkpoint_FLUX_Config", + "description": "Model config for main checkpoint models." 
}, - "Main_Checkpoint_ZImage_Config": { + "Main_Checkpoint_Flux2_Config": { "properties": { "key": { "type": "string", @@ -40588,17 +44497,20 @@ "title": "Config Path", "description": "Path to the config for this model, if any." }, - "base": { - "type": "string", - "const": "z-image", - "title": "Base", - "default": "z-image" - }, "format": { "type": "string", "const": "checkpoint", "title": "Format", "default": "checkpoint" + }, + "base": { + "type": "string", + "const": "flux2", + "title": "Base", + "default": "flux2" + }, + "variant": { + "$ref": "#/components/schemas/Flux2VariantType" } }, "type": "object", @@ -40617,13 +44529,14 @@ "trigger_phrases", "default_settings", "config_path", + "format", "base", - "format" + "variant" ], - "title": "Main_Checkpoint_ZImage_Config", - "description": "Model config for Z-Image single-file checkpoint models (safetensors, etc)." + "title": "Main_Checkpoint_Flux2_Config", + "description": "Model config for FLUX.2 checkpoint models (e.g. Klein)." }, - "Main_Diffusers_CogView4_Config": { + "Main_Checkpoint_SD1_Config": { "properties": { "key": { "type": "string", @@ -40728,21 +44641,35 @@ ], "description": "Default settings for this model" }, + "config_path": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Config Path", + "description": "Path to the config for this model, if any." 
+ }, "format": { "type": "string", - "const": "diffusers", + "const": "checkpoint", "title": "Format", - "default": "diffusers" + "default": "checkpoint" }, - "repo_variant": { - "$ref": "#/components/schemas/ModelRepoVariant", - "default": "" + "prediction_type": { + "$ref": "#/components/schemas/SchedulerPredictionType" + }, + "variant": { + "$ref": "#/components/schemas/ModelVariantType" }, "base": { "type": "string", - "const": "cogview4", + "const": "sd-1", "title": "Base", - "default": "cogview4" + "default": "sd-1" } }, "type": "object", @@ -40760,13 +44687,15 @@ "type", "trigger_phrases", "default_settings", + "config_path", "format", - "repo_variant", + "prediction_type", + "variant", "base" ], - "title": "Main_Diffusers_CogView4_Config" + "title": "Main_Checkpoint_SD1_Config" }, - "Main_Diffusers_FLUX_Config": { + "Main_Checkpoint_SD2_Config": { "properties": { "key": { "type": "string", @@ -40871,24 +44800,35 @@ ], "description": "Default settings for this model" }, + "config_path": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Config Path", + "description": "Path to the config for this model, if any." 
+ }, "format": { "type": "string", - "const": "diffusers", + "const": "checkpoint", "title": "Format", - "default": "diffusers" + "default": "checkpoint" }, - "repo_variant": { - "$ref": "#/components/schemas/ModelRepoVariant", - "default": "" + "prediction_type": { + "$ref": "#/components/schemas/SchedulerPredictionType" + }, + "variant": { + "$ref": "#/components/schemas/ModelVariantType" }, "base": { "type": "string", - "const": "flux", + "const": "sd-2", "title": "Base", - "default": "flux" - }, - "variant": { - "$ref": "#/components/schemas/FluxVariantType" + "default": "sd-2" } }, "type": "object", @@ -40906,15 +44846,15 @@ "type", "trigger_phrases", "default_settings", + "config_path", "format", - "repo_variant", - "base", - "variant" + "prediction_type", + "variant", + "base" ], - "title": "Main_Diffusers_FLUX_Config", - "description": "Model config for FLUX.1 models in diffusers format." + "title": "Main_Checkpoint_SD2_Config" }, - "Main_Diffusers_Flux2_Config": { + "Main_Checkpoint_SDXLRefiner_Config": { "properties": { "key": { "type": "string", @@ -41019,24 +44959,35 @@ ], "description": "Default settings for this model" }, + "config_path": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Config Path", + "description": "Path to the config for this model, if any." 
+ }, "format": { "type": "string", - "const": "diffusers", + "const": "checkpoint", "title": "Format", - "default": "diffusers" + "default": "checkpoint" }, - "repo_variant": { - "$ref": "#/components/schemas/ModelRepoVariant", - "default": "" + "prediction_type": { + "$ref": "#/components/schemas/SchedulerPredictionType" + }, + "variant": { + "$ref": "#/components/schemas/ModelVariantType" }, "base": { "type": "string", - "const": "flux2", + "const": "sdxl-refiner", "title": "Base", - "default": "flux2" - }, - "variant": { - "$ref": "#/components/schemas/Flux2VariantType" + "default": "sdxl-refiner" } }, "type": "object", @@ -41054,15 +45005,15 @@ "type", "trigger_phrases", "default_settings", + "config_path", "format", - "repo_variant", - "base", - "variant" + "prediction_type", + "variant", + "base" ], - "title": "Main_Diffusers_Flux2_Config", - "description": "Model config for FLUX.2 models in diffusers format (e.g. FLUX.2 Klein)." + "title": "Main_Checkpoint_SDXLRefiner_Config" }, - "Main_Diffusers_SD1_Config": { + "Main_Checkpoint_SDXL_Config": { "properties": { "key": { "type": "string", @@ -41167,15 +45118,23 @@ ], "description": "Default settings for this model" }, + "config_path": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Config Path", + "description": "Path to the config for this model, if any." 
+ }, "format": { "type": "string", - "const": "diffusers", + "const": "checkpoint", "title": "Format", - "default": "diffusers" - }, - "repo_variant": { - "$ref": "#/components/schemas/ModelRepoVariant", - "default": "" + "default": "checkpoint" }, "prediction_type": { "$ref": "#/components/schemas/SchedulerPredictionType" @@ -41185,9 +45144,9 @@ }, "base": { "type": "string", - "const": "sd-1", + "const": "sdxl", "title": "Base", - "default": "sd-1" + "default": "sdxl" } }, "type": "object", @@ -41205,15 +45164,171 @@ "type", "trigger_phrases", "default_settings", + "config_path", "format", - "repo_variant", "prediction_type", "variant", "base" ], - "title": "Main_Diffusers_SD1_Config" + "title": "Main_Checkpoint_SDXL_Config" }, - "Main_Diffusers_SD2_Config": { + "Main_Checkpoint_ZImage_Config": { + "properties": { + "key": { + "type": "string", + "title": "Key", + "description": "A unique key for this model." + }, + "hash": { + "type": "string", + "title": "Hash", + "description": "The hash of the model file(s)." + }, + "path": { + "type": "string", + "title": "Path", + "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." + }, + "file_size": { + "type": "integer", + "title": "File Size", + "description": "The size of the model in bytes." + }, + "name": { + "type": "string", + "title": "Name", + "description": "Name of the model." + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Description", + "description": "Model description" + }, + "source": { + "type": "string", + "title": "Source", + "description": "The original source of the model (path, URL or repo_id)." 
+ }, + "source_type": { + "$ref": "#/components/schemas/ModelSourceType", + "description": "The type of source" + }, + "source_api_response": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Source Api Response", + "description": "The original API response from the source, as stringified JSON." + }, + "cover_image": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Cover Image", + "description": "Url for image to preview model" + }, + "type": { + "type": "string", + "const": "main", + "title": "Type", + "default": "main" + }, + "trigger_phrases": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array", + "uniqueItems": true + }, + { + "type": "null" + } + ], + "title": "Trigger Phrases", + "description": "Set of trigger phrases for this model" + }, + "default_settings": { + "anyOf": [ + { + "$ref": "#/components/schemas/MainModelDefaultSettings" + }, + { + "type": "null" + } + ], + "description": "Default settings for this model" + }, + "config_path": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Config Path", + "description": "Path to the config for this model, if any." + }, + "base": { + "type": "string", + "const": "z-image", + "title": "Base", + "default": "z-image" + }, + "format": { + "type": "string", + "const": "checkpoint", + "title": "Format", + "default": "checkpoint" + }, + "variant": { + "$ref": "#/components/schemas/ZImageVariantType" + } + }, + "type": "object", + "required": [ + "key", + "hash", + "path", + "file_size", + "name", + "description", + "source", + "source_type", + "source_api_response", + "cover_image", + "type", + "trigger_phrases", + "default_settings", + "config_path", + "base", + "format", + "variant" + ], + "title": "Main_Checkpoint_ZImage_Config", + "description": "Model config for Z-Image single-file checkpoint models (safetensors, etc)." 
+ }, + "Main_Diffusers_CogView4_Config": { "properties": { "key": { "type": "string", @@ -41328,17 +45443,11 @@ "$ref": "#/components/schemas/ModelRepoVariant", "default": "" }, - "prediction_type": { - "$ref": "#/components/schemas/SchedulerPredictionType" - }, - "variant": { - "$ref": "#/components/schemas/ModelVariantType" - }, "base": { "type": "string", - "const": "sd-2", + "const": "cogview4", "title": "Base", - "default": "sd-2" + "default": "cogview4" } }, "type": "object", @@ -41358,13 +45467,11 @@ "default_settings", "format", "repo_variant", - "prediction_type", - "variant", "base" ], - "title": "Main_Diffusers_SD2_Config" + "title": "Main_Diffusers_CogView4_Config" }, - "Main_Diffusers_SD3_Config": { + "Main_Diffusers_FLUX_Config": { "properties": { "key": { "type": "string", @@ -41481,27 +45588,160 @@ }, "base": { "type": "string", - "const": "sd-3", + "const": "flux", "title": "Base", - "default": "sd-3" + "default": "flux" }, - "submodels": { + "variant": { + "$ref": "#/components/schemas/FluxVariantType" + } + }, + "type": "object", + "required": [ + "key", + "hash", + "path", + "file_size", + "name", + "description", + "source", + "source_type", + "source_api_response", + "cover_image", + "type", + "trigger_phrases", + "default_settings", + "format", + "repo_variant", + "base", + "variant" + ], + "title": "Main_Diffusers_FLUX_Config", + "description": "Model config for FLUX.1 models in diffusers format." + }, + "Main_Diffusers_Flux2_Config": { + "properties": { + "key": { + "type": "string", + "title": "Key", + "description": "A unique key for this model." + }, + "hash": { + "type": "string", + "title": "Hash", + "description": "The hash of the model file(s)." + }, + "path": { + "type": "string", + "title": "Path", + "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." + }, + "file_size": { + "type": "integer", + "title": "File Size", + "description": "The size of the model in bytes." 
+ }, + "name": { + "type": "string", + "title": "Name", + "description": "Name of the model." + }, + "description": { "anyOf": [ { - "additionalProperties": { - "$ref": "#/components/schemas/SubmodelDefinition" - }, - "propertyNames": { - "$ref": "#/components/schemas/SubModelType" + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Description", + "description": "Model description" + }, + "source": { + "type": "string", + "title": "Source", + "description": "The original source of the model (path, URL or repo_id)." + }, + "source_type": { + "$ref": "#/components/schemas/ModelSourceType", + "description": "The type of source" + }, + "source_api_response": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Source Api Response", + "description": "The original API response from the source, as stringified JSON." + }, + "cover_image": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Cover Image", + "description": "Url for image to preview model" + }, + "type": { + "type": "string", + "const": "main", + "title": "Type", + "default": "main" + }, + "trigger_phrases": { + "anyOf": [ + { + "items": { + "type": "string" }, - "type": "object" + "type": "array", + "uniqueItems": true }, { "type": "null" } ], - "title": "Submodels", - "description": "Loadable submodels in this model" + "title": "Trigger Phrases", + "description": "Set of trigger phrases for this model" + }, + "default_settings": { + "anyOf": [ + { + "$ref": "#/components/schemas/MainModelDefaultSettings" + }, + { + "type": "null" + } + ], + "description": "Default settings for this model" + }, + "format": { + "type": "string", + "const": "diffusers", + "title": "Format", + "default": "diffusers" + }, + "repo_variant": { + "$ref": "#/components/schemas/ModelRepoVariant", + "default": "" + }, + "base": { + "type": "string", + "const": "flux2", + "title": "Base", + "default": "flux2" + }, + "variant": { + "$ref": 
"#/components/schemas/Flux2VariantType" } }, "type": "object", @@ -41522,11 +45762,12 @@ "format", "repo_variant", "base", - "submodels" + "variant" ], - "title": "Main_Diffusers_SD3_Config" + "title": "Main_Diffusers_Flux2_Config", + "description": "Model config for FLUX.2 models in diffusers format (e.g. FLUX.2 Klein)." }, - "Main_Diffusers_SDXLRefiner_Config": { + "Main_Diffusers_QwenImage_Config": { "properties": { "key": { "type": "string", @@ -41641,17 +45882,21 @@ "$ref": "#/components/schemas/ModelRepoVariant", "default": "" }, - "prediction_type": { - "$ref": "#/components/schemas/SchedulerPredictionType" - }, - "variant": { - "$ref": "#/components/schemas/ModelVariantType" - }, "base": { "type": "string", - "const": "sdxl-refiner", + "const": "qwen-image", "title": "Base", - "default": "sdxl-refiner" + "default": "qwen-image" + }, + "variant": { + "anyOf": [ + { + "$ref": "#/components/schemas/QwenImageVariantType" + }, + { + "type": "null" + } + ] } }, "type": "object", @@ -41671,13 +45916,13 @@ "default_settings", "format", "repo_variant", - "prediction_type", - "variant", - "base" + "base", + "variant" ], - "title": "Main_Diffusers_SDXLRefiner_Config" + "title": "Main_Diffusers_QwenImage_Config", + "description": "Model config for Qwen Image diffusers models (both txt2img and edit)." 
}, - "Main_Diffusers_SDXL_Config": { + "Main_Diffusers_SD1_Config": { "properties": { "key": { "type": "string", @@ -41800,9 +46045,9 @@ }, "base": { "type": "string", - "const": "sdxl", + "const": "sd-1", "title": "Base", - "default": "sdxl" + "default": "sd-1" } }, "type": "object", @@ -41826,9 +46071,9 @@ "variant", "base" ], - "title": "Main_Diffusers_SDXL_Config" + "title": "Main_Diffusers_SD1_Config" }, - "Main_Diffusers_ZImage_Config": { + "Main_Diffusers_SD2_Config": { "properties": { "key": { "type": "string", @@ -41943,11 +46188,17 @@ "$ref": "#/components/schemas/ModelRepoVariant", "default": "" }, + "prediction_type": { + "$ref": "#/components/schemas/SchedulerPredictionType" + }, + "variant": { + "$ref": "#/components/schemas/ModelVariantType" + }, "base": { "type": "string", - "const": "z-image", + "const": "sd-2", "title": "Base", - "default": "z-image" + "default": "sd-2" } }, "type": "object", @@ -41967,12 +46218,13 @@ "default_settings", "format", "repo_variant", + "prediction_type", + "variant", "base" ], - "title": "Main_Diffusers_ZImage_Config", - "description": "Model config for Z-Image diffusers models (Z-Image-Turbo, Z-Image-Base, Z-Image-Edit)." 
+ "title": "Main_Diffusers_SD2_Config" }, - "Main_GGUF_FLUX_Config": { + "Main_Diffusers_SD3_Config": { "properties": { "key": { "type": "string", @@ -42077,32 +46329,39 @@ ], "description": "Default settings for this model" }, - "config_path": { + "format": { + "type": "string", + "const": "diffusers", + "title": "Format", + "default": "diffusers" + }, + "repo_variant": { + "$ref": "#/components/schemas/ModelRepoVariant", + "default": "" + }, + "base": { + "type": "string", + "const": "sd-3", + "title": "Base", + "default": "sd-3" + }, + "submodels": { "anyOf": [ { - "type": "string" + "additionalProperties": { + "$ref": "#/components/schemas/SubmodelDefinition" + }, + "propertyNames": { + "$ref": "#/components/schemas/SubModelType" + }, + "type": "object" }, { "type": "null" } ], - "title": "Config Path", - "description": "Path to the config for this model, if any." - }, - "base": { - "type": "string", - "const": "flux", - "title": "Base", - "default": "flux" - }, - "format": { - "type": "string", - "const": "gguf_quantized", - "title": "Format", - "default": "gguf_quantized" - }, - "variant": { - "$ref": "#/components/schemas/FluxVariantType" + "title": "Submodels", + "description": "Loadable submodels in this model" } }, "type": "object", @@ -42120,15 +46379,14 @@ "type", "trigger_phrases", "default_settings", - "config_path", - "base", "format", - "variant" + "repo_variant", + "base", + "submodels" ], - "title": "Main_GGUF_FLUX_Config", - "description": "Model config for main checkpoint models." + "title": "Main_Diffusers_SD3_Config" }, - "Main_GGUF_Flux2_Config": { + "Main_Diffusers_SDXLRefiner_Config": { "properties": { "key": { "type": "string", @@ -42233,32 +46491,27 @@ ], "description": "Default settings for this model" }, - "config_path": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "Config Path", - "description": "Path to the config for this model, if any." 
- }, - "base": { - "type": "string", - "const": "flux2", - "title": "Base", - "default": "flux2" - }, "format": { "type": "string", - "const": "gguf_quantized", + "const": "diffusers", "title": "Format", - "default": "gguf_quantized" + "default": "diffusers" + }, + "repo_variant": { + "$ref": "#/components/schemas/ModelRepoVariant", + "default": "" + }, + "prediction_type": { + "$ref": "#/components/schemas/SchedulerPredictionType" }, "variant": { - "$ref": "#/components/schemas/Flux2VariantType" + "$ref": "#/components/schemas/ModelVariantType" + }, + "base": { + "type": "string", + "const": "sdxl-refiner", + "title": "Base", + "default": "sdxl-refiner" } }, "type": "object", @@ -42276,15 +46529,15 @@ "type", "trigger_phrases", "default_settings", - "config_path", - "base", "format", - "variant" + "repo_variant", + "prediction_type", + "variant", + "base" ], - "title": "Main_GGUF_Flux2_Config", - "description": "Model config for GGUF-quantized FLUX.2 checkpoint models (e.g. Klein)." + "title": "Main_Diffusers_SDXLRefiner_Config" }, - "Main_GGUF_ZImage_Config": { + "Main_Diffusers_SDXL_Config": { "properties": { "key": { "type": "string", @@ -42389,29 +46642,27 @@ ], "description": "Default settings for this model" }, - "config_path": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "Config Path", - "description": "Path to the config for this model, if any." 
+ "format": { + "type": "string", + "const": "diffusers", + "title": "Format", + "default": "diffusers" + }, + "repo_variant": { + "$ref": "#/components/schemas/ModelRepoVariant", + "default": "" + }, + "prediction_type": { + "$ref": "#/components/schemas/SchedulerPredictionType" + }, + "variant": { + "$ref": "#/components/schemas/ModelVariantType" }, "base": { "type": "string", - "const": "z-image", + "const": "sdxl", "title": "Base", - "default": "z-image" - }, - "format": { - "type": "string", - "const": "gguf_quantized", - "title": "Format", - "default": "gguf_quantized" + "default": "sdxl" } }, "type": "object", @@ -42429,838 +46680,798 @@ "type", "trigger_phrases", "default_settings", - "config_path", - "base", - "format" + "format", + "repo_variant", + "prediction_type", + "variant", + "base" ], - "title": "Main_GGUF_ZImage_Config", - "description": "Model config for GGUF-quantized Z-Image transformer models." + "title": "Main_Diffusers_SDXL_Config" }, - "MaskCombineInvocation": { - "category": "image", - "class": "invocation", - "classification": "stable", - "description": "Combine two masks together by multiplying them using `PIL.ImageChops.multiply()`.", - "node_pack": "invokeai", + "Main_Diffusers_ZImage_Config": { "properties": { - "board": { - "anyOf": [ - { - "$ref": "#/components/schemas/BoardField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "The board to save the image to", - "field_kind": "internal", - "input": "direct", - "orig_required": false, - "ui_hidden": false + "key": { + "type": "string", + "title": "Key", + "description": "A unique key for this model." }, - "metadata": { + "hash": { + "type": "string", + "title": "Hash", + "description": "The hash of the model file(s)." + }, + "path": { + "type": "string", + "title": "Path", + "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." 
+ }, + "file_size": { + "type": "integer", + "title": "File Size", + "description": "The size of the model in bytes." + }, + "name": { + "type": "string", + "title": "Name", + "description": "Name of the model." + }, + "description": { "anyOf": [ { - "$ref": "#/components/schemas/MetadataField" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "Optional metadata to be saved with the image", - "field_kind": "internal", - "input": "connection", - "orig_required": false, - "ui_hidden": false - }, - "id": { - "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", - "field_kind": "node_attribute", - "title": "Id", - "type": "string" + "title": "Description", + "description": "Model description" }, - "is_intermediate": { - "default": false, - "description": "Whether or not this is an intermediate invocation.", - "field_kind": "node_attribute", - "input": "direct", - "orig_required": true, - "title": "Is Intermediate", - "type": "boolean", - "ui_hidden": false, - "ui_type": "IsIntermediate" + "source": { + "type": "string", + "title": "Source", + "description": "The original source of the model (path, URL or repo_id)." }, - "use_cache": { - "default": true, - "description": "Whether or not to use the cache", - "field_kind": "node_attribute", - "title": "Use Cache", - "type": "boolean" + "source_type": { + "$ref": "#/components/schemas/ModelSourceType", + "description": "The type of source" }, - "mask1": { + "source_api_response": { "anyOf": [ { - "$ref": "#/components/schemas/ImageField" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "The first mask to combine", - "field_kind": "input", - "input": "any", - "orig_required": true + "title": "Source Api Response", + "description": "The original API response from the source, as stringified JSON." 
}, - "mask2": { + "cover_image": { "anyOf": [ { - "$ref": "#/components/schemas/ImageField" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "The second image to combine", - "field_kind": "input", - "input": "any", - "orig_required": true + "title": "Cover Image", + "description": "Url for image to preview model" }, "type": { - "const": "mask_combine", - "default": "mask_combine", - "field_kind": "node_attribute", - "title": "type", - "type": "string" - } - }, - "required": ["type", "id"], - "tags": ["image", "mask", "multiply"], - "title": "Combine Masks", - "type": "object", - "version": "1.2.2", - "output": { - "$ref": "#/components/schemas/ImageOutput" - } - }, - "MaskEdgeInvocation": { - "category": "image", - "class": "invocation", - "classification": "stable", - "description": "Applies an edge mask to an image", - "node_pack": "invokeai", - "properties": { - "board": { + "type": "string", + "const": "main", + "title": "Type", + "default": "main" + }, + "trigger_phrases": { "anyOf": [ { - "$ref": "#/components/schemas/BoardField" + "items": { + "type": "string" + }, + "type": "array", + "uniqueItems": true }, { "type": "null" } ], - "default": null, - "description": "The board to save the image to", - "field_kind": "internal", - "input": "direct", - "orig_required": false, - "ui_hidden": false + "title": "Trigger Phrases", + "description": "Set of trigger phrases for this model" }, - "metadata": { + "default_settings": { "anyOf": [ { - "$ref": "#/components/schemas/MetadataField" + "$ref": "#/components/schemas/MainModelDefaultSettings" }, { "type": "null" } ], - "default": null, - "description": "Optional metadata to be saved with the image", - "field_kind": "internal", - "input": "connection", - "orig_required": false, - "ui_hidden": false + "description": "Default settings for this model" }, - "id": { - "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", - "field_kind": "node_attribute", - "title": "Id", - "type": "string" + "format": { + "type": "string", + "const": "diffusers", + "title": "Format", + "default": "diffusers" }, - "is_intermediate": { - "default": false, - "description": "Whether or not this is an intermediate invocation.", - "field_kind": "node_attribute", - "input": "direct", - "orig_required": true, - "title": "Is Intermediate", - "type": "boolean", - "ui_hidden": false, - "ui_type": "IsIntermediate" + "repo_variant": { + "$ref": "#/components/schemas/ModelRepoVariant", + "default": "" }, - "use_cache": { - "default": true, - "description": "Whether or not to use the cache", - "field_kind": "node_attribute", - "title": "Use Cache", - "type": "boolean" + "base": { + "type": "string", + "const": "z-image", + "title": "Base", + "default": "z-image" }, - "image": { - "anyOf": [ - { - "$ref": "#/components/schemas/ImageField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "The image to apply the mask to", - "field_kind": "input", - "input": "any", - "orig_required": true + "variant": { + "$ref": "#/components/schemas/ZImageVariantType" + } + }, + "type": "object", + "required": [ + "key", + "hash", + "path", + "file_size", + "name", + "description", + "source", + "source_type", + "source_api_response", + "cover_image", + "type", + "trigger_phrases", + "default_settings", + "format", + "repo_variant", + "base", + "variant" + ], + "title": "Main_Diffusers_ZImage_Config", + "description": "Model config for Z-Image diffusers models (Z-Image-Turbo, Z-Image-Base)." + }, + "Main_GGUF_FLUX_Config": { + "properties": { + "key": { + "type": "string", + "title": "Key", + "description": "A unique key for this model." 
}, - "edge_size": { - "anyOf": [ - { - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "The size of the edge", - "field_kind": "input", - "input": "any", - "orig_required": true, - "title": "Edge Size" + "hash": { + "type": "string", + "title": "Hash", + "description": "The hash of the model file(s)." }, - "edge_blur": { + "path": { + "type": "string", + "title": "Path", + "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." + }, + "file_size": { + "type": "integer", + "title": "File Size", + "description": "The size of the model in bytes." + }, + "name": { + "type": "string", + "title": "Name", + "description": "Name of the model." + }, + "description": { "anyOf": [ { - "type": "integer" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "The amount of blur on the edge", - "field_kind": "input", - "input": "any", - "orig_required": true, - "title": "Edge Blur" + "title": "Description", + "description": "Model description" }, - "low_threshold": { + "source": { + "type": "string", + "title": "Source", + "description": "The original source of the model (path, URL or repo_id)." + }, + "source_type": { + "$ref": "#/components/schemas/ModelSourceType", + "description": "The type of source" + }, + "source_api_response": { "anyOf": [ { - "type": "integer" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "First threshold for the hysteresis procedure in Canny edge detection", - "field_kind": "input", - "input": "any", - "orig_required": true, - "title": "Low Threshold" + "title": "Source Api Response", + "description": "The original API response from the source, as stringified JSON." 
}, - "high_threshold": { + "cover_image": { "anyOf": [ { - "type": "integer" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "Second threshold for the hysteresis procedure in Canny edge detection", - "field_kind": "input", - "input": "any", - "orig_required": true, - "title": "High Threshold" + "title": "Cover Image", + "description": "Url for image to preview model" }, "type": { - "const": "mask_edge", - "default": "mask_edge", - "field_kind": "node_attribute", - "title": "type", - "type": "string" - } - }, - "required": ["type", "id"], - "tags": ["image", "mask", "inpaint"], - "title": "Mask Edge", - "type": "object", - "version": "1.2.2", - "output": { - "$ref": "#/components/schemas/ImageOutput" - } - }, - "MaskFromAlphaInvocation": { - "category": "image", - "class": "invocation", - "classification": "stable", - "description": "Extracts the alpha channel of an image as a mask.", - "node_pack": "invokeai", - "properties": { - "board": { + "type": "string", + "const": "main", + "title": "Type", + "default": "main" + }, + "trigger_phrases": { "anyOf": [ { - "$ref": "#/components/schemas/BoardField" + "items": { + "type": "string" + }, + "type": "array", + "uniqueItems": true }, { "type": "null" } ], - "default": null, - "description": "The board to save the image to", - "field_kind": "internal", - "input": "direct", - "orig_required": false, - "ui_hidden": false + "title": "Trigger Phrases", + "description": "Set of trigger phrases for this model" }, - "metadata": { + "default_settings": { "anyOf": [ { - "$ref": "#/components/schemas/MetadataField" + "$ref": "#/components/schemas/MainModelDefaultSettings" }, { "type": "null" } ], - "default": null, - "description": "Optional metadata to be saved with the image", - "field_kind": "internal", - "input": "connection", - "orig_required": false, - "ui_hidden": false - }, - "id": { - "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", - "field_kind": "node_attribute", - "title": "Id", - "type": "string" - }, - "is_intermediate": { - "default": false, - "description": "Whether or not this is an intermediate invocation.", - "field_kind": "node_attribute", - "input": "direct", - "orig_required": true, - "title": "Is Intermediate", - "type": "boolean", - "ui_hidden": false, - "ui_type": "IsIntermediate" - }, - "use_cache": { - "default": true, - "description": "Whether or not to use the cache", - "field_kind": "node_attribute", - "title": "Use Cache", - "type": "boolean" + "description": "Default settings for this model" }, - "image": { + "config_path": { "anyOf": [ { - "$ref": "#/components/schemas/ImageField" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "The image to create the mask from", - "field_kind": "input", - "input": "any", - "orig_required": true + "title": "Config Path", + "description": "Path to the config for this model, if any." 
}, - "invert": { - "default": false, - "description": "Whether or not to invert the mask", - "field_kind": "input", - "input": "any", - "orig_default": false, - "orig_required": false, - "title": "Invert", - "type": "boolean" + "base": { + "type": "string", + "const": "flux", + "title": "Base", + "default": "flux" }, - "type": { - "const": "tomask", - "default": "tomask", - "field_kind": "node_attribute", - "title": "type", - "type": "string" + "format": { + "type": "string", + "const": "gguf_quantized", + "title": "Format", + "default": "gguf_quantized" + }, + "variant": { + "$ref": "#/components/schemas/FluxVariantType" } }, - "required": ["type", "id"], - "tags": ["image", "mask"], - "title": "Mask from Alpha", "type": "object", - "version": "1.2.2", - "output": { - "$ref": "#/components/schemas/ImageOutput" - } + "required": [ + "key", + "hash", + "path", + "file_size", + "name", + "description", + "source", + "source_type", + "source_api_response", + "cover_image", + "type", + "trigger_phrases", + "default_settings", + "config_path", + "base", + "format", + "variant" + ], + "title": "Main_GGUF_FLUX_Config", + "description": "Model config for main checkpoint models." }, - "MaskFromIDInvocation": { - "category": "image", - "class": "invocation", - "classification": "stable", - "description": "Generate a mask for a particular color in an ID Map", - "node_pack": "invokeai", + "Main_GGUF_Flux2_Config": { "properties": { - "board": { + "key": { + "type": "string", + "title": "Key", + "description": "A unique key for this model." + }, + "hash": { + "type": "string", + "title": "Hash", + "description": "The hash of the model file(s)." + }, + "path": { + "type": "string", + "title": "Path", + "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." + }, + "file_size": { + "type": "integer", + "title": "File Size", + "description": "The size of the model in bytes." 
+ }, + "name": { + "type": "string", + "title": "Name", + "description": "Name of the model." + }, + "description": { "anyOf": [ { - "$ref": "#/components/schemas/BoardField" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "The board to save the image to", - "field_kind": "internal", - "input": "direct", - "orig_required": false, - "ui_hidden": false + "title": "Description", + "description": "Model description" }, - "metadata": { + "source": { + "type": "string", + "title": "Source", + "description": "The original source of the model (path, URL or repo_id)." + }, + "source_type": { + "$ref": "#/components/schemas/ModelSourceType", + "description": "The type of source" + }, + "source_api_response": { "anyOf": [ { - "$ref": "#/components/schemas/MetadataField" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "Optional metadata to be saved with the image", - "field_kind": "internal", - "input": "connection", - "orig_required": false, - "ui_hidden": false + "title": "Source Api Response", + "description": "The original API response from the source, as stringified JSON." }, - "id": { - "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", - "field_kind": "node_attribute", - "title": "Id", - "type": "string" + "cover_image": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Cover Image", + "description": "Url for image to preview model" }, - "is_intermediate": { - "default": false, - "description": "Whether or not this is an intermediate invocation.", - "field_kind": "node_attribute", - "input": "direct", - "orig_required": true, - "title": "Is Intermediate", - "type": "boolean", - "ui_hidden": false, - "ui_type": "IsIntermediate" + "type": { + "type": "string", + "const": "main", + "title": "Type", + "default": "main" }, - "use_cache": { - "default": true, - "description": "Whether or not to use the cache", - "field_kind": "node_attribute", - "title": "Use Cache", - "type": "boolean" + "trigger_phrases": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array", + "uniqueItems": true + }, + { + "type": "null" + } + ], + "title": "Trigger Phrases", + "description": "Set of trigger phrases for this model" }, - "image": { + "default_settings": { "anyOf": [ { - "$ref": "#/components/schemas/ImageField" + "$ref": "#/components/schemas/MainModelDefaultSettings" }, { "type": "null" } ], - "default": null, - "description": "The image to create the mask from", - "field_kind": "input", - "input": "any", - "orig_required": true + "description": "Default settings for this model" }, - "color": { + "config_path": { "anyOf": [ { - "$ref": "#/components/schemas/ColorField" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "ID color to mask", - "field_kind": "input", - "input": "any", - "orig_required": true + "title": "Config Path", + "description": "Path to the config for this model, if any." 
}, - "threshold": { - "default": 100, - "description": "Threshold for color detection", - "field_kind": "input", - "input": "any", - "orig_default": 100, - "orig_required": false, - "title": "Threshold", - "type": "integer" + "base": { + "type": "string", + "const": "flux2", + "title": "Base", + "default": "flux2" }, - "invert": { - "default": false, - "description": "Whether or not to invert the mask", - "field_kind": "input", - "input": "any", - "orig_default": false, - "orig_required": false, - "title": "Invert", - "type": "boolean" + "format": { + "type": "string", + "const": "gguf_quantized", + "title": "Format", + "default": "gguf_quantized" }, - "type": { - "const": "mask_from_id", - "default": "mask_from_id", - "field_kind": "node_attribute", - "title": "type", - "type": "string" + "variant": { + "$ref": "#/components/schemas/Flux2VariantType" } }, - "required": ["type", "id"], - "tags": ["image", "mask", "id"], - "title": "Mask from Segmented Image", "type": "object", - "version": "1.0.1", - "output": { - "$ref": "#/components/schemas/ImageOutput" - } + "required": [ + "key", + "hash", + "path", + "file_size", + "name", + "description", + "source", + "source_type", + "source_api_response", + "cover_image", + "type", + "trigger_phrases", + "default_settings", + "config_path", + "base", + "format", + "variant" + ], + "title": "Main_GGUF_Flux2_Config", + "description": "Model config for GGUF-quantized FLUX.2 checkpoint models (e.g. Klein)." }, - "MaskOutput": { - "class": "output", - "description": "A torch mask tensor.", + "Main_GGUF_QwenImage_Config": { "properties": { - "mask": { - "$ref": "#/components/schemas/TensorField", - "description": "The mask.", - "field_kind": "output", - "ui_hidden": false + "key": { + "type": "string", + "title": "Key", + "description": "A unique key for this model." 
}, - "width": { - "description": "The width of the mask in pixels.", - "field_kind": "output", - "title": "Width", - "type": "integer", - "ui_hidden": false + "hash": { + "type": "string", + "title": "Hash", + "description": "The hash of the model file(s)." }, - "height": { - "description": "The height of the mask in pixels.", - "field_kind": "output", - "title": "Height", + "path": { + "type": "string", + "title": "Path", + "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." + }, + "file_size": { "type": "integer", - "ui_hidden": false + "title": "File Size", + "description": "The size of the model in bytes." }, - "type": { - "const": "mask_output", - "default": "mask_output", - "field_kind": "node_attribute", - "title": "type", - "type": "string" - } - }, - "required": ["output_meta", "mask", "width", "height", "type", "type"], - "title": "MaskOutput", - "type": "object" - }, - "MaskTensorToImageInvocation": { - "category": "mask", - "class": "invocation", - "classification": "stable", - "description": "Convert a mask tensor to an image.", - "node_pack": "invokeai", - "properties": { - "board": { + "name": { + "type": "string", + "title": "Name", + "description": "Name of the model." + }, + "description": { "anyOf": [ { - "$ref": "#/components/schemas/BoardField" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "The board to save the image to", - "field_kind": "internal", - "input": "direct", - "orig_required": false, - "ui_hidden": false + "title": "Description", + "description": "Model description" }, - "metadata": { + "source": { + "type": "string", + "title": "Source", + "description": "The original source of the model (path, URL or repo_id)." 
+ }, + "source_type": { + "$ref": "#/components/schemas/ModelSourceType", + "description": "The type of source" + }, + "source_api_response": { "anyOf": [ { - "$ref": "#/components/schemas/MetadataField" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "Optional metadata to be saved with the image", - "field_kind": "internal", - "input": "connection", - "orig_required": false, - "ui_hidden": false - }, - "id": { - "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", - "field_kind": "node_attribute", - "title": "Id", - "type": "string" - }, - "is_intermediate": { - "default": false, - "description": "Whether or not this is an intermediate invocation.", - "field_kind": "node_attribute", - "input": "direct", - "orig_required": true, - "title": "Is Intermediate", - "type": "boolean", - "ui_hidden": false, - "ui_type": "IsIntermediate" - }, - "use_cache": { - "default": true, - "description": "Whether or not to use the cache", - "field_kind": "node_attribute", - "title": "Use Cache", - "type": "boolean" + "title": "Source Api Response", + "description": "The original API response from the source, as stringified JSON." 
}, - "mask": { + "cover_image": { "anyOf": [ { - "$ref": "#/components/schemas/TensorField" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "The mask tensor to convert.", - "field_kind": "input", - "input": "any", - "orig_required": true + "title": "Cover Image", + "description": "Url for image to preview model" }, "type": { - "const": "tensor_mask_to_image", - "default": "tensor_mask_to_image", - "field_kind": "node_attribute", - "title": "type", - "type": "string" - } - }, - "required": ["type", "id"], - "tags": ["mask"], - "title": "Tensor Mask to Image", - "type": "object", - "version": "1.1.0", - "output": { - "$ref": "#/components/schemas/ImageOutput" - } - }, - "MediaPipeFaceDetectionInvocation": { - "category": "controlnet", - "class": "invocation", - "classification": "stable", - "description": "Detects faces using MediaPipe.", - "node_pack": "invokeai", - "properties": { - "board": { + "type": "string", + "const": "main", + "title": "Type", + "default": "main" + }, + "trigger_phrases": { "anyOf": [ { - "$ref": "#/components/schemas/BoardField" + "items": { + "type": "string" + }, + "type": "array", + "uniqueItems": true }, { "type": "null" } ], - "default": null, - "description": "The board to save the image to", - "field_kind": "internal", - "input": "direct", - "orig_required": false, - "ui_hidden": false + "title": "Trigger Phrases", + "description": "Set of trigger phrases for this model" }, - "metadata": { + "default_settings": { "anyOf": [ { - "$ref": "#/components/schemas/MetadataField" + "$ref": "#/components/schemas/MainModelDefaultSettings" }, { "type": "null" } ], - "default": null, - "description": "Optional metadata to be saved with the image", - "field_kind": "internal", - "input": "connection", - "orig_required": false, - "ui_hidden": false - }, - "id": { - "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", - "field_kind": "node_attribute", - "title": "Id", - "type": "string" - }, - "is_intermediate": { - "default": false, - "description": "Whether or not this is an intermediate invocation.", - "field_kind": "node_attribute", - "input": "direct", - "orig_required": true, - "title": "Is Intermediate", - "type": "boolean", - "ui_hidden": false, - "ui_type": "IsIntermediate" - }, - "use_cache": { - "default": true, - "description": "Whether or not to use the cache", - "field_kind": "node_attribute", - "title": "Use Cache", - "type": "boolean" + "description": "Default settings for this model" }, - "image": { + "config_path": { "anyOf": [ { - "$ref": "#/components/schemas/ImageField" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "The image to process", - "field_kind": "input", - "input": "any", - "orig_required": true + "title": "Config Path", + "description": "Path to the config for this model, if any." }, - "max_faces": { - "default": 1, - "description": "Maximum number of faces to detect", - "field_kind": "input", - "input": "any", - "minimum": 1, - "orig_default": 1, - "orig_required": false, - "title": "Max Faces", - "type": "integer" + "base": { + "type": "string", + "const": "qwen-image", + "title": "Base", + "default": "qwen-image" }, - "min_confidence": { - "default": 0.5, - "description": "Minimum confidence for face detection", - "field_kind": "input", - "input": "any", - "maximum": 1, - "minimum": 0, - "orig_default": 0.5, - "orig_required": false, - "title": "Min Confidence", - "type": "number" + "format": { + "type": "string", + "const": "gguf_quantized", + "title": "Format", + "default": "gguf_quantized" }, - "type": { - "const": "mediapipe_face_detection", - "default": "mediapipe_face_detection", - "field_kind": "node_attribute", - "title": "type", - "type": "string" - } - }, - "required": ["type", "id"], - "tags": ["controlnet", "face"], - "title": "MediaPipe Face 
Detection", - "type": "object", - "version": "1.0.0", - "output": { - "$ref": "#/components/schemas/ImageOutput" - } + "variant": { + "anyOf": [ + { + "$ref": "#/components/schemas/QwenImageVariantType" + }, + { + "type": "null" + } + ] + } + }, + "type": "object", + "required": [ + "key", + "hash", + "path", + "file_size", + "name", + "description", + "source", + "source_type", + "source_api_response", + "cover_image", + "type", + "trigger_phrases", + "default_settings", + "config_path", + "base", + "format", + "variant" + ], + "title": "Main_GGUF_QwenImage_Config", + "description": "Model config for GGUF-quantized Qwen Image transformer models." }, - "MergeMetadataInvocation": { - "category": "metadata", - "class": "invocation", - "classification": "stable", - "description": "Merged a collection of MetadataDict into a single MetadataDict.", - "node_pack": "invokeai", + "Main_GGUF_ZImage_Config": { "properties": { - "id": { - "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", - "field_kind": "node_attribute", - "title": "Id", - "type": "string" + "key": { + "type": "string", + "title": "Key", + "description": "A unique key for this model." }, - "is_intermediate": { - "default": false, - "description": "Whether or not this is an intermediate invocation.", - "field_kind": "node_attribute", - "input": "direct", - "orig_required": true, - "title": "Is Intermediate", - "type": "boolean", - "ui_hidden": false, - "ui_type": "IsIntermediate" + "hash": { + "type": "string", + "title": "Hash", + "description": "The hash of the model file(s)." }, - "use_cache": { - "default": true, - "description": "Whether or not to use the cache", - "field_kind": "node_attribute", - "title": "Use Cache", - "type": "boolean" + "path": { + "type": "string", + "title": "Path", + "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." 
}, - "collection": { + "file_size": { + "type": "integer", + "title": "File Size", + "description": "The size of the model in bytes." + }, + "name": { + "type": "string", + "title": "Name", + "description": "Name of the model." + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Description", + "description": "Model description" + }, + "source": { + "type": "string", + "title": "Source", + "description": "The original source of the model (path, URL or repo_id)." + }, + "source_type": { + "$ref": "#/components/schemas/ModelSourceType", + "description": "The type of source" + }, + "source_api_response": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Source Api Response", + "description": "The original API response from the source, as stringified JSON." + }, + "cover_image": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Cover Image", + "description": "Url for image to preview model" + }, + "type": { + "type": "string", + "const": "main", + "title": "Type", + "default": "main" + }, + "trigger_phrases": { "anyOf": [ { "items": { - "$ref": "#/components/schemas/MetadataField" + "type": "string" }, - "type": "array" + "type": "array", + "uniqueItems": true }, { "type": "null" } ], - "default": null, - "description": "Collection of Metadata", - "field_kind": "input", - "input": "any", - "orig_required": true, - "title": "Collection" + "title": "Trigger Phrases", + "description": "Set of trigger phrases for this model" }, - "type": { - "const": "merge_metadata", - "default": "merge_metadata", - "field_kind": "node_attribute", - "title": "type", - "type": "string" + "default_settings": { + "anyOf": [ + { + "$ref": "#/components/schemas/MainModelDefaultSettings" + }, + { + "type": "null" + } + ], + "description": "Default settings for this model" + }, + "config_path": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + 
"title": "Config Path", + "description": "Path to the config for this model, if any." + }, + "base": { + "type": "string", + "const": "z-image", + "title": "Base", + "default": "z-image" + }, + "format": { + "type": "string", + "const": "gguf_quantized", + "title": "Format", + "default": "gguf_quantized" + }, + "variant": { + "$ref": "#/components/schemas/ZImageVariantType" } }, - "required": ["type", "id"], - "tags": ["metadata"], - "title": "Metadata Merge", "type": "object", - "version": "1.0.1", - "output": { - "$ref": "#/components/schemas/MetadataOutput" - } + "required": [ + "key", + "hash", + "path", + "file_size", + "name", + "description", + "source", + "source_type", + "source_api_response", + "cover_image", + "type", + "trigger_phrases", + "default_settings", + "config_path", + "base", + "format", + "variant" + ], + "title": "Main_GGUF_ZImage_Config", + "description": "Model config for GGUF-quantized Z-Image transformer models." }, - "MergeTilesToImageInvocation": { - "category": "tiles", + "MaskCombineInvocation": { + "category": "image", "class": "invocation", "classification": "stable", - "description": "Merge multiple tile images into a single image.", + "description": "Combine two masks together by multiplying them using `PIL.ImageChops.multiply()`.", "node_pack": "invokeai", "properties": { "board": { @@ -43319,77 +47530,92 @@ "title": "Use Cache", "type": "boolean" }, - "tiles_with_images": { + "mask1": { "anyOf": [ { - "items": { - "$ref": "#/components/schemas/TileWithImage" - }, - "type": "array" + "$ref": "#/components/schemas/ImageField" }, { "type": "null" } ], "default": null, - "description": "A list of tile images with tile properties.", + "description": "The first mask to combine", "field_kind": "input", "input": "any", - "orig_required": true, - "title": "Tiles With Images" - }, - "blend_mode": { - "default": "Seam", - "description": "blending type Linear or Seam", - "enum": ["Linear", "Seam"], - "field_kind": "input", - "input": 
"direct", - "orig_default": "Seam", - "orig_required": false, - "title": "Blend Mode", - "type": "string" + "orig_required": true }, - "blend_amount": { - "default": 32, - "description": "The amount to blend adjacent tiles in pixels. Must be <= the amount of overlap between adjacent tiles.", + "mask2": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The second image to combine", "field_kind": "input", "input": "any", - "minimum": 0, - "orig_default": 32, - "orig_required": false, - "title": "Blend Amount", - "type": "integer" + "orig_required": true }, "type": { - "const": "merge_tiles_to_image", - "default": "merge_tiles_to_image", + "const": "mask_combine", + "default": "mask_combine", "field_kind": "node_attribute", "title": "type", "type": "string" } }, "required": ["type", "id"], - "tags": ["tiles"], - "title": "Merge Tiles to Image", + "tags": ["image", "mask", "multiply"], + "title": "Combine Masks", "type": "object", - "version": "1.1.1", + "version": "1.2.2", "output": { "$ref": "#/components/schemas/ImageOutput" } }, - "MetadataField": { - "additionalProperties": true, - "type": "object", - "title": "MetadataField", - "description": "Pydantic model for metadata with custom root of type dict[str, Any].\nMetadata is stored without a strict schema." 
- }, - "MetadataFieldExtractorInvocation": { - "category": "metadata", + "MaskEdgeInvocation": { + "category": "image", "class": "invocation", - "classification": "deprecated", - "description": "Extracts the text value from an image's metadata given a key.\nRaises an error if the image has no metadata or if the value is not a string (nesting not permitted).", + "classification": "stable", + "description": "Applies an edge mask to an image", "node_pack": "invokeai", "properties": { + "board": { + "anyOf": [ + { + "$ref": "#/components/schemas/BoardField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The board to save the image to", + "field_kind": "internal", + "input": "direct", + "orig_required": false, + "ui_hidden": false + }, + "metadata": { + "anyOf": [ + { + "$ref": "#/components/schemas/MetadataField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional metadata to be saved with the image", + "field_kind": "internal", + "input": "connection", + "orig_required": false, + "ui_hidden": false + }, "id": { "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", "field_kind": "node_attribute", @@ -43424,51 +47650,131 @@ } ], "default": null, - "description": "The image to extract metadata from", + "description": "The image to apply the mask to", "field_kind": "input", "input": "any", "orig_required": true }, - "key": { + "edge_size": { "anyOf": [ { - "type": "string" + "type": "integer" }, { "type": "null" } ], "default": null, - "description": "The key in the image's metadata to extract the value from", + "description": "The size of the edge", "field_kind": "input", "input": "any", "orig_required": true, - "title": "Key" + "title": "Edge Size" + }, + "edge_blur": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The amount of blur on the edge", + "field_kind": "input", + "input": "any", + "orig_required": true, + "title": "Edge Blur" + }, + "low_threshold": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "First threshold for the hysteresis procedure in Canny edge detection", + "field_kind": "input", + "input": "any", + "orig_required": true, + "title": "Low Threshold" + }, + "high_threshold": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Second threshold for the hysteresis procedure in Canny edge detection", + "field_kind": "input", + "input": "any", + "orig_required": true, + "title": "High Threshold" }, "type": { - "const": "metadata_field_extractor", - "default": "metadata_field_extractor", + "const": "mask_edge", + "default": "mask_edge", "field_kind": "node_attribute", "title": "type", "type": "string" } }, "required": ["type", "id"], - "tags": ["metadata"], - "title": "Metadata Field Extractor", + "tags": ["image", "mask", "inpaint"], + "title": "Mask Edge", "type": "object", - "version": "1.0.0", + "version": "1.2.2", "output": { - "$ref": "#/components/schemas/StringOutput" + 
"$ref": "#/components/schemas/ImageOutput" } }, - "MetadataFromImageInvocation": { - "category": "metadata", + "MaskFromAlphaInvocation": { + "category": "image", "class": "invocation", - "classification": "beta", - "description": "Used to create a core metadata item then Add/Update it to the provided metadata", + "classification": "stable", + "description": "Extracts the alpha channel of an image as a mask.", "node_pack": "invokeai", "properties": { + "board": { + "anyOf": [ + { + "$ref": "#/components/schemas/BoardField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The board to save the image to", + "field_kind": "internal", + "input": "direct", + "orig_required": false, + "ui_hidden": false + }, + "metadata": { + "anyOf": [ + { + "$ref": "#/components/schemas/MetadataField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional metadata to be saved with the image", + "field_kind": "internal", + "input": "connection", + "orig_required": false, + "ui_hidden": false + }, "id": { "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", "field_kind": "node_attribute", @@ -43503,35 +47809,77 @@ } ], "default": null, - "description": "The image to process", + "description": "The image to create the mask from", "field_kind": "input", "input": "any", "orig_required": true }, + "invert": { + "default": false, + "description": "Whether or not to invert the mask", + "field_kind": "input", + "input": "any", + "orig_default": false, + "orig_required": false, + "title": "Invert", + "type": "boolean" + }, "type": { - "const": "metadata_from_image", - "default": "metadata_from_image", + "const": "tomask", + "default": "tomask", "field_kind": "node_attribute", "title": "type", "type": "string" } }, "required": ["type", "id"], - "tags": ["metadata"], - "title": "Metadata From Image", + "tags": ["image", "mask"], + "title": "Mask from Alpha", "type": "object", - "version": "1.0.1", + "version": "1.2.2", "output": { - "$ref": "#/components/schemas/MetadataOutput" + "$ref": "#/components/schemas/ImageOutput" } }, - "MetadataInvocation": { - "category": "metadata", + "MaskFromIDInvocation": { + "category": "image", "class": "invocation", "classification": "stable", - "description": "Takes a MetadataItem or collection of MetadataItems and outputs a MetadataDict.", + "description": "Generate a mask for a particular color in an ID Map", "node_pack": "invokeai", "properties": { + "board": { + "anyOf": [ + { + "$ref": "#/components/schemas/BoardField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The board to save the image to", + "field_kind": "internal", + "input": "direct", + "orig_required": false, + "ui_hidden": false + }, + "metadata": { + "anyOf": [ + { + "$ref": "#/components/schemas/MetadataField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional metadata to be saved with the image", + "field_kind": "internal", + "input": "connection", + "orig_required": false, + "ui_hidden": false + }, "id": { 
"description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", "field_kind": "node_attribute", @@ -43556,68 +47904,148 @@ "title": "Use Cache", "type": "boolean" }, - "items": { + "image": { "anyOf": [ { - "items": { - "$ref": "#/components/schemas/MetadataItemField" - }, - "type": "array" - }, - { - "$ref": "#/components/schemas/MetadataItemField" + "$ref": "#/components/schemas/ImageField" }, { "type": "null" } ], "default": null, - "description": "A single metadata item or collection of metadata items", + "description": "The image to create the mask from", "field_kind": "input", "input": "any", - "orig_required": true, - "title": "Items" + "orig_required": true + }, + "color": { + "anyOf": [ + { + "$ref": "#/components/schemas/ColorField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "ID color to mask", + "field_kind": "input", + "input": "any", + "orig_required": true + }, + "threshold": { + "default": 100, + "description": "Threshold for color detection", + "field_kind": "input", + "input": "any", + "orig_default": 100, + "orig_required": false, + "title": "Threshold", + "type": "integer" + }, + "invert": { + "default": false, + "description": "Whether or not to invert the mask", + "field_kind": "input", + "input": "any", + "orig_default": false, + "orig_required": false, + "title": "Invert", + "type": "boolean" }, "type": { - "const": "metadata", - "default": "metadata", + "const": "mask_from_id", + "default": "mask_from_id", "field_kind": "node_attribute", "title": "type", "type": "string" } }, "required": ["type", "id"], - "tags": ["metadata"], - "title": "Metadata", + "tags": ["image", "mask", "id"], + "title": "Mask from Segmented Image", "type": "object", "version": "1.0.1", "output": { - "$ref": "#/components/schemas/MetadataOutput" + "$ref": "#/components/schemas/ImageOutput" } }, - "MetadataItemField": { + "MaskOutput": { + "class": "output", + "description": "A torch mask 
tensor.", "properties": { - "label": { - "description": "Label for this metadata item", - "title": "Label", - "type": "string" + "mask": { + "$ref": "#/components/schemas/TensorField", + "description": "The mask.", + "field_kind": "output", + "ui_hidden": false }, - "value": { - "description": "The value for this metadata item (may be any type)", - "title": "Value" + "width": { + "description": "The width of the mask in pixels.", + "field_kind": "output", + "title": "Width", + "type": "integer", + "ui_hidden": false + }, + "height": { + "description": "The height of the mask in pixels.", + "field_kind": "output", + "title": "Height", + "type": "integer", + "ui_hidden": false + }, + "type": { + "const": "mask_output", + "default": "mask_output", + "field_kind": "node_attribute", + "title": "type", + "type": "string" } }, - "required": ["label", "value"], - "title": "MetadataItemField", + "required": ["output_meta", "mask", "width", "height", "type", "type"], + "title": "MaskOutput", "type": "object" }, - "MetadataItemInvocation": { - "category": "metadata", + "MaskTensorToImageInvocation": { + "category": "mask", "class": "invocation", "classification": "stable", - "description": "Used to create an arbitrary metadata item. 
Provide \"label\" and make a connection to \"value\" to store that data as the value.", + "description": "Convert a mask tensor to an image.", "node_pack": "invokeai", "properties": { + "board": { + "anyOf": [ + { + "$ref": "#/components/schemas/BoardField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The board to save the image to", + "field_kind": "internal", + "input": "direct", + "orig_required": false, + "ui_hidden": false + }, + "metadata": { + "anyOf": [ + { + "$ref": "#/components/schemas/MetadataField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional metadata to be saved with the image", + "field_kind": "internal", + "input": "connection", + "orig_required": false, + "ui_hidden": false + }, "id": { "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", "field_kind": "node_attribute", @@ -43642,61 +48070,61 @@ "title": "Use Cache", "type": "boolean" }, - "label": { + "mask": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/TensorField" }, { "type": "null" } ], "default": null, - "description": "Label for this metadata item", - "field_kind": "input", - "input": "any", - "orig_required": true, - "title": "Label" - }, - "value": { - "anyOf": [ - {}, - { - "type": "null" - } - ], - "default": null, - "description": "The value for this metadata item (may be any type)", + "description": "The mask tensor to convert.", "field_kind": "input", "input": "any", - "orig_required": true, - "title": "Value", - "ui_type": "AnyField" + "orig_required": true }, "type": { - "const": "metadata_item", - "default": "metadata_item", + "const": "tensor_mask_to_image", + "default": "tensor_mask_to_image", "field_kind": "node_attribute", "title": "type", "type": "string" } }, "required": ["type", "id"], - "tags": ["metadata"], - "title": "Metadata Item", + "tags": ["mask"], + "title": "Tensor Mask to Image", "type": "object", - "version": "1.0.1", + 
"version": "1.1.0", "output": { - "$ref": "#/components/schemas/MetadataItemOutput" + "$ref": "#/components/schemas/ImageOutput" } }, - "MetadataItemLinkedInvocation": { - "category": "metadata", + "MediaPipeFaceDetectionInvocation": { + "category": "controlnet", "class": "invocation", - "classification": "beta", - "description": "Used to Create/Add/Update a value into a metadata label", + "classification": "stable", + "description": "Detects faces using MediaPipe.", "node_pack": "invokeai", "properties": { + "board": { + "anyOf": [ + { + "$ref": "#/components/schemas/BoardField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The board to save the image to", + "field_kind": "internal", + "input": "direct", + "orig_required": false, + "ui_hidden": false + }, "metadata": { "anyOf": [ { @@ -43737,153 +48165,68 @@ "title": "Use Cache", "type": "boolean" }, - "label": { - "default": "* CUSTOM LABEL *", - "description": "Label for this metadata item", - "enum": [ - "* CUSTOM LABEL *", - "positive_prompt", - "positive_style_prompt", - "negative_prompt", - "negative_style_prompt", - "width", - "height", - "seed", - "cfg_scale", - "cfg_rescale_multiplier", - "steps", - "scheduler", - "clip_skip", - "model", - "vae", - "seamless_x", - "seamless_y", - "guidance", - "cfg_scale_start_step", - "cfg_scale_end_step" - ], - "field_kind": "input", - "input": "direct", - "orig_default": "* CUSTOM LABEL *", - "orig_required": false, - "title": "Label", - "type": "string" - }, - "custom_label": { + "image": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/ImageField" }, { "type": "null" } ], "default": null, - "description": "Label for this metadata item", + "description": "The image to process", "field_kind": "input", - "input": "direct", - "orig_default": null, + "input": "any", + "orig_required": true + }, + "max_faces": { + "default": 1, + "description": "Maximum number of faces to detect", + "field_kind": "input", + "input": "any", + 
"minimum": 1, + "orig_default": 1, "orig_required": false, - "title": "Custom Label" + "title": "Max Faces", + "type": "integer" }, - "value": { - "anyOf": [ - {}, - { - "type": "null" - } - ], - "default": null, - "description": "The value for this metadata item (may be any type)", + "min_confidence": { + "default": 0.5, + "description": "Minimum confidence for face detection", "field_kind": "input", "input": "any", - "orig_required": true, - "title": "Value", - "ui_type": "AnyField" + "maximum": 1, + "minimum": 0, + "orig_default": 0.5, + "orig_required": false, + "title": "Min Confidence", + "type": "number" }, "type": { - "const": "metadata_item_linked", - "default": "metadata_item_linked", + "const": "mediapipe_face_detection", + "default": "mediapipe_face_detection", "field_kind": "node_attribute", "title": "type", "type": "string" } }, "required": ["type", "id"], - "tags": ["metadata"], - "title": "Metadata Item Linked", + "tags": ["controlnet", "face"], + "title": "MediaPipe Face Detection", "type": "object", - "version": "1.0.1", + "version": "1.0.0", "output": { - "$ref": "#/components/schemas/MetadataOutput" + "$ref": "#/components/schemas/ImageOutput" } }, - "MetadataItemOutput": { - "class": "output", - "description": "Metadata Item Output", - "properties": { - "item": { - "$ref": "#/components/schemas/MetadataItemField", - "description": "Metadata Item", - "field_kind": "output", - "ui_hidden": false - }, - "type": { - "const": "metadata_item_output", - "default": "metadata_item_output", - "field_kind": "node_attribute", - "title": "type", - "type": "string" - } - }, - "required": ["output_meta", "item", "type", "type"], - "title": "MetadataItemOutput", - "type": "object" - }, - "MetadataOutput": { - "class": "output", - "properties": { - "metadata": { - "$ref": "#/components/schemas/MetadataField", - "description": "Metadata Dict", - "field_kind": "output", - "ui_hidden": false - }, - "type": { - "const": "metadata_output", - "default": 
"metadata_output", - "field_kind": "node_attribute", - "title": "type", - "type": "string" - } - }, - "required": ["output_meta", "metadata", "type", "type"], - "title": "MetadataOutput", - "type": "object" - }, - "MetadataToBoolCollectionInvocation": { + "MergeMetadataInvocation": { "category": "metadata", "class": "invocation", - "classification": "beta", - "description": "Extracts a Boolean value Collection of a label from metadata", + "classification": "stable", + "description": "Merged a collection of MetadataDict into a single MetadataDict.", "node_pack": "invokeai", "properties": { - "metadata": { - "anyOf": [ - { - "$ref": "#/components/schemas/MetadataField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Optional metadata to be saved with the image", - "field_kind": "internal", - "input": "connection", - "orig_required": false, - "ui_hidden": false - }, "id": { "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", "field_kind": "node_attribute", @@ -43908,39 +48251,11 @@ "title": "Use Cache", "type": "boolean" }, - "label": { - "default": "* CUSTOM LABEL *", - "description": "Label for this metadata item", - "enum": ["* CUSTOM LABEL *", "seamless_x", "seamless_y"], - "field_kind": "input", - "input": "direct", - "orig_default": "* CUSTOM LABEL *", - "orig_required": false, - "title": "Label", - "type": "string" - }, - "custom_label": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Label for this metadata item", - "field_kind": "input", - "input": "direct", - "orig_default": null, - "orig_required": false, - "title": "Custom Label" - }, - "default_value": { + "collection": { "anyOf": [ { "items": { - "type": "boolean" + "$ref": "#/components/schemas/MetadataField" }, "type": "array" }, @@ -43949,15 +48264,15 @@ } ], "default": null, - "description": "The default bool to use if not found in the metadata", + 
"description": "Collection of Metadata", "field_kind": "input", "input": "any", "orig_required": true, - "title": "Default Value" + "title": "Collection" }, "type": { - "const": "metadata_to_bool_collection", - "default": "metadata_to_bool_collection", + "const": "merge_metadata", + "default": "merge_metadata", "field_kind": "node_attribute", "title": "type", "type": "string" @@ -43965,20 +48280,36 @@ }, "required": ["type", "id"], "tags": ["metadata"], - "title": "Metadata To Bool Collection", + "title": "Metadata Merge", "type": "object", - "version": "1.0.0", + "version": "1.0.1", "output": { - "$ref": "#/components/schemas/BooleanCollectionOutput" + "$ref": "#/components/schemas/MetadataOutput" } }, - "MetadataToBoolInvocation": { - "category": "metadata", + "MergeTilesToImageInvocation": { + "category": "tiles", "class": "invocation", - "classification": "beta", - "description": "Extracts a Boolean value of a label from metadata", + "classification": "stable", + "description": "Merge multiple tile images into a single image.", "node_pack": "invokeai", "properties": { + "board": { + "anyOf": [ + { + "$ref": "#/components/schemas/BoardField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The board to save the image to", + "field_kind": "internal", + "input": "direct", + "orig_required": false, + "ui_hidden": false + }, "metadata": { "anyOf": [ { @@ -44019,90 +48350,77 @@ "title": "Use Cache", "type": "boolean" }, - "label": { - "default": "* CUSTOM LABEL *", - "description": "Label for this metadata item", - "enum": ["* CUSTOM LABEL *", "seamless_x", "seamless_y"], - "field_kind": "input", - "input": "direct", - "orig_default": "* CUSTOM LABEL *", - "orig_required": false, - "title": "Label", - "type": "string" - }, - "custom_label": { + "tiles_with_images": { "anyOf": [ { - "type": "string" + "items": { + "$ref": "#/components/schemas/TileWithImage" + }, + "type": "array" }, { "type": "null" } ], "default": null, - "description": "Label 
for this metadata item", + "description": "A list of tile images with tile properties.", + "field_kind": "input", + "input": "any", + "orig_required": true, + "title": "Tiles With Images" + }, + "blend_mode": { + "default": "Seam", + "description": "blending type Linear or Seam", + "enum": ["Linear", "Seam"], "field_kind": "input", "input": "direct", - "orig_default": null, + "orig_default": "Seam", "orig_required": false, - "title": "Custom Label" + "title": "Blend Mode", + "type": "string" }, - "default_value": { - "anyOf": [ - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "default": null, - "description": "The default bool to use if not found in the metadata", + "blend_amount": { + "default": 32, + "description": "The amount to blend adjacent tiles in pixels. Must be <= the amount of overlap between adjacent tiles.", "field_kind": "input", "input": "any", - "orig_required": true, - "title": "Default Value" + "minimum": 0, + "orig_default": 32, + "orig_required": false, + "title": "Blend Amount", + "type": "integer" }, "type": { - "const": "metadata_to_bool", - "default": "metadata_to_bool", + "const": "merge_tiles_to_image", + "default": "merge_tiles_to_image", "field_kind": "node_attribute", "title": "type", "type": "string" } }, "required": ["type", "id"], - "tags": ["metadata"], - "title": "Metadata To Bool", + "tags": ["tiles"], + "title": "Merge Tiles to Image", "type": "object", - "version": "1.0.0", + "version": "1.1.1", "output": { - "$ref": "#/components/schemas/BooleanOutput" + "$ref": "#/components/schemas/ImageOutput" } }, - "MetadataToControlnetsInvocation": { + "MetadataField": { + "additionalProperties": true, + "type": "object", + "title": "MetadataField", + "description": "Pydantic model for metadata with custom root of type dict[str, Any].\nMetadata is stored without a strict schema." 
+ }, + "MetadataFieldExtractorInvocation": { "category": "metadata", "class": "invocation", - "classification": "beta", - "description": "Extracts a Controlnets value of a label from metadata", + "classification": "deprecated", + "description": "Extracts the text value from an image's metadata given a key.\nRaises an error if the image has no metadata or if the value is not a string (nesting not permitted).", "node_pack": "invokeai", "properties": { - "metadata": { - "anyOf": [ - { - "$ref": "#/components/schemas/MetadataField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Optional metadata to be saved with the image", - "field_kind": "internal", - "input": "connection", - "orig_required": false, - "ui_hidden": false - }, "id": { "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", "field_kind": "node_attribute", @@ -44127,31 +48445,40 @@ "title": "Use Cache", "type": "boolean" }, - "control_list": { + "image": { "anyOf": [ { - "$ref": "#/components/schemas/ControlField" - }, - { - "items": { - "$ref": "#/components/schemas/ControlField" - }, - "type": "array" + "$ref": "#/components/schemas/ImageField" }, { "type": "null" } ], "default": null, + "description": "The image to extract metadata from", "field_kind": "input", - "input": "connection", - "orig_default": null, - "orig_required": false, - "title": "ControlNet-List" + "input": "any", + "orig_required": true + }, + "key": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The key in the image's metadata to extract the value from", + "field_kind": "input", + "input": "any", + "orig_required": true, + "title": "Key" }, "type": { - "const": "metadata_to_controlnets", - "default": "metadata_to_controlnets", + "const": "metadata_field_extractor", + "default": "metadata_field_extractor", "field_kind": "node_attribute", "title": "type", "type": "string" @@ -44159,36 +48486,20 
@@ }, "required": ["type", "id"], "tags": ["metadata"], - "title": "Metadata To ControlNets", + "title": "Metadata Field Extractor", "type": "object", - "version": "1.2.0", + "version": "1.0.0", "output": { - "$ref": "#/components/schemas/MDControlListOutput" + "$ref": "#/components/schemas/StringOutput" } }, - "MetadataToFloatCollectionInvocation": { + "MetadataFromImageInvocation": { "category": "metadata", "class": "invocation", "classification": "beta", - "description": "Extracts a Float value Collection of a label from metadata", + "description": "Used to create a core metadata item then Add/Update it to the provided metadata", "node_pack": "invokeai", "properties": { - "metadata": { - "anyOf": [ - { - "$ref": "#/components/schemas/MetadataField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Optional metadata to be saved with the image", - "field_kind": "internal", - "input": "connection", - "orig_required": false, - "ui_hidden": false - }, "id": { "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", "field_kind": "node_attribute", @@ -44213,56 +48524,24 @@ "title": "Use Cache", "type": "boolean" }, - "label": { - "default": "* CUSTOM LABEL *", - "description": "Label for this metadata item", - "enum": ["* CUSTOM LABEL *", "cfg_scale", "cfg_rescale_multiplier", "guidance"], - "field_kind": "input", - "input": "direct", - "orig_default": "* CUSTOM LABEL *", - "orig_required": false, - "title": "Label", - "type": "string" - }, - "custom_label": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Label for this metadata item", - "field_kind": "input", - "input": "direct", - "orig_default": null, - "orig_required": false, - "title": "Custom Label" - }, - "default_value": { + "image": { "anyOf": [ { - "items": { - "type": "number" - }, - "type": "array" + "$ref": "#/components/schemas/ImageField" }, { "type": "null" } ], "default": null, - "description": "The default float to use if not found in the metadata", + "description": "The image to process", "field_kind": "input", "input": "any", - "orig_required": true, - "title": "Default Value" + "orig_required": true }, "type": { - "const": "metadata_to_float_collection", - "default": "metadata_to_float_collection", + "const": "metadata_from_image", + "default": "metadata_from_image", "field_kind": "node_attribute", "title": "type", "type": "string" @@ -44270,36 +48549,20 @@ }, "required": ["type", "id"], "tags": ["metadata"], - "title": "Metadata To Float Collection", + "title": "Metadata From Image", "type": "object", - "version": "1.0.0", + "version": "1.0.1", "output": { - "$ref": "#/components/schemas/FloatCollectionOutput" + "$ref": "#/components/schemas/MetadataOutput" } }, - "MetadataToFloatInvocation": { + "MetadataInvocation": { "category": "metadata", "class": "invocation", - "classification": "beta", - "description": "Extracts a Float value of a label from metadata", + "classification": "stable", + 
"description": "Takes a MetadataItem or collection of MetadataItems and outputs a MetadataDict.", "node_pack": "invokeai", "properties": { - "metadata": { - "anyOf": [ - { - "$ref": "#/components/schemas/MetadataField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Optional metadata to be saved with the image", - "field_kind": "internal", - "input": "connection", - "orig_required": false, - "ui_hidden": false - }, "id": { "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", "field_kind": "node_attribute", @@ -44324,53 +48587,31 @@ "title": "Use Cache", "type": "boolean" }, - "label": { - "default": "* CUSTOM LABEL *", - "description": "Label for this metadata item", - "enum": ["* CUSTOM LABEL *", "cfg_scale", "cfg_rescale_multiplier", "guidance"], - "field_kind": "input", - "input": "direct", - "orig_default": "* CUSTOM LABEL *", - "orig_required": false, - "title": "Label", - "type": "string" - }, - "custom_label": { + "items": { "anyOf": [ { - "type": "string" + "items": { + "$ref": "#/components/schemas/MetadataItemField" + }, + "type": "array" }, { - "type": "null" - } - ], - "default": null, - "description": "Label for this metadata item", - "field_kind": "input", - "input": "direct", - "orig_default": null, - "orig_required": false, - "title": "Custom Label" - }, - "default_value": { - "anyOf": [ - { - "type": "number" + "$ref": "#/components/schemas/MetadataItemField" }, { "type": "null" } ], "default": null, - "description": "The default float to use if not found in the metadata", + "description": "A single metadata item or collection of metadata items", "field_kind": "input", "input": "any", "orig_required": true, - "title": "Default Value" + "title": "Items" }, "type": { - "const": "metadata_to_float", - "default": "metadata_to_float", + "const": "metadata", + "default": "metadata", "field_kind": "node_attribute", "title": "type", "type": "string" @@ -44378,36 +48619,36 @@ 
}, "required": ["type", "id"], "tags": ["metadata"], - "title": "Metadata To Float", + "title": "Metadata", "type": "object", - "version": "1.1.0", + "version": "1.0.1", "output": { - "$ref": "#/components/schemas/FloatOutput" + "$ref": "#/components/schemas/MetadataOutput" } }, - "MetadataToIPAdaptersInvocation": { + "MetadataItemField": { + "properties": { + "label": { + "description": "Label for this metadata item", + "title": "Label", + "type": "string" + }, + "value": { + "description": "The value for this metadata item (may be any type)", + "title": "Value" + } + }, + "required": ["label", "value"], + "title": "MetadataItemField", + "type": "object" + }, + "MetadataItemInvocation": { "category": "metadata", "class": "invocation", - "classification": "beta", - "description": "Extracts a IP-Adapters value of a label from metadata", + "classification": "stable", + "description": "Used to create an arbitrary metadata item. Provide \"label\" and make a connection to \"value\" to store that data as the value.", "node_pack": "invokeai", "properties": { - "metadata": { - "anyOf": [ - { - "$ref": "#/components/schemas/MetadataField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Optional metadata to be saved with the image", - "field_kind": "internal", - "input": "connection", - "orig_required": false, - "ui_hidden": false - }, "id": { "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", "field_kind": "node_attribute", @@ -44432,32 +48673,40 @@ "title": "Use Cache", "type": "boolean" }, - "ip_adapter_list": { + "label": { "anyOf": [ { - "$ref": "#/components/schemas/IPAdapterField" + "type": "string" }, { - "items": { - "$ref": "#/components/schemas/IPAdapterField" - }, - "type": "array" - }, + "type": "null" + } + ], + "default": null, + "description": "Label for this metadata item", + "field_kind": "input", + "input": "any", + "orig_required": true, + "title": "Label" + }, + "value": { + "anyOf": [ + {}, { "type": "null" } ], "default": null, - "description": "IP-Adapter to apply", + "description": "The value for this metadata item (may be any type)", "field_kind": "input", - "input": "connection", - "orig_default": null, - "orig_required": false, - "title": "IP-Adapter-List" + "input": "any", + "orig_required": true, + "title": "Value", + "ui_type": "AnyField" }, "type": { - "const": "metadata_to_ip_adapters", - "default": "metadata_to_ip_adapters", + "const": "metadata_item", + "default": "metadata_item", "field_kind": "node_attribute", "title": "type", "type": "string" @@ -44465,18 +48714,18 @@ }, "required": ["type", "id"], "tags": ["metadata"], - "title": "Metadata To IP-Adapters", + "title": "Metadata Item", "type": "object", - "version": "1.2.0", + "version": "1.0.1", "output": { - "$ref": "#/components/schemas/MDIPAdapterListOutput" + "$ref": "#/components/schemas/MetadataItemOutput" } }, - "MetadataToIntegerCollectionInvocation": { + "MetadataItemLinkedInvocation": { "category": "metadata", "class": "invocation", "classification": "beta", - "description": "Extracts an integer value Collection of a label from metadata", + "description": "Used to Create/Add/Update a value into a metadata label", "node_pack": "invokeai", "properties": { "metadata": { @@ -44524,11 +48773,23 @@ "description": "Label for this metadata item", "enum": [ "* CUSTOM LABEL *", + "positive_prompt", + 
"positive_style_prompt", + "negative_prompt", + "negative_style_prompt", "width", "height", "seed", + "cfg_scale", + "cfg_rescale_multiplier", "steps", + "scheduler", "clip_skip", + "model", + "vae", + "seamless_x", + "seamless_y", + "guidance", "cfg_scale_start_step", "cfg_scale_end_step" ], @@ -44556,28 +48817,24 @@ "orig_required": false, "title": "Custom Label" }, - "default_value": { + "value": { "anyOf": [ - { - "items": { - "type": "integer" - }, - "type": "array" - }, + {}, { "type": "null" } ], "default": null, - "description": "The default integer to use if not found in the metadata", + "description": "The value for this metadata item (may be any type)", "field_kind": "input", "input": "any", "orig_required": true, - "title": "Default Value" + "title": "Value", + "ui_type": "AnyField" }, "type": { - "const": "metadata_to_integer_collection", - "default": "metadata_to_integer_collection", + "const": "metadata_item_linked", + "default": "metadata_item_linked", "field_kind": "node_attribute", "title": "type", "type": "string" @@ -44585,18 +48842,61 @@ }, "required": ["type", "id"], "tags": ["metadata"], - "title": "Metadata To Integer Collection", + "title": "Metadata Item Linked", "type": "object", - "version": "1.0.0", + "version": "1.0.1", "output": { - "$ref": "#/components/schemas/IntegerCollectionOutput" + "$ref": "#/components/schemas/MetadataOutput" } }, - "MetadataToIntegerInvocation": { + "MetadataItemOutput": { + "class": "output", + "description": "Metadata Item Output", + "properties": { + "item": { + "$ref": "#/components/schemas/MetadataItemField", + "description": "Metadata Item", + "field_kind": "output", + "ui_hidden": false + }, + "type": { + "const": "metadata_item_output", + "default": "metadata_item_output", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["output_meta", "item", "type", "type"], + "title": "MetadataItemOutput", + "type": "object" + }, + "MetadataOutput": { + "class": 
"output", + "properties": { + "metadata": { + "$ref": "#/components/schemas/MetadataField", + "description": "Metadata Dict", + "field_kind": "output", + "ui_hidden": false + }, + "type": { + "const": "metadata_output", + "default": "metadata_output", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["output_meta", "metadata", "type", "type"], + "title": "MetadataOutput", + "type": "object" + }, + "MetadataToBoolCollectionInvocation": { "category": "metadata", "class": "invocation", "classification": "beta", - "description": "Extracts an integer value of a label from metadata", + "description": "Extracts a Boolean value Collection of a label from metadata", "node_pack": "invokeai", "properties": { "metadata": { @@ -44642,16 +48942,7 @@ "label": { "default": "* CUSTOM LABEL *", "description": "Label for this metadata item", - "enum": [ - "* CUSTOM LABEL *", - "width", - "height", - "seed", - "steps", - "clip_skip", - "cfg_scale_start_step", - "cfg_scale_end_step" - ], + "enum": ["* CUSTOM LABEL *", "seamless_x", "seamless_y"], "field_kind": "input", "input": "direct", "orig_default": "* CUSTOM LABEL *", @@ -44679,22 +48970,25 @@ "default_value": { "anyOf": [ { - "type": "integer" + "items": { + "type": "boolean" + }, + "type": "array" }, { "type": "null" } ], "default": null, - "description": "The default integer to use if not found in the metadata", + "description": "The default bool to use if not found in the metadata", "field_kind": "input", "input": "any", "orig_required": true, "title": "Default Value" }, "type": { - "const": "metadata_to_integer", - "default": "metadata_to_integer", + "const": "metadata_to_bool_collection", + "default": "metadata_to_bool_collection", "field_kind": "node_attribute", "title": "type", "type": "string" @@ -44702,18 +48996,18 @@ }, "required": ["type", "id"], "tags": ["metadata"], - "title": "Metadata To Integer", + "title": "Metadata To Bool Collection", "type": "object", "version": 
"1.0.0", "output": { - "$ref": "#/components/schemas/IntegerOutput" + "$ref": "#/components/schemas/BooleanCollectionOutput" } }, - "MetadataToLorasCollectionInvocation": { + "MetadataToBoolInvocation": { "category": "metadata", "class": "invocation", "classification": "beta", - "description": "Extracts Lora(s) from metadata into a collection", + "description": "Extracts a Boolean value of a label from metadata", "node_pack": "invokeai", "properties": { "metadata": { @@ -44756,42 +49050,53 @@ "title": "Use Cache", "type": "boolean" }, - "custom_label": { - "default": "loras", + "label": { + "default": "* CUSTOM LABEL *", "description": "Label for this metadata item", + "enum": ["* CUSTOM LABEL *", "seamless_x", "seamless_y"], "field_kind": "input", "input": "direct", - "orig_default": "loras", + "orig_default": "* CUSTOM LABEL *", "orig_required": false, - "title": "Custom Label", + "title": "Label", "type": "string" }, - "loras": { + "custom_label": { "anyOf": [ { - "$ref": "#/components/schemas/LoRAField" + "type": "string" }, { - "items": { - "$ref": "#/components/schemas/LoRAField" - }, - "type": "array" + "type": "null" + } + ], + "default": null, + "description": "Label for this metadata item", + "field_kind": "input", + "input": "direct", + "orig_default": null, + "orig_required": false, + "title": "Custom Label" + }, + "default_value": { + "anyOf": [ + { + "type": "boolean" }, { "type": "null" } ], - "default": [], - "description": "LoRA models and weights. 
May be a single LoRA or collection.", + "default": null, + "description": "The default bool to use if not found in the metadata", "field_kind": "input", "input": "any", - "orig_default": [], - "orig_required": false, - "title": "LoRAs" + "orig_required": true, + "title": "Default Value" }, "type": { - "const": "metadata_to_lora_collection", - "default": "metadata_to_lora_collection", + "const": "metadata_to_bool", + "default": "metadata_to_bool", "field_kind": "node_attribute", "title": "type", "type": "string" @@ -44799,44 +49104,18 @@ }, "required": ["type", "id"], "tags": ["metadata"], - "title": "Metadata To LoRA Collection", + "title": "Metadata To Bool", "type": "object", - "version": "1.1.0", + "version": "1.0.0", "output": { - "$ref": "#/components/schemas/MetadataToLorasCollectionOutput" + "$ref": "#/components/schemas/BooleanOutput" } }, - "MetadataToLorasCollectionOutput": { - "class": "output", - "description": "Model loader output", - "properties": { - "lora": { - "description": "Collection of LoRA model and weights", - "field_kind": "output", - "items": { - "$ref": "#/components/schemas/LoRAField" - }, - "title": "LoRAs", - "type": "array", - "ui_hidden": false - }, - "type": { - "const": "metadata_to_lora_collection_output", - "default": "metadata_to_lora_collection_output", - "field_kind": "node_attribute", - "title": "type", - "type": "string" - } - }, - "required": ["output_meta", "lora", "type", "type"], - "title": "MetadataToLorasCollectionOutput", - "type": "object" - }, - "MetadataToLorasInvocation": { + "MetadataToControlnetsInvocation": { "category": "metadata", "class": "invocation", "classification": "beta", - "description": "Extracts a Loras value of a label from metadata", + "description": "Extracts a Controlnets value of a label from metadata", "node_pack": "invokeai", "properties": { "metadata": { @@ -44879,43 +49158,31 @@ "title": "Use Cache", "type": "boolean" }, - "unet": { + "control_list": { "anyOf": [ { - "$ref": 
"#/components/schemas/UNetField" + "$ref": "#/components/schemas/ControlField" }, { - "type": "null" - } - ], - "default": null, - "description": "UNet (scheduler, LoRAs)", - "field_kind": "input", - "input": "connection", - "orig_default": null, - "orig_required": false, - "title": "UNet" - }, - "clip": { - "anyOf": [ - { - "$ref": "#/components/schemas/CLIPField" + "items": { + "$ref": "#/components/schemas/ControlField" + }, + "type": "array" }, { "type": "null" } ], "default": null, - "description": "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count", "field_kind": "input", "input": "connection", "orig_default": null, "orig_required": false, - "title": "CLIP" + "title": "ControlNet-List" }, "type": { - "const": "metadata_to_loras", - "default": "metadata_to_loras", + "const": "metadata_to_controlnets", + "default": "metadata_to_controlnets", "field_kind": "node_attribute", "title": "type", "type": "string" @@ -44923,18 +49190,18 @@ }, "required": ["type", "id"], "tags": ["metadata"], - "title": "Metadata To LoRAs", + "title": "Metadata To ControlNets", "type": "object", - "version": "1.1.1", + "version": "1.2.0", "output": { - "$ref": "#/components/schemas/LoRALoaderOutput" + "$ref": "#/components/schemas/MDControlListOutput" } }, - "MetadataToModelInvocation": { + "MetadataToFloatCollectionInvocation": { "category": "metadata", "class": "invocation", "classification": "beta", - "description": "Extracts a Model value of a label from metadata", + "description": "Extracts a Float value Collection of a label from metadata", "node_pack": "invokeai", "properties": { "metadata": { @@ -44978,12 +49245,12 @@ "type": "boolean" }, "label": { - "default": "model", + "default": "* CUSTOM LABEL *", "description": "Label for this metadata item", - "enum": ["* CUSTOM LABEL *", "model"], + "enum": ["* CUSTOM LABEL *", "cfg_scale", "cfg_rescale_multiplier", "guidance"], "field_kind": "input", "input": "direct", - "orig_default": "model", + "orig_default": "* CUSTOM 
LABEL *", "orig_required": false, "title": "Label", "type": "string" @@ -45008,22 +49275,25 @@ "default_value": { "anyOf": [ { - "$ref": "#/components/schemas/ModelIdentifierField" + "items": { + "type": "number" + }, + "type": "array" }, { "type": "null" } ], "default": null, - "description": "The default model to use if not found in the metadata", + "description": "The default float to use if not found in the metadata", "field_kind": "input", "input": "any", "orig_required": true, - "ui_model_type": ["main"] + "title": "Default Value" }, "type": { - "const": "metadata_to_model", - "default": "metadata_to_model", + "const": "metadata_to_float_collection", + "default": "metadata_to_float_collection", "field_kind": "node_attribute", "title": "type", "type": "string" @@ -45031,69 +49301,18 @@ }, "required": ["type", "id"], "tags": ["metadata"], - "title": "Metadata To Model", + "title": "Metadata To Float Collection", "type": "object", - "version": "1.3.0", + "version": "1.0.0", "output": { - "$ref": "#/components/schemas/MetadataToModelOutput" + "$ref": "#/components/schemas/FloatCollectionOutput" } }, - "MetadataToModelOutput": { - "class": "output", - "description": "String to main model output", - "properties": { - "model": { - "$ref": "#/components/schemas/ModelIdentifierField", - "description": "Main model (UNet, VAE, CLIP) to load", - "field_kind": "output", - "title": "Model", - "ui_hidden": false - }, - "name": { - "description": "Model Name", - "field_kind": "output", - "title": "Name", - "type": "string", - "ui_hidden": false - }, - "unet": { - "$ref": "#/components/schemas/UNetField", - "description": "UNet (scheduler, LoRAs)", - "field_kind": "output", - "title": "UNet", - "ui_hidden": false - }, - "vae": { - "$ref": "#/components/schemas/VAEField", - "description": "VAE", - "field_kind": "output", - "title": "VAE", - "ui_hidden": false - }, - "clip": { - "$ref": "#/components/schemas/CLIPField", - "description": "CLIP (tokenizer, text encoder, LoRAs) 
and skipped layer count", - "field_kind": "output", - "title": "CLIP", - "ui_hidden": false - }, - "type": { - "const": "metadata_to_model_output", - "default": "metadata_to_model_output", - "field_kind": "node_attribute", - "title": "type", - "type": "string" - } - }, - "required": ["output_meta", "model", "name", "unet", "vae", "clip", "type", "type"], - "title": "MetadataToModelOutput", - "type": "object" - }, - "MetadataToSDXLLorasInvocation": { + "MetadataToFloatInvocation": { "category": "metadata", "class": "invocation", "classification": "beta", - "description": "Extracts a SDXL Loras value of a label from metadata", + "description": "Extracts a Float value of a label from metadata", "node_pack": "invokeai", "properties": { "metadata": { @@ -45136,60 +49355,53 @@ "title": "Use Cache", "type": "boolean" }, - "unet": { - "anyOf": [ - { - "$ref": "#/components/schemas/UNetField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "UNet (scheduler, LoRAs)", + "label": { + "default": "* CUSTOM LABEL *", + "description": "Label for this metadata item", + "enum": ["* CUSTOM LABEL *", "cfg_scale", "cfg_rescale_multiplier", "guidance"], "field_kind": "input", - "input": "connection", - "orig_default": null, + "input": "direct", + "orig_default": "* CUSTOM LABEL *", "orig_required": false, - "title": "UNet" + "title": "Label", + "type": "string" }, - "clip": { + "custom_label": { "anyOf": [ { - "$ref": "#/components/schemas/CLIPField" + "type": "string" }, { "type": "null" } ], "default": null, - "description": "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count", + "description": "Label for this metadata item", "field_kind": "input", - "input": "connection", + "input": "direct", "orig_default": null, "orig_required": false, - "title": "CLIP 1" + "title": "Custom Label" }, - "clip2": { + "default_value": { "anyOf": [ { - "$ref": "#/components/schemas/CLIPField" + "type": "number" }, { "type": "null" } ], "default": null, - "description": 
"CLIP (tokenizer, text encoder, LoRAs) and skipped layer count", + "description": "The default float to use if not found in the metadata", "field_kind": "input", - "input": "connection", - "orig_default": null, - "orig_required": false, - "title": "CLIP 2" + "input": "any", + "orig_required": true, + "title": "Default Value" }, "type": { - "const": "metadata_to_sdlx_loras", - "default": "metadata_to_sdlx_loras", + "const": "metadata_to_float", + "default": "metadata_to_float", "field_kind": "node_attribute", "title": "type", "type": "string" @@ -45197,18 +49409,18 @@ }, "required": ["type", "id"], "tags": ["metadata"], - "title": "Metadata To SDXL LoRAs", + "title": "Metadata To Float", "type": "object", - "version": "1.1.1", + "version": "1.1.0", "output": { - "$ref": "#/components/schemas/SDXLLoRALoaderOutput" + "$ref": "#/components/schemas/FloatOutput" } }, - "MetadataToSDXLModelInvocation": { + "MetadataToIPAdaptersInvocation": { "category": "metadata", "class": "invocation", "classification": "beta", - "description": "Extracts a SDXL Model value of a label from metadata", + "description": "Extracts a IP-Adapters value of a label from metadata", "node_pack": "invokeai", "properties": { "metadata": { @@ -45251,54 +49463,32 @@ "title": "Use Cache", "type": "boolean" }, - "label": { - "default": "model", - "description": "Label for this metadata item", - "enum": ["* CUSTOM LABEL *", "model"], - "field_kind": "input", - "input": "direct", - "orig_default": "model", - "orig_required": false, - "title": "Label", - "type": "string" - }, - "custom_label": { + "ip_adapter_list": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/IPAdapterField" }, { - "type": "null" - } - ], - "default": null, - "description": "Label for this metadata item", - "field_kind": "input", - "input": "direct", - "orig_default": null, - "orig_required": false, - "title": "Custom Label" - }, - "default_value": { - "anyOf": [ - { - "$ref": 
"#/components/schemas/ModelIdentifierField" + "items": { + "$ref": "#/components/schemas/IPAdapterField" + }, + "type": "array" }, { "type": "null" } ], "default": null, - "description": "The default SDXL Model to use if not found in the metadata", + "description": "IP-Adapter to apply", "field_kind": "input", - "input": "any", - "orig_required": true, - "ui_model_base": ["sdxl"], - "ui_model_type": ["main"] + "input": "connection", + "orig_default": null, + "orig_required": false, + "title": "IP-Adapter-List" }, "type": { - "const": "metadata_to_sdxl_model", - "default": "metadata_to_sdxl_model", + "const": "metadata_to_ip_adapters", + "default": "metadata_to_ip_adapters", "field_kind": "node_attribute", "title": "type", "type": "string" @@ -45306,76 +49496,18 @@ }, "required": ["type", "id"], "tags": ["metadata"], - "title": "Metadata To SDXL Model", + "title": "Metadata To IP-Adapters", "type": "object", - "version": "1.3.0", + "version": "1.2.0", "output": { - "$ref": "#/components/schemas/MetadataToSDXLModelOutput" + "$ref": "#/components/schemas/MDIPAdapterListOutput" } }, - "MetadataToSDXLModelOutput": { - "class": "output", - "description": "String to SDXL main model output", - "properties": { - "model": { - "$ref": "#/components/schemas/ModelIdentifierField", - "description": "Main model (UNet, VAE, CLIP) to load", - "field_kind": "output", - "title": "Model", - "ui_hidden": false - }, - "name": { - "description": "Model Name", - "field_kind": "output", - "title": "Name", - "type": "string", - "ui_hidden": false - }, - "unet": { - "$ref": "#/components/schemas/UNetField", - "description": "UNet (scheduler, LoRAs)", - "field_kind": "output", - "title": "UNet", - "ui_hidden": false - }, - "clip": { - "$ref": "#/components/schemas/CLIPField", - "description": "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count", - "field_kind": "output", - "title": "CLIP 1", - "ui_hidden": false - }, - "clip2": { - "$ref": "#/components/schemas/CLIPField", - 
"description": "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count", - "field_kind": "output", - "title": "CLIP 2", - "ui_hidden": false - }, - "vae": { - "$ref": "#/components/schemas/VAEField", - "description": "VAE", - "field_kind": "output", - "title": "VAE", - "ui_hidden": false - }, - "type": { - "const": "metadata_to_sdxl_model_output", - "default": "metadata_to_sdxl_model_output", - "field_kind": "node_attribute", - "title": "type", - "type": "string" - } - }, - "required": ["output_meta", "model", "name", "unet", "clip", "clip2", "vae", "type", "type"], - "title": "MetadataToSDXLModelOutput", - "type": "object" - }, - "MetadataToSchedulerInvocation": { + "MetadataToIntegerCollectionInvocation": { "category": "metadata", "class": "invocation", "classification": "beta", - "description": "Extracts a Scheduler value of a label from metadata", + "description": "Extracts an integer value Collection of a label from metadata", "node_pack": "invokeai", "properties": { "metadata": { @@ -45419,12 +49551,21 @@ "type": "boolean" }, "label": { - "default": "scheduler", + "default": "* CUSTOM LABEL *", "description": "Label for this metadata item", - "enum": ["* CUSTOM LABEL *", "scheduler"], + "enum": [ + "* CUSTOM LABEL *", + "width", + "height", + "seed", + "steps", + "clip_skip", + "cfg_scale_start_step", + "cfg_scale_end_step" + ], "field_kind": "input", "input": "direct", - "orig_default": "scheduler", + "orig_default": "* CUSTOM LABEL *", "orig_required": false, "title": "Label", "type": "string" @@ -45447,51 +49588,27 @@ "title": "Custom Label" }, "default_value": { - "default": "euler", - "description": "The default scheduler to use if not found in the metadata", - "enum": [ - "ddim", - "ddpm", - "deis", - "deis_k", - "lms", - "lms_k", - "pndm", - "heun", - "heun_k", - "euler", - "euler_k", - "euler_a", - "kdpm_2", - "kdpm_2_k", - "kdpm_2_a", - "kdpm_2_a_k", - "dpmpp_2s", - "dpmpp_2s_k", - "dpmpp_2m", - "dpmpp_2m_k", - "dpmpp_2m_sde", - 
"dpmpp_2m_sde_k", - "dpmpp_3m", - "dpmpp_3m_k", - "dpmpp_sde", - "dpmpp_sde_k", - "unipc", - "unipc_k", - "lcm", - "tcd" + "anyOf": [ + { + "items": { + "type": "integer" + }, + "type": "array" + }, + { + "type": "null" + } ], + "default": null, + "description": "The default integer to use if not found in the metadata", "field_kind": "input", "input": "any", - "orig_default": "euler", - "orig_required": false, - "title": "Default Value", - "type": "string", - "ui_type": "SchedulerField" + "orig_required": true, + "title": "Default Value" }, "type": { - "const": "metadata_to_scheduler", - "default": "metadata_to_scheduler", + "const": "metadata_to_integer_collection", + "default": "metadata_to_integer_collection", "field_kind": "node_attribute", "title": "type", "type": "string" @@ -45499,18 +49616,18 @@ }, "required": ["type", "id"], "tags": ["metadata"], - "title": "Metadata To Scheduler", + "title": "Metadata To Integer Collection", "type": "object", - "version": "1.0.1", + "version": "1.0.0", "output": { - "$ref": "#/components/schemas/SchedulerOutput" + "$ref": "#/components/schemas/IntegerCollectionOutput" } }, - "MetadataToStringCollectionInvocation": { + "MetadataToIntegerInvocation": { "category": "metadata", "class": "invocation", "classification": "beta", - "description": "Extracts a string collection value of a label from metadata", + "description": "Extracts an integer value of a label from metadata", "node_pack": "invokeai", "properties": { "metadata": { @@ -45558,10 +49675,13 @@ "description": "Label for this metadata item", "enum": [ "* CUSTOM LABEL *", - "positive_prompt", - "positive_style_prompt", - "negative_prompt", - "negative_style_prompt" + "width", + "height", + "seed", + "steps", + "clip_skip", + "cfg_scale_start_step", + "cfg_scale_end_step" ], "field_kind": "input", "input": "direct", @@ -45590,25 +49710,22 @@ "default_value": { "anyOf": [ { - "items": { - "type": "string" - }, - "type": "array" + "type": "integer" }, { "type": "null" } 
], "default": null, - "description": "The default string collection to use if not found in the metadata", + "description": "The default integer to use if not found in the metadata", "field_kind": "input", "input": "any", "orig_required": true, "title": "Default Value" }, "type": { - "const": "metadata_to_string_collection", - "default": "metadata_to_string_collection", + "const": "metadata_to_integer", + "default": "metadata_to_integer", "field_kind": "node_attribute", "title": "type", "type": "string" @@ -45616,18 +49733,18 @@ }, "required": ["type", "id"], "tags": ["metadata"], - "title": "Metadata To String Collection", + "title": "Metadata To Integer", "type": "object", "version": "1.0.0", "output": { - "$ref": "#/components/schemas/StringCollectionOutput" + "$ref": "#/components/schemas/IntegerOutput" } }, - "MetadataToStringInvocation": { + "MetadataToLorasCollectionInvocation": { "category": "metadata", "class": "invocation", "classification": "beta", - "description": "Extracts a string value of a label from metadata", + "description": "Extracts Lora(s) from metadata into a collection", "node_pack": "invokeai", "properties": { "metadata": { @@ -45670,59 +49787,42 @@ "title": "Use Cache", "type": "boolean" }, - "label": { - "default": "* CUSTOM LABEL *", + "custom_label": { + "default": "loras", "description": "Label for this metadata item", - "enum": [ - "* CUSTOM LABEL *", - "positive_prompt", - "positive_style_prompt", - "negative_prompt", - "negative_style_prompt" - ], "field_kind": "input", "input": "direct", - "orig_default": "* CUSTOM LABEL *", + "orig_default": "loras", "orig_required": false, - "title": "Label", + "title": "Custom Label", "type": "string" }, - "custom_label": { + "loras": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/LoRAField" }, { - "type": "null" - } - ], - "default": null, - "description": "Label for this metadata item", - "field_kind": "input", - "input": "direct", - "orig_default": null, - "orig_required": 
false, - "title": "Custom Label" - }, - "default_value": { - "anyOf": [ - { - "type": "string" + "items": { + "$ref": "#/components/schemas/LoRAField" + }, + "type": "array" }, { "type": "null" } ], - "default": null, - "description": "The default string to use if not found in the metadata", + "default": [], + "description": "LoRA models and weights. May be a single LoRA or collection.", "field_kind": "input", "input": "any", - "orig_required": true, - "title": "Default Value" + "orig_default": [], + "orig_required": false, + "title": "LoRAs" }, "type": { - "const": "metadata_to_string", - "default": "metadata_to_string", + "const": "metadata_to_lora_collection", + "default": "metadata_to_lora_collection", "field_kind": "node_attribute", "title": "type", "type": "string" @@ -45730,18 +49830,44 @@ }, "required": ["type", "id"], "tags": ["metadata"], - "title": "Metadata To String", + "title": "Metadata To LoRA Collection", "type": "object", - "version": "1.0.0", + "version": "1.1.0", "output": { - "$ref": "#/components/schemas/StringOutput" + "$ref": "#/components/schemas/MetadataToLorasCollectionOutput" } }, - "MetadataToT2IAdaptersInvocation": { + "MetadataToLorasCollectionOutput": { + "class": "output", + "description": "Model loader output", + "properties": { + "lora": { + "description": "Collection of LoRA model and weights", + "field_kind": "output", + "items": { + "$ref": "#/components/schemas/LoRAField" + }, + "title": "LoRAs", + "type": "array", + "ui_hidden": false + }, + "type": { + "const": "metadata_to_lora_collection_output", + "default": "metadata_to_lora_collection_output", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["output_meta", "lora", "type", "type"], + "title": "MetadataToLorasCollectionOutput", + "type": "object" + }, + "MetadataToLorasInvocation": { "category": "metadata", "class": "invocation", "classification": "beta", - "description": "Extracts a T2I-Adapters value of a label from 
metadata", + "description": "Extracts a Loras value of a label from metadata", "node_pack": "invokeai", "properties": { "metadata": { @@ -45784,32 +49910,43 @@ "title": "Use Cache", "type": "boolean" }, - "t2i_adapter_list": { + "unet": { "anyOf": [ { - "$ref": "#/components/schemas/T2IAdapterField" + "$ref": "#/components/schemas/UNetField" }, { - "items": { - "$ref": "#/components/schemas/T2IAdapterField" - }, - "type": "array" + "type": "null" + } + ], + "default": null, + "description": "UNet (scheduler, LoRAs)", + "field_kind": "input", + "input": "connection", + "orig_default": null, + "orig_required": false, + "title": "UNet" + }, + "clip": { + "anyOf": [ + { + "$ref": "#/components/schemas/CLIPField" }, { "type": "null" } ], "default": null, - "description": "IP-Adapter to apply", + "description": "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count", "field_kind": "input", "input": "connection", "orig_default": null, "orig_required": false, - "title": "T2I-Adapter" + "title": "CLIP" }, "type": { - "const": "metadata_to_t2i_adapters", - "default": "metadata_to_t2i_adapters", + "const": "metadata_to_loras", + "default": "metadata_to_loras", "field_kind": "node_attribute", "title": "type", "type": "string" @@ -45817,18 +49954,18 @@ }, "required": ["type", "id"], "tags": ["metadata"], - "title": "Metadata To T2I-Adapters", + "title": "Metadata To LoRAs", "type": "object", - "version": "1.2.0", + "version": "1.1.1", "output": { - "$ref": "#/components/schemas/MDT2IAdapterListOutput" + "$ref": "#/components/schemas/LoRALoaderOutput" } }, - "MetadataToVAEInvocation": { + "MetadataToModelInvocation": { "category": "metadata", "class": "invocation", "classification": "beta", - "description": "Extracts a VAE value of a label from metadata", + "description": "Extracts a Model value of a label from metadata", "node_pack": "invokeai", "properties": { "metadata": { @@ -45872,12 +50009,12 @@ "type": "boolean" }, "label": { - "default": "vae", + "default": 
"model", "description": "Label for this metadata item", - "enum": ["* CUSTOM LABEL *", "vae"], + "enum": ["* CUSTOM LABEL *", "model"], "field_kind": "input", "input": "direct", - "orig_default": "vae", + "orig_default": "model", "orig_required": false, "title": "Label", "type": "string" @@ -45902,21 +50039,22 @@ "default_value": { "anyOf": [ { - "$ref": "#/components/schemas/VAEField" + "$ref": "#/components/schemas/ModelIdentifierField" }, { "type": "null" } ], "default": null, - "description": "The default VAE to use if not found in the metadata", + "description": "The default model to use if not found in the metadata", "field_kind": "input", "input": "any", - "orig_required": true + "orig_required": true, + "ui_model_type": ["main"] }, "type": { - "const": "metadata_to_vae", - "default": "metadata_to_vae", + "const": "metadata_to_model", + "default": "metadata_to_model", "field_kind": "node_attribute", "title": "type", "type": "string" @@ -45924,84 +50062,202 @@ }, "required": ["type", "id"], "tags": ["metadata"], - "title": "Metadata To VAE", + "title": "Metadata To Model", "type": "object", - "version": "1.2.1", + "version": "1.3.0", "output": { - "$ref": "#/components/schemas/VAEOutput" + "$ref": "#/components/schemas/MetadataToModelOutput" } }, - "ModelFormat": { - "type": "string", - "enum": [ - "omi", - "diffusers", - "checkpoint", - "lycoris", - "onnx", - "olive", - "embedding_file", - "embedding_folder", - "invokeai", - "t5_encoder", - "qwen3_encoder", - "bnb_quantized_int8b", - "bnb_quantized_nf4b", - "gguf_quantized", - "unknown" - ], - "title": "ModelFormat", - "description": "Storage format of model." 
- }, - "ModelIdentifierField": { + "MetadataToModelOutput": { + "class": "output", + "description": "String to main model output", "properties": { - "key": { - "description": "The model's unique key", - "title": "Key", - "type": "string" - }, - "hash": { - "description": "The model's BLAKE3 hash", - "title": "Hash", - "type": "string" + "model": { + "$ref": "#/components/schemas/ModelIdentifierField", + "description": "Main model (UNet, VAE, CLIP) to load", + "field_kind": "output", + "title": "Model", + "ui_hidden": false }, "name": { - "description": "The model's name", + "description": "Model Name", + "field_kind": "output", "title": "Name", - "type": "string" + "type": "string", + "ui_hidden": false }, - "base": { - "$ref": "#/components/schemas/BaseModelType", - "description": "The model's base model type" + "unet": { + "$ref": "#/components/schemas/UNetField", + "description": "UNet (scheduler, LoRAs)", + "field_kind": "output", + "title": "UNet", + "ui_hidden": false + }, + "vae": { + "$ref": "#/components/schemas/VAEField", + "description": "VAE", + "field_kind": "output", + "title": "VAE", + "ui_hidden": false + }, + "clip": { + "$ref": "#/components/schemas/CLIPField", + "description": "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count", + "field_kind": "output", + "title": "CLIP", + "ui_hidden": false }, "type": { - "$ref": "#/components/schemas/ModelType", - "description": "The model's type" + "const": "metadata_to_model_output", + "default": "metadata_to_model_output", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["output_meta", "model", "name", "unet", "vae", "clip", "type", "type"], + "title": "MetadataToModelOutput", + "type": "object" + }, + "MetadataToSDXLLorasInvocation": { + "category": "metadata", + "class": "invocation", + "classification": "beta", + "description": "Extracts a SDXL Loras value of a label from metadata", + "node_pack": "invokeai", + "properties": { + "metadata": { + 
"anyOf": [ + { + "$ref": "#/components/schemas/MetadataField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional metadata to be saved with the image", + "field_kind": "internal", + "input": "connection", + "orig_required": false, + "ui_hidden": false }, - "submodel_type": { + "id": { + "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" + }, + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" + }, + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" + }, + "unet": { "anyOf": [ { - "$ref": "#/components/schemas/SubModelType" + "$ref": "#/components/schemas/UNetField" }, { "type": "null" } ], "default": null, - "description": "The submodel to load, if this is a main model" + "description": "UNet (scheduler, LoRAs)", + "field_kind": "input", + "input": "connection", + "orig_default": null, + "orig_required": false, + "title": "UNet" + }, + "clip": { + "anyOf": [ + { + "$ref": "#/components/schemas/CLIPField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count", + "field_kind": "input", + "input": "connection", + "orig_default": null, + "orig_required": false, + "title": "CLIP 1" + }, + "clip2": { + "anyOf": [ + { + "$ref": "#/components/schemas/CLIPField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count", + "field_kind": "input", + "input": "connection", + "orig_default": null, + "orig_required": 
false, + "title": "CLIP 2" + }, + "type": { + "const": "metadata_to_sdlx_loras", + "default": "metadata_to_sdlx_loras", + "field_kind": "node_attribute", + "title": "type", + "type": "string" } }, - "required": ["key", "hash", "name", "base", "type"], - "title": "ModelIdentifierField", - "type": "object" + "required": ["type", "id"], + "tags": ["metadata"], + "title": "Metadata To SDXL LoRAs", + "type": "object", + "version": "1.1.1", + "output": { + "$ref": "#/components/schemas/SDXLLoRALoaderOutput" + } }, - "ModelIdentifierInvocation": { - "category": "model", + "MetadataToSDXLModelInvocation": { + "category": "metadata", "class": "invocation", - "classification": "stable", - "description": "Selects any model, outputting it its identifier. Be careful with this one! The identifier will be accepted as\ninput for any model, even if the model types don't match. If you connect this to a mismatched input, you'll get an\nerror.", + "classification": "beta", + "description": "Extracts a SDXL Model value of a label from metadata", "node_pack": "invokeai", "properties": { + "metadata": { + "anyOf": [ + { + "$ref": "#/components/schemas/MetadataField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional metadata to be saved with the image", + "field_kind": "internal", + "input": "connection", + "orig_required": false, + "ui_hidden": false + }, "id": { "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", "field_kind": "node_attribute", @@ -46026,7 +50282,35 @@ "title": "Use Cache", "type": "boolean" }, - "model": { + "label": { + "default": "model", + "description": "Label for this metadata item", + "enum": ["* CUSTOM LABEL *", "model"], + "field_kind": "input", + "input": "direct", + "orig_default": "model", + "orig_required": false, + "title": "Label", + "type": "string" + }, + "custom_label": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Label for this metadata item", + "field_kind": "input", + "input": "direct", + "orig_default": null, + "orig_required": false, + "title": "Custom Label" + }, + "default_value": { "anyOf": [ { "$ref": "#/components/schemas/ModelIdentifierField" @@ -46036,248 +50320,1007 @@ } ], "default": null, - "description": "The model to select", + "description": "The default SDXL Model to use if not found in the metadata", "field_kind": "input", "input": "any", "orig_required": true, - "title": "Model" + "ui_model_base": ["sdxl"], + "ui_model_type": ["main"] }, "type": { - "const": "model_identifier", - "default": "model_identifier", + "const": "metadata_to_sdxl_model", + "default": "metadata_to_sdxl_model", "field_kind": "node_attribute", "title": "type", "type": "string" } }, "required": ["type", "id"], - "tags": ["model"], - "title": "Any Model", + "tags": ["metadata"], + "title": "Metadata To SDXL Model", "type": "object", - "version": "1.0.1", + "version": "1.3.0", "output": { - "$ref": "#/components/schemas/ModelIdentifierOutput" + "$ref": "#/components/schemas/MetadataToSDXLModelOutput" } }, - "ModelIdentifierOutput": { + "MetadataToSDXLModelOutput": { "class": "output", - "description": "Model identifier output", + "description": "String to SDXL main model output", "properties": { "model": { "$ref": "#/components/schemas/ModelIdentifierField", - "description": "Model identifier", + "description": "Main model (UNet, 
VAE, CLIP) to load", "field_kind": "output", "title": "Model", "ui_hidden": false }, + "name": { + "description": "Model Name", + "field_kind": "output", + "title": "Name", + "type": "string", + "ui_hidden": false + }, + "unet": { + "$ref": "#/components/schemas/UNetField", + "description": "UNet (scheduler, LoRAs)", + "field_kind": "output", + "title": "UNet", + "ui_hidden": false + }, + "clip": { + "$ref": "#/components/schemas/CLIPField", + "description": "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count", + "field_kind": "output", + "title": "CLIP 1", + "ui_hidden": false + }, + "clip2": { + "$ref": "#/components/schemas/CLIPField", + "description": "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count", + "field_kind": "output", + "title": "CLIP 2", + "ui_hidden": false + }, + "vae": { + "$ref": "#/components/schemas/VAEField", + "description": "VAE", + "field_kind": "output", + "title": "VAE", + "ui_hidden": false + }, "type": { - "const": "model_identifier_output", - "default": "model_identifier_output", + "const": "metadata_to_sdxl_model_output", + "default": "metadata_to_sdxl_model_output", "field_kind": "node_attribute", "title": "type", "type": "string" } }, - "required": ["output_meta", "model", "type", "type"], - "title": "ModelIdentifierOutput", + "required": ["output_meta", "model", "name", "unet", "clip", "clip2", "vae", "type", "type"], + "title": "MetadataToSDXLModelOutput", "type": "object" }, - "ModelInstallCancelledEvent": { - "description": "Event model for model_install_cancelled", + "MetadataToSchedulerInvocation": { + "category": "metadata", + "class": "invocation", + "classification": "beta", + "description": "Extracts a Scheduler value of a label from metadata", + "node_pack": "invokeai", "properties": { - "timestamp": { - "description": "The timestamp of the event", - "title": "Timestamp", - "type": "integer" - }, - "id": { - "description": "The ID of the install job", - "title": "Id", - "type": "integer" - }, - 
"source": { - "description": "Source of the model; local path, repo_id or url", - "discriminator": { - "mapping": { - "hf": "#/components/schemas/HFModelSource", - "local": "#/components/schemas/LocalModelSource", - "url": "#/components/schemas/URLModelSource" - }, - "propertyName": "type" - }, - "oneOf": [ - { - "$ref": "#/components/schemas/LocalModelSource" - }, + "metadata": { + "anyOf": [ { - "$ref": "#/components/schemas/HFModelSource" + "$ref": "#/components/schemas/MetadataField" }, { - "$ref": "#/components/schemas/URLModelSource" + "type": "null" } ], - "title": "Source" - } - }, - "required": ["timestamp", "id", "source"], - "title": "ModelInstallCancelledEvent", - "type": "object" - }, - "ModelInstallCompleteEvent": { - "description": "Event model for model_install_complete", - "properties": { - "timestamp": { - "description": "The timestamp of the event", - "title": "Timestamp", - "type": "integer" + "default": null, + "description": "Optional metadata to be saved with the image", + "field_kind": "internal", + "input": "connection", + "orig_required": false, + "ui_hidden": false }, "id": { - "description": "The ID of the install job", + "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", + "field_kind": "node_attribute", "title": "Id", - "type": "integer" + "type": "string" }, - "source": { - "description": "Source of the model; local path, repo_id or url", - "discriminator": { - "mapping": { - "hf": "#/components/schemas/HFModelSource", - "local": "#/components/schemas/LocalModelSource", - "url": "#/components/schemas/URLModelSource" - }, - "propertyName": "type" - }, - "oneOf": [ - { - "$ref": "#/components/schemas/LocalModelSource" - }, + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" + }, + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" + }, + "label": { + "default": "scheduler", + "description": "Label for this metadata item", + "enum": ["* CUSTOM LABEL *", "scheduler"], + "field_kind": "input", + "input": "direct", + "orig_default": "scheduler", + "orig_required": false, + "title": "Label", + "type": "string" + }, + "custom_label": { + "anyOf": [ { - "$ref": "#/components/schemas/HFModelSource" + "type": "string" }, { - "$ref": "#/components/schemas/URLModelSource" + "type": "null" } ], - "title": "Source" + "default": null, + "description": "Label for this metadata item", + "field_kind": "input", + "input": "direct", + "orig_default": null, + "orig_required": false, + "title": "Custom Label" }, - "key": { - "description": "Model config record key", - "title": "Key", - "type": "string" + "default_value": { + "default": "euler", + "description": "The default scheduler to use if not found in the metadata", + "enum": [ + "ddim", + "ddpm", + "deis", + "deis_k", + "lms", + "lms_k", + "pndm", + "heun", + "heun_k", + "euler", + "euler_k", 
+ "euler_a", + "kdpm_2", + "kdpm_2_k", + "kdpm_2_a", + "kdpm_2_a_k", + "dpmpp_2s", + "dpmpp_2s_k", + "dpmpp_2m", + "dpmpp_2m_k", + "dpmpp_2m_sde", + "dpmpp_2m_sde_k", + "dpmpp_3m", + "dpmpp_3m_k", + "dpmpp_sde", + "dpmpp_sde_k", + "unipc", + "unipc_k", + "lcm", + "tcd" + ], + "field_kind": "input", + "input": "any", + "orig_default": "euler", + "orig_required": false, + "title": "Default Value", + "type": "string", + "ui_type": "SchedulerField" }, - "total_bytes": { + "type": { + "const": "metadata_to_scheduler", + "default": "metadata_to_scheduler", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["type", "id"], + "tags": ["metadata"], + "title": "Metadata To Scheduler", + "type": "object", + "version": "1.0.1", + "output": { + "$ref": "#/components/schemas/SchedulerOutput" + } + }, + "MetadataToStringCollectionInvocation": { + "category": "metadata", + "class": "invocation", + "classification": "beta", + "description": "Extracts a string collection value of a label from metadata", + "node_pack": "invokeai", + "properties": { + "metadata": { "anyOf": [ { - "type": "integer" + "$ref": "#/components/schemas/MetadataField" }, { "type": "null" } ], - "description": "Size of the model (may be None for installation of a local path)", - "title": "Total Bytes" + "default": null, + "description": "Optional metadata to be saved with the image", + "field_kind": "internal", + "input": "connection", + "orig_required": false, + "ui_hidden": false }, - "config": { - "description": "The installed model's config", - "oneOf": [ - { - "$ref": "#/components/schemas/Main_Diffusers_SD1_Config" - }, - { - "$ref": "#/components/schemas/Main_Diffusers_SD2_Config" - }, - { - "$ref": "#/components/schemas/Main_Diffusers_SDXL_Config" - }, - { - "$ref": "#/components/schemas/Main_Diffusers_SDXLRefiner_Config" - }, - { - "$ref": "#/components/schemas/Main_Diffusers_SD3_Config" - }, - { - "$ref": "#/components/schemas/Main_Diffusers_FLUX_Config" - 
}, - { - "$ref": "#/components/schemas/Main_Diffusers_Flux2_Config" - }, - { - "$ref": "#/components/schemas/Main_Diffusers_CogView4_Config" - }, + "id": { + "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" + }, + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" + }, + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" + }, + "label": { + "default": "* CUSTOM LABEL *", + "description": "Label for this metadata item", + "enum": [ + "* CUSTOM LABEL *", + "positive_prompt", + "positive_style_prompt", + "negative_prompt", + "negative_style_prompt" + ], + "field_kind": "input", + "input": "direct", + "orig_default": "* CUSTOM LABEL *", + "orig_required": false, + "title": "Label", + "type": "string" + }, + "custom_label": { + "anyOf": [ { - "$ref": "#/components/schemas/Main_Diffusers_ZImage_Config" + "type": "string" }, { - "$ref": "#/components/schemas/Main_Checkpoint_SD1_Config" - }, + "type": "null" + } + ], + "default": null, + "description": "Label for this metadata item", + "field_kind": "input", + "input": "direct", + "orig_default": null, + "orig_required": false, + "title": "Custom Label" + }, + "default_value": { + "anyOf": [ { - "$ref": "#/components/schemas/Main_Checkpoint_SD2_Config" + "items": { + "type": "string" + }, + "type": "array" }, { - "$ref": "#/components/schemas/Main_Checkpoint_SDXL_Config" - }, + "type": "null" + } + ], + "default": null, + "description": "The default string collection to use if not found in the metadata", + "field_kind": "input", + "input": 
"any", + "orig_required": true, + "title": "Default Value" + }, + "type": { + "const": "metadata_to_string_collection", + "default": "metadata_to_string_collection", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["type", "id"], + "tags": ["metadata"], + "title": "Metadata To String Collection", + "type": "object", + "version": "1.0.0", + "output": { + "$ref": "#/components/schemas/StringCollectionOutput" + } + }, + "MetadataToStringInvocation": { + "category": "metadata", + "class": "invocation", + "classification": "beta", + "description": "Extracts a string value of a label from metadata", + "node_pack": "invokeai", + "properties": { + "metadata": { + "anyOf": [ { - "$ref": "#/components/schemas/Main_Checkpoint_SDXLRefiner_Config" + "$ref": "#/components/schemas/MetadataField" }, { - "$ref": "#/components/schemas/Main_Checkpoint_Flux2_Config" - }, + "type": "null" + } + ], + "default": null, + "description": "Optional metadata to be saved with the image", + "field_kind": "internal", + "input": "connection", + "orig_required": false, + "ui_hidden": false + }, + "id": { + "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" + }, + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" + }, + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" + }, + "label": { + "default": "* CUSTOM LABEL *", + "description": "Label for this metadata item", + "enum": [ + "* CUSTOM LABEL *", + "positive_prompt", + "positive_style_prompt", + "negative_prompt", + "negative_style_prompt" + ], + "field_kind": "input", + "input": "direct", + "orig_default": "* CUSTOM LABEL *", + "orig_required": false, + "title": "Label", + "type": "string" + }, + "custom_label": { + "anyOf": [ { - "$ref": "#/components/schemas/Main_Checkpoint_FLUX_Config" + "type": "string" }, { - "$ref": "#/components/schemas/Main_Checkpoint_ZImage_Config" - }, + "type": "null" + } + ], + "default": null, + "description": "Label for this metadata item", + "field_kind": "input", + "input": "direct", + "orig_default": null, + "orig_required": false, + "title": "Custom Label" + }, + "default_value": { + "anyOf": [ { - "$ref": "#/components/schemas/Main_BnBNF4_FLUX_Config" + "type": "string" }, { - "$ref": "#/components/schemas/Main_GGUF_Flux2_Config" - }, + "type": "null" + } + ], + "default": null, + "description": "The default string to use if not found in the metadata", + "field_kind": "input", + "input": "any", + "orig_required": true, + "title": "Default Value" + }, + "type": { + "const": "metadata_to_string", + "default": "metadata_to_string", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["type", "id"], + "tags": 
["metadata"], + "title": "Metadata To String", + "type": "object", + "version": "1.0.0", + "output": { + "$ref": "#/components/schemas/StringOutput" + } + }, + "MetadataToT2IAdaptersInvocation": { + "category": "metadata", + "class": "invocation", + "classification": "beta", + "description": "Extracts a T2I-Adapters value of a label from metadata", + "node_pack": "invokeai", + "properties": { + "metadata": { + "anyOf": [ { - "$ref": "#/components/schemas/Main_GGUF_FLUX_Config" + "$ref": "#/components/schemas/MetadataField" }, { - "$ref": "#/components/schemas/Main_GGUF_ZImage_Config" - }, + "type": "null" + } + ], + "default": null, + "description": "Optional metadata to be saved with the image", + "field_kind": "internal", + "input": "connection", + "orig_required": false, + "ui_hidden": false + }, + "id": { + "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" + }, + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" + }, + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" + }, + "t2i_adapter_list": { + "anyOf": [ { - "$ref": "#/components/schemas/VAE_Checkpoint_SD1_Config" + "$ref": "#/components/schemas/T2IAdapterField" }, { - "$ref": "#/components/schemas/VAE_Checkpoint_SD2_Config" + "items": { + "$ref": "#/components/schemas/T2IAdapterField" + }, + "type": "array" }, { - "$ref": "#/components/schemas/VAE_Checkpoint_SDXL_Config" - }, + "type": "null" + } + ], + "default": null, + "description": "IP-Adapter to apply", + "field_kind": "input", + "input": "connection", + 
"orig_default": null, + "orig_required": false, + "title": "T2I-Adapter" + }, + "type": { + "const": "metadata_to_t2i_adapters", + "default": "metadata_to_t2i_adapters", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["type", "id"], + "tags": ["metadata"], + "title": "Metadata To T2I-Adapters", + "type": "object", + "version": "1.2.0", + "output": { + "$ref": "#/components/schemas/MDT2IAdapterListOutput" + } + }, + "MetadataToVAEInvocation": { + "category": "metadata", + "class": "invocation", + "classification": "beta", + "description": "Extracts a VAE value of a label from metadata", + "node_pack": "invokeai", + "properties": { + "metadata": { + "anyOf": [ { - "$ref": "#/components/schemas/VAE_Checkpoint_FLUX_Config" + "$ref": "#/components/schemas/MetadataField" }, { - "$ref": "#/components/schemas/VAE_Checkpoint_Flux2_Config" - }, + "type": "null" + } + ], + "default": null, + "description": "Optional metadata to be saved with the image", + "field_kind": "internal", + "input": "connection", + "orig_required": false, + "ui_hidden": false + }, + "id": { + "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" + }, + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" + }, + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" + }, + "label": { + "default": "vae", + "description": "Label for this metadata item", + "enum": ["* CUSTOM LABEL *", "vae"], + "field_kind": "input", + "input": "direct", + "orig_default": "vae", + "orig_required": false, + "title": "Label", + "type": "string" + }, + "custom_label": { + "anyOf": [ { - "$ref": "#/components/schemas/VAE_Diffusers_SD1_Config" + "type": "string" }, { - "$ref": "#/components/schemas/VAE_Diffusers_SDXL_Config" - }, + "type": "null" + } + ], + "default": null, + "description": "Label for this metadata item", + "field_kind": "input", + "input": "direct", + "orig_default": null, + "orig_required": false, + "title": "Custom Label" + }, + "default_value": { + "anyOf": [ { - "$ref": "#/components/schemas/VAE_Diffusers_Flux2_Config" + "$ref": "#/components/schemas/VAEField" }, { - "$ref": "#/components/schemas/ControlNet_Checkpoint_SD1_Config" - }, + "type": "null" + } + ], + "default": null, + "description": "The default VAE to use if not found in the metadata", + "field_kind": "input", + "input": "any", + "orig_required": true + }, + "type": { + "const": "metadata_to_vae", + "default": "metadata_to_vae", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["type", "id"], + "tags": ["metadata"], + "title": "Metadata To VAE", + "type": "object", + "version": "1.2.1", + "output": { + "$ref": 
"#/components/schemas/VAEOutput" + } + }, + "ModelFormat": { + "type": "string", + "enum": [ + "omi", + "diffusers", + "checkpoint", + "lycoris", + "onnx", + "olive", + "embedding_file", + "embedding_folder", + "invokeai", + "t5_encoder", + "qwen3_encoder", + "bnb_quantized_int8b", + "bnb_quantized_nf4b", + "gguf_quantized", + "unknown" + ], + "title": "ModelFormat", + "description": "Storage format of model." + }, + "ModelIdentifierField": { + "properties": { + "key": { + "description": "The model's unique key", + "title": "Key", + "type": "string" + }, + "hash": { + "description": "The model's BLAKE3 hash", + "title": "Hash", + "type": "string" + }, + "name": { + "description": "The model's name", + "title": "Name", + "type": "string" + }, + "base": { + "$ref": "#/components/schemas/BaseModelType", + "description": "The model's base model type" + }, + "type": { + "$ref": "#/components/schemas/ModelType", + "description": "The model's type" + }, + "submodel_type": { + "anyOf": [ { - "$ref": "#/components/schemas/ControlNet_Checkpoint_SD2_Config" + "$ref": "#/components/schemas/SubModelType" }, { - "$ref": "#/components/schemas/ControlNet_Checkpoint_SDXL_Config" - }, + "type": "null" + } + ], + "default": null, + "description": "The submodel to load, if this is a main model" + } + }, + "required": ["key", "hash", "name", "base", "type"], + "title": "ModelIdentifierField", + "type": "object" + }, + "ModelIdentifierInvocation": { + "category": "model", + "class": "invocation", + "classification": "stable", + "description": "Selects any model, outputting it its identifier. Be careful with this one! The identifier will be accepted as\ninput for any model, even if the model types don't match. If you connect this to a mismatched input, you'll get an\nerror.", + "node_pack": "invokeai", + "properties": { + "id": { + "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" + }, + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" + }, + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" + }, + "model": { + "anyOf": [ { - "$ref": "#/components/schemas/ControlNet_Checkpoint_FLUX_Config" + "$ref": "#/components/schemas/ModelIdentifierField" }, { - "$ref": "#/components/schemas/ControlNet_Checkpoint_ZImage_Config" - }, + "type": "null" + } + ], + "default": null, + "description": "The model to select", + "field_kind": "input", + "input": "any", + "orig_required": true, + "title": "Model" + }, + "type": { + "const": "model_identifier", + "default": "model_identifier", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["type", "id"], + "tags": ["model"], + "title": "Any Model", + "type": "object", + "version": "1.0.1", + "output": { + "$ref": "#/components/schemas/ModelIdentifierOutput" + } + }, + "ModelIdentifierOutput": { + "class": "output", + "description": "Model identifier output", + "properties": { + "model": { + "$ref": "#/components/schemas/ModelIdentifierField", + "description": "Model identifier", + "field_kind": "output", + "title": "Model", + "ui_hidden": false + }, + "type": { + "const": "model_identifier_output", + "default": "model_identifier_output", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["output_meta", "model", "type", "type"], + "title": "ModelIdentifierOutput", + "type": "object" + }, + "ModelInstallCancelledEvent": { + "description": "Event model 
for model_install_cancelled", + "properties": { + "timestamp": { + "description": "The timestamp of the event", + "title": "Timestamp", + "type": "integer" + }, + "id": { + "description": "The ID of the install job", + "title": "Id", + "type": "integer" + }, + "source": { + "description": "Source of the model; local path, repo_id or url", + "discriminator": { + "mapping": { + "hf": "#/components/schemas/HFModelSource", + "local": "#/components/schemas/LocalModelSource", + "url": "#/components/schemas/URLModelSource" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/LocalModelSource" + }, + { + "$ref": "#/components/schemas/HFModelSource" + }, + { + "$ref": "#/components/schemas/URLModelSource" + } + ], + "title": "Source" + } + }, + "required": ["timestamp", "id", "source"], + "title": "ModelInstallCancelledEvent", + "type": "object" + }, + "ModelInstallCompleteEvent": { + "description": "Event model for model_install_complete", + "properties": { + "timestamp": { + "description": "The timestamp of the event", + "title": "Timestamp", + "type": "integer" + }, + "id": { + "description": "The ID of the install job", + "title": "Id", + "type": "integer" + }, + "source": { + "description": "Source of the model; local path, repo_id or url", + "discriminator": { + "mapping": { + "hf": "#/components/schemas/HFModelSource", + "local": "#/components/schemas/LocalModelSource", + "url": "#/components/schemas/URLModelSource" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/LocalModelSource" + }, + { + "$ref": "#/components/schemas/HFModelSource" + }, + { + "$ref": "#/components/schemas/URLModelSource" + } + ], + "title": "Source" + }, + "key": { + "description": "Model config record key", + "title": "Key", + "type": "string" + }, + "total_bytes": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "Size of the model (may be None for installation of a local path)", + 
"title": "Total Bytes" + }, + "config": { + "description": "The installed model's config", + "oneOf": [ + { + "$ref": "#/components/schemas/Main_Diffusers_SD1_Config" + }, + { + "$ref": "#/components/schemas/Main_Diffusers_SD2_Config" + }, + { + "$ref": "#/components/schemas/Main_Diffusers_SDXL_Config" + }, + { + "$ref": "#/components/schemas/Main_Diffusers_SDXLRefiner_Config" + }, + { + "$ref": "#/components/schemas/Main_Diffusers_SD3_Config" + }, + { + "$ref": "#/components/schemas/Main_Diffusers_FLUX_Config" + }, + { + "$ref": "#/components/schemas/Main_Diffusers_Flux2_Config" + }, + { + "$ref": "#/components/schemas/Main_Diffusers_CogView4_Config" + }, + { + "$ref": "#/components/schemas/Main_Diffusers_QwenImage_Config" + }, + { + "$ref": "#/components/schemas/Main_Diffusers_ZImage_Config" + }, + { + "$ref": "#/components/schemas/Main_Checkpoint_SD1_Config" + }, + { + "$ref": "#/components/schemas/Main_Checkpoint_SD2_Config" + }, + { + "$ref": "#/components/schemas/Main_Checkpoint_SDXL_Config" + }, + { + "$ref": "#/components/schemas/Main_Checkpoint_SDXLRefiner_Config" + }, + { + "$ref": "#/components/schemas/Main_Checkpoint_Flux2_Config" + }, + { + "$ref": "#/components/schemas/Main_Checkpoint_FLUX_Config" + }, + { + "$ref": "#/components/schemas/Main_Checkpoint_ZImage_Config" + }, + { + "$ref": "#/components/schemas/Main_Checkpoint_Anima_Config" + }, + { + "$ref": "#/components/schemas/Main_BnBNF4_FLUX_Config" + }, + { + "$ref": "#/components/schemas/Main_GGUF_Flux2_Config" + }, + { + "$ref": "#/components/schemas/Main_GGUF_FLUX_Config" + }, + { + "$ref": "#/components/schemas/Main_GGUF_QwenImage_Config" + }, + { + "$ref": "#/components/schemas/Main_GGUF_ZImage_Config" + }, + { + "$ref": "#/components/schemas/VAE_Checkpoint_SD1_Config" + }, + { + "$ref": "#/components/schemas/VAE_Checkpoint_SD2_Config" + }, + { + "$ref": "#/components/schemas/VAE_Checkpoint_SDXL_Config" + }, + { + "$ref": "#/components/schemas/VAE_Checkpoint_FLUX_Config" + }, + { + "$ref": 
"#/components/schemas/VAE_Checkpoint_Flux2_Config" + }, + { + "$ref": "#/components/schemas/VAE_Checkpoint_Anima_Config" + }, + { + "$ref": "#/components/schemas/VAE_Diffusers_SD1_Config" + }, + { + "$ref": "#/components/schemas/VAE_Diffusers_SDXL_Config" + }, + { + "$ref": "#/components/schemas/VAE_Diffusers_Flux2_Config" + }, + { + "$ref": "#/components/schemas/ControlNet_Checkpoint_SD1_Config" + }, + { + "$ref": "#/components/schemas/ControlNet_Checkpoint_SD2_Config" + }, + { + "$ref": "#/components/schemas/ControlNet_Checkpoint_SDXL_Config" + }, + { + "$ref": "#/components/schemas/ControlNet_Checkpoint_FLUX_Config" + }, + { + "$ref": "#/components/schemas/ControlNet_Checkpoint_ZImage_Config" + }, { "$ref": "#/components/schemas/ControlNet_Diffusers_SD1_Config" }, @@ -46299,12 +51342,21 @@ { "$ref": "#/components/schemas/LoRA_LyCORIS_SDXL_Config" }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_Flux2_Config" + }, { "$ref": "#/components/schemas/LoRA_LyCORIS_FLUX_Config" }, { "$ref": "#/components/schemas/LoRA_LyCORIS_ZImage_Config" }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_QwenImage_Config" + }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_Anima_Config" + }, { "$ref": "#/components/schemas/LoRA_OMI_SDXL_Config" }, @@ -46320,6 +51372,9 @@ { "$ref": "#/components/schemas/LoRA_Diffusers_SDXL_Config" }, + { + "$ref": "#/components/schemas/LoRA_Diffusers_Flux2_Config" + }, { "$ref": "#/components/schemas/LoRA_Diffusers_FLUX_Config" }, @@ -46717,6 +51772,9 @@ { "$ref": "#/components/schemas/Main_Diffusers_CogView4_Config" }, + { + "$ref": "#/components/schemas/Main_Diffusers_QwenImage_Config" + }, { "$ref": "#/components/schemas/Main_Diffusers_ZImage_Config" }, @@ -46741,6 +51799,9 @@ { "$ref": "#/components/schemas/Main_Checkpoint_ZImage_Config" }, + { + "$ref": "#/components/schemas/Main_Checkpoint_Anima_Config" + }, { "$ref": "#/components/schemas/Main_BnBNF4_FLUX_Config" }, @@ -46750,6 +51811,9 @@ { "$ref": 
"#/components/schemas/Main_GGUF_FLUX_Config" }, + { + "$ref": "#/components/schemas/Main_GGUF_QwenImage_Config" + }, { "$ref": "#/components/schemas/Main_GGUF_ZImage_Config" }, @@ -46768,6 +51832,9 @@ { "$ref": "#/components/schemas/VAE_Checkpoint_Flux2_Config" }, + { + "$ref": "#/components/schemas/VAE_Checkpoint_Anima_Config" + }, { "$ref": "#/components/schemas/VAE_Diffusers_SD1_Config" }, @@ -46813,12 +51880,21 @@ { "$ref": "#/components/schemas/LoRA_LyCORIS_SDXL_Config" }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_Flux2_Config" + }, { "$ref": "#/components/schemas/LoRA_LyCORIS_FLUX_Config" }, { "$ref": "#/components/schemas/LoRA_LyCORIS_ZImage_Config" }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_QwenImage_Config" + }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_Anima_Config" + }, { "$ref": "#/components/schemas/LoRA_OMI_SDXL_Config" }, @@ -46834,6 +51910,9 @@ { "$ref": "#/components/schemas/LoRA_Diffusers_SDXL_Config" }, + { + "$ref": "#/components/schemas/LoRA_Diffusers_Flux2_Config" + }, { "$ref": "#/components/schemas/LoRA_Diffusers_FLUX_Config" }, @@ -47124,6 +52203,9 @@ { "$ref": "#/components/schemas/Main_Diffusers_CogView4_Config" }, + { + "$ref": "#/components/schemas/Main_Diffusers_QwenImage_Config" + }, { "$ref": "#/components/schemas/Main_Diffusers_ZImage_Config" }, @@ -47148,6 +52230,9 @@ { "$ref": "#/components/schemas/Main_Checkpoint_ZImage_Config" }, + { + "$ref": "#/components/schemas/Main_Checkpoint_Anima_Config" + }, { "$ref": "#/components/schemas/Main_BnBNF4_FLUX_Config" }, @@ -47157,6 +52242,9 @@ { "$ref": "#/components/schemas/Main_GGUF_FLUX_Config" }, + { + "$ref": "#/components/schemas/Main_GGUF_QwenImage_Config" + }, { "$ref": "#/components/schemas/Main_GGUF_ZImage_Config" }, @@ -47175,6 +52263,9 @@ { "$ref": "#/components/schemas/VAE_Checkpoint_Flux2_Config" }, + { + "$ref": "#/components/schemas/VAE_Checkpoint_Anima_Config" + }, { "$ref": "#/components/schemas/VAE_Diffusers_SD1_Config" }, @@ -47220,12 +52311,21 @@ 
{ "$ref": "#/components/schemas/LoRA_LyCORIS_SDXL_Config" }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_Flux2_Config" + }, { "$ref": "#/components/schemas/LoRA_LyCORIS_FLUX_Config" }, { "$ref": "#/components/schemas/LoRA_LyCORIS_ZImage_Config" }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_QwenImage_Config" + }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_Anima_Config" + }, { "$ref": "#/components/schemas/LoRA_OMI_SDXL_Config" }, @@ -47241,6 +52341,9 @@ { "$ref": "#/components/schemas/LoRA_Diffusers_SDXL_Config" }, + { + "$ref": "#/components/schemas/LoRA_Diffusers_Flux2_Config" + }, { "$ref": "#/components/schemas/LoRA_Diffusers_FLUX_Config" }, @@ -47389,6 +52492,9 @@ { "$ref": "#/components/schemas/Main_Diffusers_CogView4_Config" }, + { + "$ref": "#/components/schemas/Main_Diffusers_QwenImage_Config" + }, { "$ref": "#/components/schemas/Main_Diffusers_ZImage_Config" }, @@ -47413,6 +52519,9 @@ { "$ref": "#/components/schemas/Main_Checkpoint_ZImage_Config" }, + { + "$ref": "#/components/schemas/Main_Checkpoint_Anima_Config" + }, { "$ref": "#/components/schemas/Main_BnBNF4_FLUX_Config" }, @@ -47422,6 +52531,9 @@ { "$ref": "#/components/schemas/Main_GGUF_FLUX_Config" }, + { + "$ref": "#/components/schemas/Main_GGUF_QwenImage_Config" + }, { "$ref": "#/components/schemas/Main_GGUF_ZImage_Config" }, @@ -47440,6 +52552,9 @@ { "$ref": "#/components/schemas/VAE_Checkpoint_Flux2_Config" }, + { + "$ref": "#/components/schemas/VAE_Checkpoint_Anima_Config" + }, { "$ref": "#/components/schemas/VAE_Diffusers_SD1_Config" }, @@ -47485,12 +52600,21 @@ { "$ref": "#/components/schemas/LoRA_LyCORIS_SDXL_Config" }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_Flux2_Config" + }, { "$ref": "#/components/schemas/LoRA_LyCORIS_FLUX_Config" }, { "$ref": "#/components/schemas/LoRA_LyCORIS_ZImage_Config" }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_QwenImage_Config" + }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_Anima_Config" + }, { "$ref": 
"#/components/schemas/LoRA_OMI_SDXL_Config" }, @@ -47506,6 +52630,9 @@ { "$ref": "#/components/schemas/LoRA_Diffusers_SDXL_Config" }, + { + "$ref": "#/components/schemas/LoRA_Diffusers_Flux2_Config" + }, { "$ref": "#/components/schemas/LoRA_Diffusers_FLUX_Config" }, @@ -47658,612 +52785,1674 @@ }, "ModelRecordChanges": { "properties": { - "source": { + "source": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Source", + "description": "original source of the model" + }, + "source_type": { + "anyOf": [ + { + "$ref": "#/components/schemas/ModelSourceType" + }, + { + "type": "null" + } + ], + "description": "type of model source" + }, + "source_api_response": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Source Api Response", + "description": "metadata from remote source" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Name", + "description": "Name of the model." + }, + "path": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Path", + "description": "Path to the model." + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Description", + "description": "Model description" + }, + "base": { + "anyOf": [ + { + "$ref": "#/components/schemas/BaseModelType" + }, + { + "type": "null" + } + ], + "description": "The base model." 
+ }, + "type": { + "anyOf": [ + { + "$ref": "#/components/schemas/ModelType" + }, + { + "type": "null" + } + ], + "description": "Type of model" + }, + "key": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Key", + "description": "Database ID for this model" + }, + "hash": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Hash", + "description": "hash of model file" + }, + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "Size of model file" + }, + "format": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Format", + "description": "format of model file" + }, + "trigger_phrases": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array", + "uniqueItems": true + }, + { + "type": "null" + } + ], + "title": "Trigger Phrases", + "description": "Set of trigger phrases for this model" + }, + "default_settings": { + "anyOf": [ + { + "$ref": "#/components/schemas/MainModelDefaultSettings" + }, + { + "$ref": "#/components/schemas/LoraModelDefaultSettings" + }, + { + "$ref": "#/components/schemas/ControlAdapterDefaultSettings" + }, + { + "type": "null" + } + ], + "title": "Default Settings", + "description": "Default settings for this model" + }, + "cpu_only": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "title": "Cpu Only", + "description": "Whether this model should run on CPU only" + }, + "variant": { + "anyOf": [ + { + "$ref": "#/components/schemas/ModelVariantType" + }, + { + "$ref": "#/components/schemas/ClipVariantType" + }, + { + "$ref": "#/components/schemas/FluxVariantType" + }, + { + "$ref": "#/components/schemas/Flux2VariantType" + }, + { + "$ref": "#/components/schemas/ZImageVariantType" + }, + { + "$ref": "#/components/schemas/QwenImageVariantType" + }, + { + "$ref": "#/components/schemas/Qwen3VariantType" + }, + { + "type": "null" 
+ } + ], + "title": "Variant", + "description": "The variant of the model." + }, + "prediction_type": { + "anyOf": [ + { + "$ref": "#/components/schemas/SchedulerPredictionType" + }, + { + "type": "null" + } + ], + "description": "The prediction type of the model." + }, + "upcast_attention": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "title": "Upcast Attention", + "description": "Whether to upcast attention." + }, + "config_path": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Config Path", + "description": "Path to config file for model" + } + }, + "type": "object", + "title": "ModelRecordChanges", + "description": "A set of changes to apply to a model." + }, + "ModelRelationshipBatchRequest": { + "properties": { + "model_keys": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Model Keys", + "description": "List of model keys to fetch related models for", + "examples": [ + ["aa3b247f-90c9-4416-bfcd-aeaa57a5339e", "ac32b914-10ab-496e-a24a-3068724b9c35"], + [ + "b1c2d3e4-f5a6-7890-abcd-ef1234567890", + "12345678-90ab-cdef-1234-567890abcdef", + "fedcba98-7654-3210-fedc-ba9876543210" + ], + ["3bb7c0eb-b6c8-469c-ad8c-4d69c06075e4"] + ] + } + }, + "type": "object", + "required": ["model_keys"], + "title": "ModelRelationshipBatchRequest" + }, + "ModelRelationshipCreateRequest": { + "properties": { + "model_key_1": { + "type": "string", + "title": "Model Key 1", + "description": "The key of the first model in the relationship", + "examples": [ + "aa3b247f-90c9-4416-bfcd-aeaa57a5339e", + "ac32b914-10ab-496e-a24a-3068724b9c35", + "d944abfd-c7c3-42e2-a4ff-da640b29b8b4", + "b1c2d3e4-f5a6-7890-abcd-ef1234567890", + "12345678-90ab-cdef-1234-567890abcdef", + "fedcba98-7654-3210-fedc-ba9876543210" + ] + }, + "model_key_2": { + "type": "string", + "title": "Model Key 2", + "description": "The key of the second model in the relationship", + "examples": [ + 
"3bb7c0eb-b6c8-469c-ad8c-4d69c06075e4", + "f0c3da4e-d9ff-42b5-a45c-23be75c887c9", + "38170dd8-f1e5-431e-866c-2c81f1277fcc", + "c57fea2d-7646-424c-b9ad-c0ba60fc68be", + "10f7807b-ab54-46a9-ab03-600e88c630a1", + "f6c1d267-cf87-4ee0-bee0-37e791eacab7" + ] + } + }, + "type": "object", + "required": ["model_key_1", "model_key_2"], + "title": "ModelRelationshipCreateRequest" + }, + "ModelRepoVariant": { + "type": "string", + "enum": ["", "fp16", "fp32", "onnx", "openvino", "flax"], + "title": "ModelRepoVariant", + "description": "Various hugging face variants on the diffusers format." + }, + "ModelSourceType": { + "type": "string", + "enum": ["path", "url", "hf_repo_id"], + "title": "ModelSourceType", + "description": "Model source type." + }, + "ModelType": { + "type": "string", + "enum": [ + "onnx", + "main", + "vae", + "lora", + "control_lora", + "controlnet", + "embedding", + "ip_adapter", + "clip_vision", + "clip_embed", + "t2i_adapter", + "t5_encoder", + "qwen3_encoder", + "spandrel_image_to_image", + "siglip", + "flux_redux", + "llava_onevision", + "unknown" + ], + "title": "ModelType", + "description": "Model type." + }, + "ModelVariantType": { + "type": "string", + "enum": ["normal", "inpaint", "depth"], + "title": "ModelVariantType", + "description": "Variant type." 
+ }, + "ModelsList": { + "properties": { + "models": { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/Main_Diffusers_SD1_Config" + }, + { + "$ref": "#/components/schemas/Main_Diffusers_SD2_Config" + }, + { + "$ref": "#/components/schemas/Main_Diffusers_SDXL_Config" + }, + { + "$ref": "#/components/schemas/Main_Diffusers_SDXLRefiner_Config" + }, + { + "$ref": "#/components/schemas/Main_Diffusers_SD3_Config" + }, + { + "$ref": "#/components/schemas/Main_Diffusers_FLUX_Config" + }, + { + "$ref": "#/components/schemas/Main_Diffusers_Flux2_Config" + }, + { + "$ref": "#/components/schemas/Main_Diffusers_CogView4_Config" + }, + { + "$ref": "#/components/schemas/Main_Diffusers_QwenImage_Config" + }, + { + "$ref": "#/components/schemas/Main_Diffusers_ZImage_Config" + }, + { + "$ref": "#/components/schemas/Main_Checkpoint_SD1_Config" + }, + { + "$ref": "#/components/schemas/Main_Checkpoint_SD2_Config" + }, + { + "$ref": "#/components/schemas/Main_Checkpoint_SDXL_Config" + }, + { + "$ref": "#/components/schemas/Main_Checkpoint_SDXLRefiner_Config" + }, + { + "$ref": "#/components/schemas/Main_Checkpoint_Flux2_Config" + }, + { + "$ref": "#/components/schemas/Main_Checkpoint_FLUX_Config" + }, + { + "$ref": "#/components/schemas/Main_Checkpoint_ZImage_Config" + }, + { + "$ref": "#/components/schemas/Main_Checkpoint_Anima_Config" + }, + { + "$ref": "#/components/schemas/Main_BnBNF4_FLUX_Config" + }, + { + "$ref": "#/components/schemas/Main_GGUF_Flux2_Config" + }, + { + "$ref": "#/components/schemas/Main_GGUF_FLUX_Config" + }, + { + "$ref": "#/components/schemas/Main_GGUF_QwenImage_Config" + }, + { + "$ref": "#/components/schemas/Main_GGUF_ZImage_Config" + }, + { + "$ref": "#/components/schemas/VAE_Checkpoint_SD1_Config" + }, + { + "$ref": "#/components/schemas/VAE_Checkpoint_SD2_Config" + }, + { + "$ref": "#/components/schemas/VAE_Checkpoint_SDXL_Config" + }, + { + "$ref": "#/components/schemas/VAE_Checkpoint_FLUX_Config" + }, + { + "$ref": 
"#/components/schemas/VAE_Checkpoint_Flux2_Config" + }, + { + "$ref": "#/components/schemas/VAE_Checkpoint_Anima_Config" + }, + { + "$ref": "#/components/schemas/VAE_Diffusers_SD1_Config" + }, + { + "$ref": "#/components/schemas/VAE_Diffusers_SDXL_Config" + }, + { + "$ref": "#/components/schemas/VAE_Diffusers_Flux2_Config" + }, + { + "$ref": "#/components/schemas/ControlNet_Checkpoint_SD1_Config" + }, + { + "$ref": "#/components/schemas/ControlNet_Checkpoint_SD2_Config" + }, + { + "$ref": "#/components/schemas/ControlNet_Checkpoint_SDXL_Config" + }, + { + "$ref": "#/components/schemas/ControlNet_Checkpoint_FLUX_Config" + }, + { + "$ref": "#/components/schemas/ControlNet_Checkpoint_ZImage_Config" + }, + { + "$ref": "#/components/schemas/ControlNet_Diffusers_SD1_Config" + }, + { + "$ref": "#/components/schemas/ControlNet_Diffusers_SD2_Config" + }, + { + "$ref": "#/components/schemas/ControlNet_Diffusers_SDXL_Config" + }, + { + "$ref": "#/components/schemas/ControlNet_Diffusers_FLUX_Config" + }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_SD1_Config" + }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_SD2_Config" + }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_SDXL_Config" + }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_Flux2_Config" + }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_FLUX_Config" + }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_ZImage_Config" + }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_QwenImage_Config" + }, + { + "$ref": "#/components/schemas/LoRA_LyCORIS_Anima_Config" + }, + { + "$ref": "#/components/schemas/LoRA_OMI_SDXL_Config" + }, + { + "$ref": "#/components/schemas/LoRA_OMI_FLUX_Config" + }, + { + "$ref": "#/components/schemas/LoRA_Diffusers_SD1_Config" + }, + { + "$ref": "#/components/schemas/LoRA_Diffusers_SD2_Config" + }, + { + "$ref": "#/components/schemas/LoRA_Diffusers_SDXL_Config" + }, + { + "$ref": "#/components/schemas/LoRA_Diffusers_Flux2_Config" + }, + { + "$ref": 
"#/components/schemas/LoRA_Diffusers_FLUX_Config" + }, + { + "$ref": "#/components/schemas/LoRA_Diffusers_ZImage_Config" + }, + { + "$ref": "#/components/schemas/ControlLoRA_LyCORIS_FLUX_Config" + }, + { + "$ref": "#/components/schemas/T5Encoder_T5Encoder_Config" + }, + { + "$ref": "#/components/schemas/T5Encoder_BnBLLMint8_Config" + }, + { + "$ref": "#/components/schemas/Qwen3Encoder_Qwen3Encoder_Config" + }, + { + "$ref": "#/components/schemas/Qwen3Encoder_Checkpoint_Config" + }, + { + "$ref": "#/components/schemas/Qwen3Encoder_GGUF_Config" + }, + { + "$ref": "#/components/schemas/TI_File_SD1_Config" + }, + { + "$ref": "#/components/schemas/TI_File_SD2_Config" + }, + { + "$ref": "#/components/schemas/TI_File_SDXL_Config" + }, + { + "$ref": "#/components/schemas/TI_Folder_SD1_Config" + }, + { + "$ref": "#/components/schemas/TI_Folder_SD2_Config" + }, + { + "$ref": "#/components/schemas/TI_Folder_SDXL_Config" + }, + { + "$ref": "#/components/schemas/IPAdapter_InvokeAI_SD1_Config" + }, + { + "$ref": "#/components/schemas/IPAdapter_InvokeAI_SD2_Config" + }, + { + "$ref": "#/components/schemas/IPAdapter_InvokeAI_SDXL_Config" + }, + { + "$ref": "#/components/schemas/IPAdapter_Checkpoint_SD1_Config" + }, + { + "$ref": "#/components/schemas/IPAdapter_Checkpoint_SD2_Config" + }, + { + "$ref": "#/components/schemas/IPAdapter_Checkpoint_SDXL_Config" + }, + { + "$ref": "#/components/schemas/IPAdapter_Checkpoint_FLUX_Config" + }, + { + "$ref": "#/components/schemas/T2IAdapter_Diffusers_SD1_Config" + }, + { + "$ref": "#/components/schemas/T2IAdapter_Diffusers_SDXL_Config" + }, + { + "$ref": "#/components/schemas/Spandrel_Checkpoint_Config" + }, + { + "$ref": "#/components/schemas/CLIPEmbed_Diffusers_G_Config" + }, + { + "$ref": "#/components/schemas/CLIPEmbed_Diffusers_L_Config" + }, + { + "$ref": "#/components/schemas/CLIPVision_Diffusers_Config" + }, + { + "$ref": "#/components/schemas/SigLIP_Diffusers_Config" + }, + { + "$ref": 
"#/components/schemas/FLUXRedux_Checkpoint_Config" + }, + { + "$ref": "#/components/schemas/LlavaOnevision_Diffusers_Config" + }, + { + "$ref": "#/components/schemas/Unknown_Config" + } + ] + }, + "type": "array", + "title": "Models" + } + }, + "type": "object", + "required": ["models"], + "title": "ModelsList", + "description": "Return list of configs." + }, + "MultiplyInvocation": { + "category": "math", + "class": "invocation", + "classification": "stable", + "description": "Multiplies two numbers", + "node_pack": "invokeai", + "properties": { + "id": { + "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" + }, + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" + }, + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" + }, + "a": { + "default": 0, + "description": "The first number", + "field_kind": "input", + "input": "any", + "orig_default": 0, + "orig_required": false, + "title": "A", + "type": "integer" + }, + "b": { + "default": 0, + "description": "The second number", + "field_kind": "input", + "input": "any", + "orig_default": 0, + "orig_required": false, + "title": "B", + "type": "integer" + }, + "type": { + "const": "mul", + "default": "mul", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["type", "id"], + "tags": ["math", "multiply"], + "title": "Multiply Integers", + "type": "object", + "version": "1.0.1", + "output": { + "$ref": "#/components/schemas/IntegerOutput" + } + }, + "NodeFieldValue": { + "properties": { + 
"node_path": { + "type": "string", + "title": "Node Path", + "description": "The node into which this batch data item will be substituted." + }, + "field_name": { + "type": "string", + "title": "Field Name", + "description": "The field into which this batch data item will be substituted." + }, + "value": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "integer" + }, + { + "$ref": "#/components/schemas/ImageField" + } + ], + "title": "Value", + "description": "The value to substitute into the node/field." + } + }, + "type": "object", + "required": ["node_path", "field_name", "value"], + "title": "NodeFieldValue" + }, + "NoiseInvocation": { + "category": "latents", + "class": "invocation", + "classification": "stable", + "description": "Generates latent noise.", + "node_pack": "invokeai", + "properties": { + "id": { + "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" + }, + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" + }, + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" + }, + "seed": { + "default": 0, + "description": "Seed for random number generation", + "field_kind": "input", + "input": "any", + "maximum": 4294967295, + "minimum": 0, + "orig_default": 0, + "orig_required": false, + "title": "Seed", + "type": "integer" + }, + "width": { + "default": 512, + "description": "Width of output (px)", + "exclusiveMinimum": 0, + "field_kind": "input", + "input": "any", + "multipleOf": 8, + "orig_default": 512, + "orig_required": false, + "title": 
"Width", + "type": "integer" + }, + "height": { + "default": 512, + "description": "Height of output (px)", + "exclusiveMinimum": 0, + "field_kind": "input", + "input": "any", + "multipleOf": 8, + "orig_default": 512, + "orig_required": false, + "title": "Height", + "type": "integer" + }, + "use_cpu": { + "default": true, + "description": "Use CPU for noise generation (for reproducible results across platforms)", + "field_kind": "input", + "input": "any", + "orig_default": true, + "orig_required": false, + "title": "Use Cpu", + "type": "boolean" + }, + "type": { + "const": "noise", + "default": "noise", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["type", "id"], + "tags": ["latents", "noise"], + "title": "Create Latent Noise", + "type": "object", + "version": "1.0.3", + "output": { + "$ref": "#/components/schemas/NoiseOutput" + } + }, + "NoiseOutput": { + "class": "output", + "description": "Invocation noise output", + "properties": { + "noise": { + "$ref": "#/components/schemas/LatentsField", + "description": "Noise tensor", + "field_kind": "output", + "ui_hidden": false + }, + "width": { + "description": "Width of output (px)", + "field_kind": "output", + "title": "Width", + "type": "integer", + "ui_hidden": false + }, + "height": { + "description": "Height of output (px)", + "field_kind": "output", + "title": "Height", + "type": "integer", + "ui_hidden": false + }, + "type": { + "const": "noise_output", + "default": "noise_output", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["output_meta", "noise", "width", "height", "type", "type"], + "title": "NoiseOutput", + "type": "object" + }, + "NormalMapInvocation": { + "category": "controlnet", + "class": "invocation", + "classification": "stable", + "description": "Generates a normal map.", + "node_pack": "invokeai", + "properties": { + "board": { "anyOf": [ { - "type": "string" + "$ref": 
"#/components/schemas/BoardField" }, { "type": "null" } ], - "title": "Source", - "description": "original source of the model" + "default": null, + "description": "The board to save the image to", + "field_kind": "internal", + "input": "direct", + "orig_required": false, + "ui_hidden": false }, - "source_type": { + "metadata": { "anyOf": [ { - "$ref": "#/components/schemas/ModelSourceType" + "$ref": "#/components/schemas/MetadataField" }, { "type": "null" } ], - "description": "type of model source" + "default": null, + "description": "Optional metadata to be saved with the image", + "field_kind": "internal", + "input": "connection", + "orig_required": false, + "ui_hidden": false }, - "source_api_response": { + "id": { + "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" + }, + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" + }, + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" + }, + "image": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/ImageField" }, { "type": "null" } ], - "title": "Source Api Response", - "description": "metadata from remote source" + "default": null, + "description": "The image to process", + "field_kind": "input", + "input": "any", + "orig_required": true }, - "name": { + "type": { + "const": "normal_map", + "default": "normal_map", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["type", "id"], + "tags": ["controlnet", "normal"], + "title": "Normal Map", + "type": "object", + "version": 
"1.0.0", + "output": { + "$ref": "#/components/schemas/ImageOutput" + } + }, + "OffsetPaginatedResults_BoardDTO_": { + "properties": { + "limit": { + "type": "integer", + "title": "Limit", + "description": "Limit of items to get" + }, + "offset": { + "type": "integer", + "title": "Offset", + "description": "Offset from which to retrieve items" + }, + "total": { + "type": "integer", + "title": "Total", + "description": "Total number of items in result" + }, + "items": { + "items": { + "$ref": "#/components/schemas/BoardDTO" + }, + "type": "array", + "title": "Items", + "description": "Items" + } + }, + "type": "object", + "required": ["limit", "offset", "total", "items"], + "title": "OffsetPaginatedResults[BoardDTO]" + }, + "OffsetPaginatedResults_ImageDTO_": { + "properties": { + "limit": { + "type": "integer", + "title": "Limit", + "description": "Limit of items to get" + }, + "offset": { + "type": "integer", + "title": "Offset", + "description": "Offset from which to retrieve items" + }, + "total": { + "type": "integer", + "title": "Total", + "description": "Total number of items in result" + }, + "items": { + "items": { + "$ref": "#/components/schemas/ImageDTO" + }, + "type": "array", + "title": "Items", + "description": "Items" + } + }, + "type": "object", + "required": ["limit", "offset", "total", "items"], + "title": "OffsetPaginatedResults[ImageDTO]" + }, + "OrphanedModelInfo": { + "properties": { + "path": { + "type": "string", + "title": "Path", + "description": "Relative path to the orphaned directory from models root" + }, + "absolute_path": { + "type": "string", + "title": "Absolute Path", + "description": "Absolute path to the orphaned directory" + }, + "files": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Files", + "description": "List of model files in this directory" + }, + "size_bytes": { + "type": "integer", + "title": "Size Bytes", + "description": "Total size of all files in bytes" + } + }, + "type": "object", + 
"required": ["path", "absolute_path", "files", "size_bytes"], + "title": "OrphanedModelInfo", + "description": "Information about an orphaned model directory." + }, + "OutputFieldJSONSchemaExtra": { + "description": "Extra attributes to be added to input fields and their OpenAPI schema. Used by the workflow editor\nduring schema parsing and UI rendering.", + "properties": { + "field_kind": { + "$ref": "#/components/schemas/FieldKind" + }, + "ui_hidden": { + "default": false, + "title": "Ui Hidden", + "type": "boolean" + }, + "ui_order": { "anyOf": [ { - "type": "string" + "type": "integer" }, { "type": "null" } ], - "title": "Name", - "description": "Name of the model." + "default": null, + "title": "Ui Order" }, - "path": { + "ui_type": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/UIType" }, { "type": "null" } ], - "title": "Path", - "description": "Path to the model." - }, - "description": { + "default": null + } + }, + "required": ["field_kind", "ui_hidden", "ui_order", "ui_type"], + "title": "OutputFieldJSONSchemaExtra", + "type": "object" + }, + "PBRMapsInvocation": { + "category": "image", + "class": "invocation", + "classification": "stable", + "description": "Generate Normal, Displacement and Roughness Map from a given image", + "node_pack": "invokeai", + "properties": { + "board": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/BoardField" }, { "type": "null" } ], - "title": "Description", - "description": "Model description" + "default": null, + "description": "The board to save the image to", + "field_kind": "internal", + "input": "direct", + "orig_required": false, + "ui_hidden": false }, - "base": { + "metadata": { "anyOf": [ { - "$ref": "#/components/schemas/BaseModelType" + "$ref": "#/components/schemas/MetadataField" }, { "type": "null" } ], - "description": "The base model." 
+ "default": null, + "description": "Optional metadata to be saved with the image", + "field_kind": "internal", + "input": "connection", + "orig_required": false, + "ui_hidden": false }, - "type": { + "id": { + "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" + }, + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" + }, + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" + }, + "image": { "anyOf": [ { - "$ref": "#/components/schemas/ModelType" + "$ref": "#/components/schemas/ImageField" }, { "type": "null" } ], - "description": "Type of model" + "default": null, + "description": "Input image", + "field_kind": "input", + "input": "any", + "orig_required": true }, - "key": { + "tile_size": { + "default": 512, + "description": "Tile size", + "field_kind": "input", + "input": "any", + "orig_default": 512, + "orig_required": false, + "title": "Tile Size", + "type": "integer" + }, + "border_mode": { + "default": "none", + "description": "Border mode to apply to eliminate any artifacts or seams", + "enum": ["none", "seamless", "mirror", "replicate"], + "field_kind": "input", + "input": "any", + "orig_default": "none", + "orig_required": false, + "title": "Border Mode", + "type": "string" + }, + "type": { + "const": "pbr_maps", + "default": "pbr_maps", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["type", "id"], + "tags": ["image", "material"], + "title": "PBR Maps", + "type": "object", + "version": "1.0.0", + "output": { + "$ref": 
"#/components/schemas/PBRMapsOutput" + } + }, + "PBRMapsOutput": { + "class": "output", + "properties": { + "normal_map": { + "$ref": "#/components/schemas/ImageField", + "default": null, + "description": "The generated normal map", + "field_kind": "output", + "ui_hidden": false + }, + "roughness_map": { + "$ref": "#/components/schemas/ImageField", + "default": null, + "description": "The generated roughness map", + "field_kind": "output", + "ui_hidden": false + }, + "displacement_map": { + "$ref": "#/components/schemas/ImageField", + "default": null, + "description": "The generated displacement map", + "field_kind": "output", + "ui_hidden": false + }, + "type": { + "const": "pbr_maps-output", + "default": "pbr_maps-output", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["output_meta", "normal_map", "roughness_map", "displacement_map", "type", "type"], + "title": "PBRMapsOutput", + "type": "object" + }, + "PaginatedResults_WorkflowRecordListItemWithThumbnailDTO_": { + "properties": { + "page": { + "type": "integer", + "title": "Page", + "description": "Current Page" + }, + "pages": { + "type": "integer", + "title": "Pages", + "description": "Total number of pages" + }, + "per_page": { + "type": "integer", + "title": "Per Page", + "description": "Number of items per page" + }, + "total": { + "type": "integer", + "title": "Total", + "description": "Total number of items in result" + }, + "items": { + "items": { + "$ref": "#/components/schemas/WorkflowRecordListItemWithThumbnailDTO" + }, + "type": "array", + "title": "Items", + "description": "Items" + } + }, + "type": "object", + "required": ["page", "pages", "per_page", "total", "items"], + "title": "PaginatedResults[WorkflowRecordListItemWithThumbnailDTO]" + }, + "PairTileImageInvocation": { + "category": "tiles", + "class": "invocation", + "classification": "stable", + "description": "Pair an image with its tile properties.", + "node_pack": "invokeai", + 
"properties": { + "id": { + "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" + }, + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" + }, + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" + }, + "image": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/ImageField" }, { "type": "null" } ], - "title": "Key", - "description": "Database ID for this model" + "default": null, + "description": "The tile image.", + "field_kind": "input", + "input": "any", + "orig_required": true }, - "hash": { + "tile": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/Tile" }, { "type": "null" } ], - "title": "Hash", - "description": "hash of model file" + "default": null, + "description": "The tile properties.", + "field_kind": "input", + "input": "any", + "orig_required": true }, - "file_size": { + "type": { + "const": "pair_tile_image", + "default": "pair_tile_image", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["type", "id"], + "tags": ["tiles"], + "title": "Pair Tile with Image", + "type": "object", + "version": "1.0.1", + "output": { + "$ref": "#/components/schemas/PairTileImageOutput" + } + }, + "PairTileImageOutput": { + "class": "output", + "properties": { + "tile_with_image": { + "$ref": "#/components/schemas/TileWithImage", + "description": "A tile description with its corresponding image.", + "field_kind": "output", + "ui_hidden": false + }, + "type": { + "const": "pair_tile_image_output", + "default": 
"pair_tile_image_output", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["output_meta", "tile_with_image", "type", "type"], + "title": "PairTileImageOutput", + "type": "object" + }, + "PasteImageIntoBoundingBoxInvocation": { + "category": "image", + "class": "invocation", + "classification": "stable", + "description": "Paste the source image into the target image at the given bounding box.\n\nThe source image must be the same size as the bounding box, and the bounding box must fit within the target image.", + "node_pack": "invokeai", + "properties": { + "board": { "anyOf": [ { - "type": "integer" + "$ref": "#/components/schemas/BoardField" }, { "type": "null" } ], - "title": "File Size", - "description": "Size of model file" + "default": null, + "description": "The board to save the image to", + "field_kind": "internal", + "input": "direct", + "orig_required": false, + "ui_hidden": false }, - "format": { + "metadata": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/MetadataField" }, { "type": "null" } ], - "title": "Format", - "description": "format of model file" + "default": null, + "description": "Optional metadata to be saved with the image", + "field_kind": "internal", + "input": "connection", + "orig_required": false, + "ui_hidden": false + }, + "id": { + "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" + }, + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" }, - "trigger_phrases": { - "anyOf": [ - { - "items": { - "type": "string" - }, - "type": "array", - "uniqueItems": true - }, - { - "type": "null" - } - ], - "title": "Trigger Phrases", - "description": "Set of trigger phrases for this model" + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" }, - "default_settings": { + "source_image": { "anyOf": [ { - "$ref": "#/components/schemas/MainModelDefaultSettings" - }, - { - "$ref": "#/components/schemas/LoraModelDefaultSettings" - }, - { - "$ref": "#/components/schemas/ControlAdapterDefaultSettings" + "$ref": "#/components/schemas/ImageField" }, { "type": "null" } ], - "title": "Default Settings", - "description": "Default settings for this model" + "default": null, + "description": "The image to paste", + "field_kind": "input", + "input": "any", + "orig_required": true }, - "cpu_only": { + "target_image": { "anyOf": [ { - "type": "boolean" + "$ref": "#/components/schemas/ImageField" }, { "type": "null" } ], - "title": "Cpu Only", - "description": "Whether this model should run on CPU only" + "default": null, + "description": "The image to paste into", + "field_kind": "input", + "input": "any", + "orig_required": true }, - "variant": { + "bounding_box": { "anyOf": [ { - "$ref": "#/components/schemas/ModelVariantType" - }, - { - "$ref": "#/components/schemas/ClipVariantType" - }, - { - "$ref": "#/components/schemas/FluxVariantType" - }, - { - "$ref": "#/components/schemas/Flux2VariantType" - 
}, - { - "$ref": "#/components/schemas/Qwen3VariantType" + "$ref": "#/components/schemas/BoundingBoxField" }, { "type": "null" } ], - "title": "Variant", - "description": "The variant of the model." + "default": null, + "description": "The bounding box to paste the image into", + "field_kind": "input", + "input": "any", + "orig_required": true }, - "prediction_type": { + "type": { + "const": "paste_image_into_bounding_box", + "default": "paste_image_into_bounding_box", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["type", "id"], + "tags": ["image", "crop"], + "title": "Paste Image into Bounding Box", + "type": "object", + "version": "1.0.0", + "output": { + "$ref": "#/components/schemas/ImageOutput" + } + }, + "PiDiNetEdgeDetectionInvocation": { + "category": "controlnet", + "class": "invocation", + "classification": "stable", + "description": "Generates an edge map using PiDiNet.", + "node_pack": "invokeai", + "properties": { + "board": { "anyOf": [ { - "$ref": "#/components/schemas/SchedulerPredictionType" + "$ref": "#/components/schemas/BoardField" }, { "type": "null" } ], - "description": "The prediction type of the model." + "default": null, + "description": "The board to save the image to", + "field_kind": "internal", + "input": "direct", + "orig_required": false, + "ui_hidden": false }, - "upcast_attention": { + "metadata": { "anyOf": [ { - "type": "boolean" + "$ref": "#/components/schemas/MetadataField" }, { "type": "null" } ], - "title": "Upcast Attention", - "description": "Whether to upcast attention." + "default": null, + "description": "Optional metadata to be saved with the image", + "field_kind": "internal", + "input": "connection", + "orig_required": false, + "ui_hidden": false }, - "config_path": { + "id": { + "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" + }, + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" + }, + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" + }, + "image": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/ImageField" }, { "type": "null" } ], - "title": "Config Path", - "description": "Path to config file for model" - } - }, - "type": "object", - "title": "ModelRecordChanges", - "description": "A set of changes to apply to a model." - }, - "ModelRelationshipBatchRequest": { - "properties": { - "model_keys": { - "items": { - "type": "string" - }, - "type": "array", - "title": "Model Keys", - "description": "List of model keys to fetch related models for", - "examples": [ - ["aa3b247f-90c9-4416-bfcd-aeaa57a5339e", "ac32b914-10ab-496e-a24a-3068724b9c35"], - [ - "b1c2d3e4-f5a6-7890-abcd-ef1234567890", - "12345678-90ab-cdef-1234-567890abcdef", - "fedcba98-7654-3210-fedc-ba9876543210" - ], - ["3bb7c0eb-b6c8-469c-ad8c-4d69c06075e4"] - ] - } - }, - "type": "object", - "required": ["model_keys"], - "title": "ModelRelationshipBatchRequest" - }, - "ModelRelationshipCreateRequest": { - "properties": { - "model_key_1": { - "type": "string", - "title": "Model Key 1", - "description": "The key of the first model in the relationship", - "examples": [ - "aa3b247f-90c9-4416-bfcd-aeaa57a5339e", - "ac32b914-10ab-496e-a24a-3068724b9c35", - "d944abfd-c7c3-42e2-a4ff-da640b29b8b4", - "b1c2d3e4-f5a6-7890-abcd-ef1234567890", - "12345678-90ab-cdef-1234-567890abcdef", - "fedcba98-7654-3210-fedc-ba9876543210" - ] + "default": 
null, + "description": "The image to process", + "field_kind": "input", + "input": "any", + "orig_required": true }, - "model_key_2": { - "type": "string", - "title": "Model Key 2", - "description": "The key of the second model in the relationship", - "examples": [ - "3bb7c0eb-b6c8-469c-ad8c-4d69c06075e4", - "f0c3da4e-d9ff-42b5-a45c-23be75c887c9", - "38170dd8-f1e5-431e-866c-2c81f1277fcc", - "c57fea2d-7646-424c-b9ad-c0ba60fc68be", - "10f7807b-ab54-46a9-ab03-600e88c630a1", - "f6c1d267-cf87-4ee0-bee0-37e791eacab7" - ] - } - }, - "type": "object", - "required": ["model_key_1", "model_key_2"], - "title": "ModelRelationshipCreateRequest" - }, - "ModelRepoVariant": { - "type": "string", - "enum": ["", "fp16", "fp32", "onnx", "openvino", "flax"], - "title": "ModelRepoVariant", - "description": "Various hugging face variants on the diffusers format." - }, - "ModelSourceType": { - "type": "string", - "enum": ["path", "url", "hf_repo_id"], - "title": "ModelSourceType", - "description": "Model source type." - }, - "ModelType": { - "type": "string", - "enum": [ - "onnx", - "main", - "vae", - "lora", - "control_lora", - "controlnet", - "embedding", - "ip_adapter", - "clip_vision", - "clip_embed", - "t2i_adapter", - "t5_encoder", - "qwen3_encoder", - "spandrel_image_to_image", - "siglip", - "flux_redux", - "llava_onevision", - "unknown" - ], - "title": "ModelType", - "description": "Model type." - }, - "ModelVariantType": { - "type": "string", - "enum": ["normal", "inpaint", "depth"], - "title": "ModelVariantType", - "description": "Variant type." 
- }, - "ModelsList": { - "properties": { - "models": { - "items": { - "oneOf": [ - { - "$ref": "#/components/schemas/Main_Diffusers_SD1_Config" - }, - { - "$ref": "#/components/schemas/Main_Diffusers_SD2_Config" - }, - { - "$ref": "#/components/schemas/Main_Diffusers_SDXL_Config" - }, - { - "$ref": "#/components/schemas/Main_Diffusers_SDXLRefiner_Config" - }, - { - "$ref": "#/components/schemas/Main_Diffusers_SD3_Config" - }, - { - "$ref": "#/components/schemas/Main_Diffusers_FLUX_Config" - }, - { - "$ref": "#/components/schemas/Main_Diffusers_Flux2_Config" - }, - { - "$ref": "#/components/schemas/Main_Diffusers_CogView4_Config" - }, - { - "$ref": "#/components/schemas/Main_Diffusers_ZImage_Config" - }, - { - "$ref": "#/components/schemas/Main_Checkpoint_SD1_Config" - }, - { - "$ref": "#/components/schemas/Main_Checkpoint_SD2_Config" - }, - { - "$ref": "#/components/schemas/Main_Checkpoint_SDXL_Config" - }, - { - "$ref": "#/components/schemas/Main_Checkpoint_SDXLRefiner_Config" - }, - { - "$ref": "#/components/schemas/Main_Checkpoint_Flux2_Config" - }, - { - "$ref": "#/components/schemas/Main_Checkpoint_FLUX_Config" - }, - { - "$ref": "#/components/schemas/Main_Checkpoint_ZImage_Config" - }, - { - "$ref": "#/components/schemas/Main_BnBNF4_FLUX_Config" - }, - { - "$ref": "#/components/schemas/Main_GGUF_Flux2_Config" - }, - { - "$ref": "#/components/schemas/Main_GGUF_FLUX_Config" - }, - { - "$ref": "#/components/schemas/Main_GGUF_ZImage_Config" - }, - { - "$ref": "#/components/schemas/VAE_Checkpoint_SD1_Config" - }, - { - "$ref": "#/components/schemas/VAE_Checkpoint_SD2_Config" - }, - { - "$ref": "#/components/schemas/VAE_Checkpoint_SDXL_Config" - }, - { - "$ref": "#/components/schemas/VAE_Checkpoint_FLUX_Config" - }, - { - "$ref": "#/components/schemas/VAE_Checkpoint_Flux2_Config" - }, - { - "$ref": "#/components/schemas/VAE_Diffusers_SD1_Config" - }, - { - "$ref": "#/components/schemas/VAE_Diffusers_SDXL_Config" - }, - { - "$ref": 
"#/components/schemas/VAE_Diffusers_Flux2_Config" - }, - { - "$ref": "#/components/schemas/ControlNet_Checkpoint_SD1_Config" - }, - { - "$ref": "#/components/schemas/ControlNet_Checkpoint_SD2_Config" - }, - { - "$ref": "#/components/schemas/ControlNet_Checkpoint_SDXL_Config" - }, - { - "$ref": "#/components/schemas/ControlNet_Checkpoint_FLUX_Config" - }, - { - "$ref": "#/components/schemas/ControlNet_Checkpoint_ZImage_Config" - }, - { - "$ref": "#/components/schemas/ControlNet_Diffusers_SD1_Config" - }, - { - "$ref": "#/components/schemas/ControlNet_Diffusers_SD2_Config" - }, - { - "$ref": "#/components/schemas/ControlNet_Diffusers_SDXL_Config" - }, - { - "$ref": "#/components/schemas/ControlNet_Diffusers_FLUX_Config" - }, - { - "$ref": "#/components/schemas/LoRA_LyCORIS_SD1_Config" - }, - { - "$ref": "#/components/schemas/LoRA_LyCORIS_SD2_Config" - }, - { - "$ref": "#/components/schemas/LoRA_LyCORIS_SDXL_Config" - }, - { - "$ref": "#/components/schemas/LoRA_LyCORIS_FLUX_Config" - }, - { - "$ref": "#/components/schemas/LoRA_LyCORIS_ZImage_Config" - }, - { - "$ref": "#/components/schemas/LoRA_OMI_SDXL_Config" - }, - { - "$ref": "#/components/schemas/LoRA_OMI_FLUX_Config" - }, - { - "$ref": "#/components/schemas/LoRA_Diffusers_SD1_Config" - }, - { - "$ref": "#/components/schemas/LoRA_Diffusers_SD2_Config" - }, - { - "$ref": "#/components/schemas/LoRA_Diffusers_SDXL_Config" - }, - { - "$ref": "#/components/schemas/LoRA_Diffusers_FLUX_Config" - }, - { - "$ref": "#/components/schemas/LoRA_Diffusers_ZImage_Config" - }, - { - "$ref": "#/components/schemas/ControlLoRA_LyCORIS_FLUX_Config" - }, - { - "$ref": "#/components/schemas/T5Encoder_T5Encoder_Config" - }, - { - "$ref": "#/components/schemas/T5Encoder_BnBLLMint8_Config" - }, - { - "$ref": "#/components/schemas/Qwen3Encoder_Qwen3Encoder_Config" - }, - { - "$ref": "#/components/schemas/Qwen3Encoder_Checkpoint_Config" - }, - { - "$ref": "#/components/schemas/Qwen3Encoder_GGUF_Config" - }, - { - "$ref": 
"#/components/schemas/TI_File_SD1_Config" - }, - { - "$ref": "#/components/schemas/TI_File_SD2_Config" - }, - { - "$ref": "#/components/schemas/TI_File_SDXL_Config" - }, - { - "$ref": "#/components/schemas/TI_Folder_SD1_Config" - }, - { - "$ref": "#/components/schemas/TI_Folder_SD2_Config" - }, - { - "$ref": "#/components/schemas/TI_Folder_SDXL_Config" - }, - { - "$ref": "#/components/schemas/IPAdapter_InvokeAI_SD1_Config" - }, - { - "$ref": "#/components/schemas/IPAdapter_InvokeAI_SD2_Config" - }, - { - "$ref": "#/components/schemas/IPAdapter_InvokeAI_SDXL_Config" - }, - { - "$ref": "#/components/schemas/IPAdapter_Checkpoint_SD1_Config" - }, - { - "$ref": "#/components/schemas/IPAdapter_Checkpoint_SD2_Config" - }, - { - "$ref": "#/components/schemas/IPAdapter_Checkpoint_SDXL_Config" - }, - { - "$ref": "#/components/schemas/IPAdapter_Checkpoint_FLUX_Config" - }, - { - "$ref": "#/components/schemas/T2IAdapter_Diffusers_SD1_Config" - }, - { - "$ref": "#/components/schemas/T2IAdapter_Diffusers_SDXL_Config" - }, - { - "$ref": "#/components/schemas/Spandrel_Checkpoint_Config" - }, - { - "$ref": "#/components/schemas/CLIPEmbed_Diffusers_G_Config" - }, - { - "$ref": "#/components/schemas/CLIPEmbed_Diffusers_L_Config" - }, - { - "$ref": "#/components/schemas/CLIPVision_Diffusers_Config" - }, - { - "$ref": "#/components/schemas/SigLIP_Diffusers_Config" - }, - { - "$ref": "#/components/schemas/FLUXRedux_Checkpoint_Config" - }, - { - "$ref": "#/components/schemas/LlavaOnevision_Diffusers_Config" - }, - { - "$ref": "#/components/schemas/Unknown_Config" - } - ] - }, - "type": "array", - "title": "Models" + "quantize_edges": { + "default": false, + "description": "Whether or not to use safe mode", + "field_kind": "input", + "input": "any", + "orig_default": false, + "orig_required": false, + "title": "Quantize Edges", + "type": "boolean" + }, + "scribble": { + "default": false, + "description": "Whether or not to use scribble mode", + "field_kind": "input", + "input": "any", + 
"orig_default": false, + "orig_required": false, + "title": "Scribble", + "type": "boolean" + }, + "type": { + "const": "pidi_edge_detection", + "default": "pidi_edge_detection", + "field_kind": "node_attribute", + "title": "type", + "type": "string" } }, + "required": ["type", "id"], + "tags": ["controlnet", "edge"], + "title": "PiDiNet Edge Detection", "type": "object", - "required": ["models"], - "title": "ModelsList", - "description": "Return list of configs." + "version": "1.0.0", + "output": { + "$ref": "#/components/schemas/ImageOutput" + } }, - "MultiplyInvocation": { - "category": "math", + "PresetData": { + "properties": { + "positive_prompt": { + "type": "string", + "title": "Positive Prompt", + "description": "Positive prompt" + }, + "negative_prompt": { + "type": "string", + "title": "Negative Prompt", + "description": "Negative prompt" + } + }, + "additionalProperties": false, + "type": "object", + "required": ["positive_prompt", "negative_prompt"], + "title": "PresetData" + }, + "PresetType": { + "type": "string", + "enum": ["user", "default"], + "title": "PresetType" + }, + "ProgressImage": { + "description": "The progress image sent intermittently during processing", + "properties": { + "width": { + "description": "The effective width of the image in pixels", + "minimum": 1, + "title": "Width", + "type": "integer" + }, + "height": { + "description": "The effective height of the image in pixels", + "minimum": 1, + "title": "Height", + "type": "integer" + }, + "dataURL": { + "description": "The image data as a b64 data URL", + "title": "Dataurl", + "type": "string" + } + }, + "required": ["width", "height", "dataURL"], + "title": "ProgressImage", + "type": "object" + }, + "PromptTemplateInvocation": { + "category": "prompt", "class": "invocation", "classification": "stable", - "description": "Multiplies two numbers", + "description": "Applies a Style Preset template to positive and negative prompts.\n\nSelect a Style Preset and provide 
positive/negative prompts. The node replaces\n{prompt} placeholders in the template with your input prompts.", "node_pack": "invokeai", "properties": { "id": { @@ -48290,83 +54479,95 @@ "title": "Use Cache", "type": "boolean" }, - "a": { - "default": 0, - "description": "The first number", + "style_preset": { + "anyOf": [ + { + "$ref": "#/components/schemas/StylePresetField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The Style Preset to use as a template", "field_kind": "input", "input": "any", - "orig_default": 0, + "orig_required": true + }, + "positive_prompt": { + "default": "", + "description": "The positive prompt to insert into the template's {prompt} placeholder", + "field_kind": "input", + "input": "any", + "orig_default": "", "orig_required": false, - "title": "A", - "type": "integer" + "title": "Positive Prompt", + "type": "string", + "ui_component": "textarea" }, - "b": { - "default": 0, - "description": "The second number", + "negative_prompt": { + "default": "", + "description": "The negative prompt to insert into the template's {prompt} placeholder", "field_kind": "input", "input": "any", - "orig_default": 0, + "orig_default": "", "orig_required": false, - "title": "B", - "type": "integer" + "title": "Negative Prompt", + "type": "string", + "ui_component": "textarea" }, "type": { - "const": "mul", - "default": "mul", + "const": "prompt_template", + "default": "prompt_template", "field_kind": "node_attribute", "title": "type", "type": "string" } }, "required": ["type", "id"], - "tags": ["math", "multiply"], - "title": "Multiply Integers", + "tags": ["prompt", "template", "style", "preset"], + "title": "Prompt Template", "type": "object", - "version": "1.0.1", + "version": "1.0.0", "output": { - "$ref": "#/components/schemas/IntegerOutput" + "$ref": "#/components/schemas/PromptTemplateOutput" } }, - "NodeFieldValue": { + "PromptTemplateOutput": { + "class": "output", + "description": "Output for the Prompt Template node", 
"properties": { - "node_path": { + "positive_prompt": { + "description": "The positive prompt with the template applied", + "field_kind": "output", + "title": "Positive Prompt", "type": "string", - "title": "Node Path", - "description": "The node into which this batch data item will be substituted." + "ui_hidden": false }, - "field_name": { + "negative_prompt": { + "description": "The negative prompt with the template applied", + "field_kind": "output", + "title": "Negative Prompt", "type": "string", - "title": "Field Name", - "description": "The field into which this batch data item will be substituted." + "ui_hidden": false }, - "value": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "number" - }, - { - "type": "integer" - }, - { - "$ref": "#/components/schemas/ImageField" - } - ], - "title": "Value", - "description": "The value to substitute into the node/field." + "type": { + "const": "prompt_template_output", + "default": "prompt_template_output", + "field_kind": "node_attribute", + "title": "type", + "type": "string" } }, - "type": "object", - "required": ["node_path", "field_name", "value"], - "title": "NodeFieldValue" + "required": ["output_meta", "positive_prompt", "negative_prompt", "type", "type"], + "title": "PromptTemplateOutput", + "type": "object" }, - "NoiseInvocation": { - "category": "latents", + "PromptsFromFileInvocation": { + "category": "prompt", "class": "invocation", "classification": "stable", - "description": "Generates latent noise.", + "description": "Loads prompts from a text file", "node_pack": "invokeai", "properties": { "id": { @@ -48393,621 +54594,807 @@ "title": "Use Cache", "type": "boolean" }, - "seed": { - "default": 0, - "description": "Seed for random number generation", + "file_path": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Path to prompt text file", "field_kind": "input", "input": "any", - "maximum": 4294967295, - "minimum": 0, - "orig_default": 
0, + "orig_required": true, + "title": "File Path" + }, + "pre_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "String to prepend to each prompt", + "field_kind": "input", + "input": "any", + "orig_default": null, "orig_required": false, - "title": "Seed", - "type": "integer" + "title": "Pre Prompt", + "ui_component": "textarea" }, - "width": { - "default": 512, - "description": "Width of output (px)", - "exclusiveMinimum": 0, + "post_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "String to append to each prompt", "field_kind": "input", "input": "any", - "multipleOf": 8, - "orig_default": 512, + "orig_default": null, "orig_required": false, - "title": "Width", - "type": "integer" + "title": "Post Prompt", + "ui_component": "textarea" }, - "height": { - "default": 512, - "description": "Height of output (px)", - "exclusiveMinimum": 0, + "start_line": { + "default": 1, + "description": "Line in the file to start start from", "field_kind": "input", "input": "any", - "multipleOf": 8, - "orig_default": 512, + "minimum": 1, + "orig_default": 1, "orig_required": false, - "title": "Height", + "title": "Start Line", "type": "integer" }, - "use_cpu": { - "default": true, - "description": "Use CPU for noise generation (for reproducible results across platforms)", + "max_prompts": { + "default": 1, + "description": "Max lines to read from file (0=all)", "field_kind": "input", "input": "any", - "orig_default": true, + "minimum": 0, + "orig_default": 1, "orig_required": false, - "title": "Use Cpu", - "type": "boolean" + "title": "Max Prompts", + "type": "integer" }, "type": { - "const": "noise", - "default": "noise", + "const": "prompt_from_file", + "default": "prompt_from_file", "field_kind": "node_attribute", "title": "type", "type": "string" } }, - "required": ["type", "id"], - "tags": ["latents", "noise"], - "title": "Create Latent 
Noise", - "type": "object", - "version": "1.0.3", - "output": { - "$ref": "#/components/schemas/NoiseOutput" - } + "required": ["type", "id"], + "tags": ["prompt", "file"], + "title": "Prompts from File", + "type": "object", + "version": "1.0.2", + "output": { + "$ref": "#/components/schemas/StringCollectionOutput" + } + }, + "PruneResult": { + "properties": { + "deleted": { + "type": "integer", + "title": "Deleted", + "description": "Number of queue items deleted" + } + }, + "type": "object", + "required": ["deleted"], + "title": "PruneResult", + "description": "Result of pruning the session queue" + }, + "QueueClearedEvent": { + "description": "Event model for queue_cleared", + "properties": { + "timestamp": { + "description": "The timestamp of the event", + "title": "Timestamp", + "type": "integer" + }, + "queue_id": { + "description": "The ID of the queue", + "title": "Queue Id", + "type": "string" + } + }, + "required": ["timestamp", "queue_id"], + "title": "QueueClearedEvent", + "type": "object" + }, + "QueueItemStatusChangedEvent": { + "description": "Event model for queue_item_status_changed", + "properties": { + "timestamp": { + "description": "The timestamp of the event", + "title": "Timestamp", + "type": "integer" + }, + "queue_id": { + "description": "The ID of the queue", + "title": "Queue Id", + "type": "string" + }, + "item_id": { + "description": "The ID of the queue item", + "title": "Item Id", + "type": "integer" + }, + "batch_id": { + "description": "The ID of the queue batch", + "title": "Batch Id", + "type": "string" + }, + "origin": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The origin of the queue item", + "title": "Origin" + }, + "destination": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The destination of the queue item", + "title": "Destination" + }, + "user_id": { + "default": "system", + "description": "The 
ID of the user who created the queue item", + "title": "User Id", + "type": "string" + }, + "status": { + "description": "The new status of the queue item", + "enum": ["pending", "in_progress", "completed", "failed", "canceled"], + "title": "Status", + "type": "string" + }, + "error_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The error type, if any", + "title": "Error Type" + }, + "error_message": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The error message, if any", + "title": "Error Message" + }, + "error_traceback": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The error traceback, if any", + "title": "Error Traceback" + }, + "created_at": { + "description": "The timestamp when the queue item was created", + "title": "Created At", + "type": "string" + }, + "updated_at": { + "description": "The timestamp when the queue item was last updated", + "title": "Updated At", + "type": "string" + }, + "started_at": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The timestamp when the queue item was started", + "title": "Started At" + }, + "completed_at": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The timestamp when the queue item was completed", + "title": "Completed At" + }, + "batch_status": { + "$ref": "#/components/schemas/BatchStatus", + "description": "The status of the batch" + }, + "queue_status": { + "$ref": "#/components/schemas/SessionQueueStatus", + "description": "The status of the queue" + }, + "session_id": { + "description": "The ID of the session (aka graph execution state)", + "title": "Session Id", + "type": "string" + } + }, + "required": [ + "timestamp", + "queue_id", + "item_id", + "batch_id", + "origin", + "destination", 
+ "user_id", + "status", + "error_type", + "error_message", + "error_traceback", + "created_at", + "updated_at", + "started_at", + "completed_at", + "batch_status", + "queue_status", + "session_id" + ], + "title": "QueueItemStatusChangedEvent", + "type": "object" + }, + "QueueItemsRetriedEvent": { + "description": "Event model for queue_items_retried", + "properties": { + "timestamp": { + "description": "The timestamp of the event", + "title": "Timestamp", + "type": "integer" + }, + "queue_id": { + "description": "The ID of the queue", + "title": "Queue Id", + "type": "string" + }, + "retried_item_ids": { + "description": "The IDs of the queue items that were retried", + "items": { + "type": "integer" + }, + "title": "Retried Item Ids", + "type": "array" + } + }, + "required": ["timestamp", "queue_id", "retried_item_ids"], + "title": "QueueItemsRetriedEvent", + "type": "object" }, - "NoiseOutput": { - "class": "output", - "description": "Invocation noise output", + "Qwen3EncoderField": { + "description": "Field for Qwen3 text encoder used by Z-Image models.", "properties": { - "noise": { - "$ref": "#/components/schemas/LatentsField", - "description": "Noise tensor", - "field_kind": "output", - "ui_hidden": false - }, - "width": { - "description": "Width of output (px)", - "field_kind": "output", - "title": "Width", - "type": "integer", - "ui_hidden": false + "tokenizer": { + "$ref": "#/components/schemas/ModelIdentifierField", + "description": "Info to load tokenizer submodel" }, - "height": { - "description": "Height of output (px)", - "field_kind": "output", - "title": "Height", - "type": "integer", - "ui_hidden": false + "text_encoder": { + "$ref": "#/components/schemas/ModelIdentifierField", + "description": "Info to load text_encoder submodel" }, - "type": { - "const": "noise_output", - "default": "noise_output", - "field_kind": "node_attribute", - "title": "type", - "type": "string" + "loras": { + "description": "LoRAs to apply on model loading", + "items": { 
+ "$ref": "#/components/schemas/LoRAField" + }, + "title": "Loras", + "type": "array" } }, - "required": ["output_meta", "noise", "width", "height", "type", "type"], - "title": "NoiseOutput", + "required": ["tokenizer", "text_encoder"], + "title": "Qwen3EncoderField", "type": "object" }, - "NormalMapInvocation": { - "category": "controlnet", - "class": "invocation", - "classification": "stable", - "description": "Generates a normal map.", - "node_pack": "invokeai", + "Qwen3Encoder_Checkpoint_Config": { "properties": { - "board": { + "key": { + "type": "string", + "title": "Key", + "description": "A unique key for this model." + }, + "hash": { + "type": "string", + "title": "Hash", + "description": "The hash of the model file(s)." + }, + "path": { + "type": "string", + "title": "Path", + "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." + }, + "file_size": { + "type": "integer", + "title": "File Size", + "description": "The size of the model in bytes." + }, + "name": { + "type": "string", + "title": "Name", + "description": "Name of the model." + }, + "description": { "anyOf": [ { - "$ref": "#/components/schemas/BoardField" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "The board to save the image to", - "field_kind": "internal", - "input": "direct", - "orig_required": false, - "ui_hidden": false + "title": "Description", + "description": "Model description" }, - "metadata": { + "source": { + "type": "string", + "title": "Source", + "description": "The original source of the model (path, URL or repo_id)." 
+ }, + "source_type": { + "$ref": "#/components/schemas/ModelSourceType", + "description": "The type of source" + }, + "source_api_response": { "anyOf": [ { - "$ref": "#/components/schemas/MetadataField" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "Optional metadata to be saved with the image", - "field_kind": "internal", - "input": "connection", - "orig_required": false, - "ui_hidden": false - }, - "id": { - "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", - "field_kind": "node_attribute", - "title": "Id", - "type": "string" - }, - "is_intermediate": { - "default": false, - "description": "Whether or not this is an intermediate invocation.", - "field_kind": "node_attribute", - "input": "direct", - "orig_required": true, - "title": "Is Intermediate", - "type": "boolean", - "ui_hidden": false, - "ui_type": "IsIntermediate" + "title": "Source Api Response", + "description": "The original API response from the source, as stringified JSON." }, - "use_cache": { - "default": true, - "description": "Whether or not to use the cache", - "field_kind": "node_attribute", - "title": "Use Cache", - "type": "boolean" + "cover_image": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Cover Image", + "description": "Url for image to preview model" }, - "image": { + "config_path": { "anyOf": [ { - "$ref": "#/components/schemas/ImageField" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "The image to process", - "field_kind": "input", - "input": "any", - "orig_required": true + "title": "Config Path", + "description": "Path to the config for this model, if any." 
+ }, + "base": { + "type": "string", + "const": "any", + "title": "Base", + "default": "any" }, "type": { - "const": "normal_map", - "default": "normal_map", - "field_kind": "node_attribute", - "title": "type", - "type": "string" - } - }, - "required": ["type", "id"], - "tags": ["controlnet", "normal"], - "title": "Normal Map", - "type": "object", - "version": "1.0.0", - "output": { - "$ref": "#/components/schemas/ImageOutput" - } - }, - "OffsetPaginatedResults_BoardDTO_": { - "properties": { - "limit": { - "type": "integer", - "title": "Limit", - "description": "Limit of items to get" + "type": "string", + "const": "qwen3_encoder", + "title": "Type", + "default": "qwen3_encoder" }, - "offset": { - "type": "integer", - "title": "Offset", - "description": "Offset from which to retrieve items" + "format": { + "type": "string", + "const": "checkpoint", + "title": "Format", + "default": "checkpoint" }, - "total": { - "type": "integer", - "title": "Total", - "description": "Total number of items in result" + "cpu_only": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "title": "Cpu Only", + "description": "Whether this model should run on CPU only" }, - "items": { - "items": { - "$ref": "#/components/schemas/BoardDTO" - }, - "type": "array", - "title": "Items", - "description": "Items" + "variant": { + "$ref": "#/components/schemas/Qwen3VariantType", + "description": "Qwen3 model size variant (4B or 8B)" } }, "type": "object", - "required": ["limit", "offset", "total", "items"], - "title": "OffsetPaginatedResults[BoardDTO]" + "required": [ + "key", + "hash", + "path", + "file_size", + "name", + "description", + "source", + "source_type", + "source_api_response", + "cover_image", + "config_path", + "base", + "type", + "format", + "cpu_only", + "variant" + ], + "title": "Qwen3Encoder_Checkpoint_Config", + "description": "Configuration for single-file Qwen3 Encoder models (safetensors)." 
}, - "OffsetPaginatedResults_ImageDTO_": { + "Qwen3Encoder_GGUF_Config": { "properties": { - "limit": { - "type": "integer", - "title": "Limit", - "description": "Limit of items to get" - }, - "offset": { - "type": "integer", - "title": "Offset", - "description": "Offset from which to retrieve items" + "key": { + "type": "string", + "title": "Key", + "description": "A unique key for this model." }, - "total": { - "type": "integer", - "title": "Total", - "description": "Total number of items in result" + "hash": { + "type": "string", + "title": "Hash", + "description": "The hash of the model file(s)." }, - "items": { - "items": { - "$ref": "#/components/schemas/ImageDTO" - }, - "type": "array", - "title": "Items", - "description": "Items" - } - }, - "type": "object", - "required": ["limit", "offset", "total", "items"], - "title": "OffsetPaginatedResults[ImageDTO]" - }, - "OrphanedModelInfo": { - "properties": { "path": { "type": "string", "title": "Path", - "description": "Relative path to the orphaned directory from models root" - }, - "absolute_path": { - "type": "string", - "title": "Absolute Path", - "description": "Absolute path to the orphaned directory" - }, - "files": { - "items": { - "type": "string" - }, - "type": "array", - "title": "Files", - "description": "List of model files in this directory" + "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." }, - "size_bytes": { + "file_size": { "type": "integer", - "title": "Size Bytes", - "description": "Total size of all files in bytes" - } - }, - "type": "object", - "required": ["path", "absolute_path", "files", "size_bytes"], - "title": "OrphanedModelInfo", - "description": "Information about an orphaned model directory." - }, - "OutputFieldJSONSchemaExtra": { - "description": "Extra attributes to be added to input fields and their OpenAPI schema. 
Used by the workflow editor\nduring schema parsing and UI rendering.", - "properties": { - "field_kind": { - "$ref": "#/components/schemas/FieldKind" + "title": "File Size", + "description": "The size of the model in bytes." }, - "ui_hidden": { - "default": false, - "title": "Ui Hidden", - "type": "boolean" + "name": { + "type": "string", + "title": "Name", + "description": "Name of the model." }, - "ui_order": { + "description": { "anyOf": [ { - "type": "integer" + "type": "string" }, { "type": "null" } ], - "default": null, - "title": "Ui Order" + "title": "Description", + "description": "Model description" }, - "ui_type": { + "source": { + "type": "string", + "title": "Source", + "description": "The original source of the model (path, URL or repo_id)." + }, + "source_type": { + "$ref": "#/components/schemas/ModelSourceType", + "description": "The type of source" + }, + "source_api_response": { "anyOf": [ { - "$ref": "#/components/schemas/UIType" + "type": "string" }, { "type": "null" } ], - "default": null - } - }, - "required": ["field_kind", "ui_hidden", "ui_order", "ui_type"], - "title": "OutputFieldJSONSchemaExtra", - "type": "object" - }, - "PBRMapsInvocation": { - "category": "image", - "class": "invocation", - "classification": "stable", - "description": "Generate Normal, Displacement and Roughness Map from a given image", - "node_pack": "invokeai", - "properties": { - "board": { + "title": "Source Api Response", + "description": "The original API response from the source, as stringified JSON." 
+ }, + "cover_image": { "anyOf": [ { - "$ref": "#/components/schemas/BoardField" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "The board to save the image to", - "field_kind": "internal", - "input": "direct", - "orig_required": false, - "ui_hidden": false + "title": "Cover Image", + "description": "Url for image to preview model" }, - "metadata": { + "config_path": { "anyOf": [ { - "$ref": "#/components/schemas/MetadataField" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "Optional metadata to be saved with the image", - "field_kind": "internal", - "input": "connection", - "orig_required": false, - "ui_hidden": false + "title": "Config Path", + "description": "Path to the config for this model, if any." }, - "id": { - "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", - "field_kind": "node_attribute", - "title": "Id", - "type": "string" + "base": { + "type": "string", + "const": "any", + "title": "Base", + "default": "any" }, - "is_intermediate": { - "default": false, - "description": "Whether or not this is an intermediate invocation.", - "field_kind": "node_attribute", - "input": "direct", - "orig_required": true, - "title": "Is Intermediate", - "type": "boolean", - "ui_hidden": false, - "ui_type": "IsIntermediate" + "type": { + "type": "string", + "const": "qwen3_encoder", + "title": "Type", + "default": "qwen3_encoder" }, - "use_cache": { - "default": true, - "description": "Whether or not to use the cache", - "field_kind": "node_attribute", - "title": "Use Cache", - "type": "boolean" + "format": { + "type": "string", + "const": "gguf_quantized", + "title": "Format", + "default": "gguf_quantized" }, - "image": { + "cpu_only": { "anyOf": [ { - "$ref": "#/components/schemas/ImageField" + "type": "boolean" }, { "type": "null" } ], - "default": null, - "description": "Input image", - "field_kind": "input", - "input": "any", - 
"orig_required": true - }, - "tile_size": { - "default": 512, - "description": "Tile size", - "field_kind": "input", - "input": "any", - "orig_default": 512, - "orig_required": false, - "title": "Tile Size", - "type": "integer" - }, - "border_mode": { - "default": "none", - "description": "Border mode to apply to eliminate any artifacts or seams", - "enum": ["none", "seamless", "mirror", "replicate"], - "field_kind": "input", - "input": "any", - "orig_default": "none", - "orig_required": false, - "title": "Border Mode", - "type": "string" - }, - "type": { - "const": "pbr_maps", - "default": "pbr_maps", - "field_kind": "node_attribute", - "title": "type", - "type": "string" - } - }, - "required": ["type", "id"], - "tags": ["image", "material"], - "title": "PBR Maps", - "type": "object", - "version": "1.0.0", - "output": { - "$ref": "#/components/schemas/PBRMapsOutput" - } - }, - "PBRMapsOutput": { - "class": "output", - "properties": { - "normal_map": { - "$ref": "#/components/schemas/ImageField", - "default": null, - "description": "The generated normal map", - "field_kind": "output", - "ui_hidden": false - }, - "roughness_map": { - "$ref": "#/components/schemas/ImageField", - "default": null, - "description": "The generated roughness map", - "field_kind": "output", - "ui_hidden": false - }, - "displacement_map": { - "$ref": "#/components/schemas/ImageField", - "default": null, - "description": "The generated displacement map", - "field_kind": "output", - "ui_hidden": false - }, - "type": { - "const": "pbr_maps-output", - "default": "pbr_maps-output", - "field_kind": "node_attribute", - "title": "type", - "type": "string" + "title": "Cpu Only", + "description": "Whether this model should run on CPU only" + }, + "variant": { + "$ref": "#/components/schemas/Qwen3VariantType", + "description": "Qwen3 model size variant (4B or 8B)" } }, - "required": ["output_meta", "normal_map", "roughness_map", "displacement_map", "type", "type"], - "title": "PBRMapsOutput", - 
"type": "object" + "type": "object", + "required": [ + "key", + "hash", + "path", + "file_size", + "name", + "description", + "source", + "source_type", + "source_api_response", + "cover_image", + "config_path", + "base", + "type", + "format", + "cpu_only", + "variant" + ], + "title": "Qwen3Encoder_GGUF_Config", + "description": "Configuration for GGUF-quantized Qwen3 Encoder models." }, - "PaginatedResults_WorkflowRecordListItemWithThumbnailDTO_": { + "Qwen3Encoder_Qwen3Encoder_Config": { "properties": { - "page": { - "type": "integer", - "title": "Page", - "description": "Current Page" + "key": { + "type": "string", + "title": "Key", + "description": "A unique key for this model." }, - "pages": { - "type": "integer", - "title": "Pages", - "description": "Total number of pages" + "hash": { + "type": "string", + "title": "Hash", + "description": "The hash of the model file(s)." }, - "per_page": { - "type": "integer", - "title": "Per Page", - "description": "Number of items per page" + "path": { + "type": "string", + "title": "Path", + "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." }, - "total": { + "file_size": { "type": "integer", - "title": "Total", - "description": "Total number of items in result" + "title": "File Size", + "description": "The size of the model in bytes." }, - "items": { - "items": { - "$ref": "#/components/schemas/WorkflowRecordListItemWithThumbnailDTO" - }, - "type": "array", - "title": "Items", - "description": "Items" - } - }, - "type": "object", - "required": ["page", "pages", "per_page", "total", "items"], - "title": "PaginatedResults[WorkflowRecordListItemWithThumbnailDTO]" - }, - "PairTileImageInvocation": { - "category": "tiles", - "class": "invocation", - "classification": "stable", - "description": "Pair an image with its tile properties.", - "node_pack": "invokeai", - "properties": { - "id": { - "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", - "field_kind": "node_attribute", - "title": "Id", - "type": "string" + "name": { + "type": "string", + "title": "Name", + "description": "Name of the model." }, - "is_intermediate": { - "default": false, - "description": "Whether or not this is an intermediate invocation.", - "field_kind": "node_attribute", - "input": "direct", - "orig_required": true, - "title": "Is Intermediate", - "type": "boolean", - "ui_hidden": false, - "ui_type": "IsIntermediate" + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Description", + "description": "Model description" }, - "use_cache": { - "default": true, - "description": "Whether or not to use the cache", - "field_kind": "node_attribute", - "title": "Use Cache", - "type": "boolean" + "source": { + "type": "string", + "title": "Source", + "description": "The original source of the model (path, URL or repo_id)." }, - "image": { + "source_type": { + "$ref": "#/components/schemas/ModelSourceType", + "description": "The type of source" + }, + "source_api_response": { "anyOf": [ { - "$ref": "#/components/schemas/ImageField" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "The tile image.", - "field_kind": "input", - "input": "any", - "orig_required": true + "title": "Source Api Response", + "description": "The original API response from the source, as stringified JSON." 
}, - "tile": { + "cover_image": { "anyOf": [ { - "$ref": "#/components/schemas/Tile" + "type": "string" }, { "type": "null" } ], - "default": null, - "description": "The tile properties.", - "field_kind": "input", - "input": "any", - "orig_required": true + "title": "Cover Image", + "description": "Url for image to preview model" + }, + "base": { + "type": "string", + "const": "any", + "title": "Base", + "default": "any" }, "type": { - "const": "pair_tile_image", - "default": "pair_tile_image", - "field_kind": "node_attribute", - "title": "type", - "type": "string" + "type": "string", + "const": "qwen3_encoder", + "title": "Type", + "default": "qwen3_encoder" + }, + "format": { + "type": "string", + "const": "qwen3_encoder", + "title": "Format", + "default": "qwen3_encoder" + }, + "cpu_only": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "title": "Cpu Only", + "description": "Whether this model should run on CPU only" + }, + "variant": { + "$ref": "#/components/schemas/Qwen3VariantType", + "description": "Qwen3 model size variant (4B or 8B)" } }, - "required": ["type", "id"], - "tags": ["tiles"], - "title": "Pair Tile with Image", "type": "object", - "version": "1.0.1", - "output": { - "$ref": "#/components/schemas/PairTileImageOutput" - } + "required": [ + "key", + "hash", + "path", + "file_size", + "name", + "description", + "source", + "source_type", + "source_api_response", + "cover_image", + "base", + "type", + "format", + "cpu_only", + "variant" + ], + "title": "Qwen3Encoder_Qwen3Encoder_Config", + "description": "Configuration for Qwen3 Encoder models in a diffusers-like format.\n\nThe model weights are expected to be in a folder called text_encoder inside the model directory,\ncompatible with Qwen2VLForConditionalGeneration or similar architectures used by Z-Image." 
}, - "PairTileImageOutput": { + "Qwen3VariantType": { + "type": "string", + "enum": ["qwen3_4b", "qwen3_8b", "qwen3_06b"], + "title": "Qwen3VariantType", + "description": "Qwen3 text encoder variants based on model size." + }, + "QwenImageConditioningField": { + "description": "A Qwen Image Edit conditioning tensor primitive value", + "properties": { + "conditioning_name": { + "description": "The name of conditioning tensor", + "title": "Conditioning Name", + "type": "string" + } + }, + "required": ["conditioning_name"], + "title": "QwenImageConditioningField", + "type": "object" + }, + "QwenImageConditioningOutput": { "class": "output", + "description": "Base class for nodes that output a Qwen Image Edit conditioning tensor.", "properties": { - "tile_with_image": { - "$ref": "#/components/schemas/TileWithImage", - "description": "A tile description with its corresponding image.", + "conditioning": { + "$ref": "#/components/schemas/QwenImageConditioningField", + "description": "Conditioning tensor", "field_kind": "output", "ui_hidden": false }, "type": { - "const": "pair_tile_image_output", - "default": "pair_tile_image_output", + "const": "qwen_image_conditioning_output", + "default": "qwen_image_conditioning_output", "field_kind": "node_attribute", "title": "type", "type": "string" } }, - "required": ["output_meta", "tile_with_image", "type", "type"], - "title": "PairTileImageOutput", + "required": ["output_meta", "conditioning", "type", "type"], + "title": "QwenImageConditioningOutput", "type": "object" }, - "PasteImageIntoBoundingBoxInvocation": { + "QwenImageDenoiseInvocation": { "category": "image", "class": "invocation", - "classification": "stable", - "description": "Paste the source image into the target image at the given bounding box.\n\nThe source image must be the same size as the bounding box, and the bounding box must fit within the target image.", + "classification": "prototype", + "description": "Run the denoising process with a Qwen Image model.", 
"node_pack": "invokeai", "properties": { "board": { @@ -49066,353 +55453,261 @@ "title": "Use Cache", "type": "boolean" }, - "source_image": { + "latents": { "anyOf": [ { - "$ref": "#/components/schemas/ImageField" + "$ref": "#/components/schemas/LatentsField" }, { "type": "null" } ], "default": null, - "description": "The image to paste", + "description": "Latents tensor", "field_kind": "input", - "input": "any", - "orig_required": true + "input": "connection", + "orig_default": null, + "orig_required": false }, - "target_image": { + "reference_latents": { "anyOf": [ { - "$ref": "#/components/schemas/ImageField" + "$ref": "#/components/schemas/LatentsField" }, { "type": "null" } ], "default": null, - "description": "The image to paste into", + "description": "Reference image latents to guide generation. Encoded through the VAE.", "field_kind": "input", - "input": "any", - "orig_required": true + "input": "connection", + "orig_default": null, + "orig_required": false }, - "bounding_box": { + "denoise_mask": { "anyOf": [ { - "$ref": "#/components/schemas/BoundingBoxField" + "$ref": "#/components/schemas/DenoiseMaskField" }, { "type": "null" } ], "default": null, - "description": "The bounding box to paste the image into", + "description": "A mask of the region to apply the denoising process to. 
Values of 0.0 represent the regions to be fully denoised, and 1.0 represent the regions to be preserved.", + "field_kind": "input", + "input": "connection", + "orig_default": null, + "orig_required": false + }, + "denoising_start": { + "default": 0.0, + "description": "When to start denoising, expressed a percentage of total steps", "field_kind": "input", "input": "any", - "orig_required": true + "maximum": 1, + "minimum": 0, + "orig_default": 0.0, + "orig_required": false, + "title": "Denoising Start", + "type": "number" }, - "type": { - "const": "paste_image_into_bounding_box", - "default": "paste_image_into_bounding_box", - "field_kind": "node_attribute", - "title": "type", - "type": "string" - } - }, - "required": ["type", "id"], - "tags": ["image", "crop"], - "title": "Paste Image into Bounding Box", - "type": "object", - "version": "1.0.0", - "output": { - "$ref": "#/components/schemas/ImageOutput" - } - }, - "PiDiNetEdgeDetectionInvocation": { - "category": "controlnet", - "class": "invocation", - "classification": "stable", - "description": "Generates an edge map using PiDiNet.", - "node_pack": "invokeai", - "properties": { - "board": { + "denoising_end": { + "default": 1.0, + "description": "When to stop denoising, expressed a percentage of total steps", + "field_kind": "input", + "input": "any", + "maximum": 1, + "minimum": 0, + "orig_default": 1.0, + "orig_required": false, + "title": "Denoising End", + "type": "number" + }, + "transformer": { "anyOf": [ { - "$ref": "#/components/schemas/BoardField" + "$ref": "#/components/schemas/TransformerField" }, { "type": "null" } ], "default": null, - "description": "The board to save the image to", - "field_kind": "internal", - "input": "direct", - "orig_required": false, - "ui_hidden": false + "description": "Qwen Image Edit model (Transformer) to load", + "field_kind": "input", + "input": "connection", + "orig_required": true, + "title": "Transformer" }, - "metadata": { + "positive_conditioning": { "anyOf": [ { 
- "$ref": "#/components/schemas/MetadataField" + "$ref": "#/components/schemas/QwenImageConditioningField" }, { "type": "null" } ], "default": null, - "description": "Optional metadata to be saved with the image", - "field_kind": "internal", + "description": "Positive conditioning tensor", + "field_kind": "input", "input": "connection", - "orig_required": false, - "ui_hidden": false - }, - "id": { - "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", - "field_kind": "node_attribute", - "title": "Id", - "type": "string" - }, - "is_intermediate": { - "default": false, - "description": "Whether or not this is an intermediate invocation.", - "field_kind": "node_attribute", - "input": "direct", - "orig_required": true, - "title": "Is Intermediate", - "type": "boolean", - "ui_hidden": false, - "ui_type": "IsIntermediate" - }, - "use_cache": { - "default": true, - "description": "Whether or not to use the cache", - "field_kind": "node_attribute", - "title": "Use Cache", - "type": "boolean" + "orig_required": true }, - "image": { + "negative_conditioning": { "anyOf": [ { - "$ref": "#/components/schemas/ImageField" + "$ref": "#/components/schemas/QwenImageConditioningField" }, { "type": "null" } ], "default": null, - "description": "The image to process", + "description": "Negative conditioning tensor", "field_kind": "input", - "input": "any", - "orig_required": true + "input": "connection", + "orig_default": null, + "orig_required": false }, - "quantize_edges": { - "default": false, - "description": "Whether or not to use safe mode", + "cfg_scale": { + "anyOf": [ + { + "type": "number" + }, + { + "items": { + "type": "number" + }, + "type": "array" + } + ], + "default": 4.0, + "description": "Classifier-Free Guidance scale", "field_kind": "input", "input": "any", - "orig_default": false, + "orig_default": 4.0, "orig_required": false, - "title": "Quantize Edges", - "type": "boolean" + "title": "CFG Scale" }, - 
"scribble": { - "default": false, - "description": "Whether or not to use scribble mode", + "width": { + "default": 1024, + "description": "Width of the generated image.", "field_kind": "input", "input": "any", - "orig_default": false, + "multipleOf": 16, + "orig_default": 1024, "orig_required": false, - "title": "Scribble", - "type": "boolean" - }, - "type": { - "const": "pidi_edge_detection", - "default": "pidi_edge_detection", - "field_kind": "node_attribute", - "title": "type", - "type": "string" - } - }, - "required": ["type", "id"], - "tags": ["controlnet", "edge"], - "title": "PiDiNet Edge Detection", - "type": "object", - "version": "1.0.0", - "output": { - "$ref": "#/components/schemas/ImageOutput" - } - }, - "PresetData": { - "properties": { - "positive_prompt": { - "type": "string", - "title": "Positive Prompt", - "description": "Positive prompt" - }, - "negative_prompt": { - "type": "string", - "title": "Negative Prompt", - "description": "Negative prompt" - } - }, - "additionalProperties": false, - "type": "object", - "required": ["positive_prompt", "negative_prompt"], - "title": "PresetData" - }, - "PresetType": { - "type": "string", - "enum": ["user", "default"], - "title": "PresetType" - }, - "ProgressImage": { - "description": "The progress image sent intermittently during processing", - "properties": { - "width": { - "description": "The effective width of the image in pixels", - "minimum": 1, "title": "Width", "type": "integer" }, "height": { - "description": "The effective height of the image in pixels", - "minimum": 1, + "default": 1024, + "description": "Height of the generated image.", + "field_kind": "input", + "input": "any", + "multipleOf": 16, + "orig_default": 1024, + "orig_required": false, "title": "Height", "type": "integer" }, - "dataURL": { - "description": "The image data as a b64 data URL", - "title": "Dataurl", - "type": "string" - } - }, - "required": ["width", "height", "dataURL"], - "title": "ProgressImage", - "type": "object" 
- }, - "PromptTemplateInvocation": { - "category": "prompt", - "class": "invocation", - "classification": "stable", - "description": "Applies a Style Preset template to positive and negative prompts.\n\nSelect a Style Preset and provide positive/negative prompts. The node replaces\n{prompt} placeholders in the template with your input prompts.", - "node_pack": "invokeai", - "properties": { - "id": { - "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", - "field_kind": "node_attribute", - "title": "Id", - "type": "string" - }, - "is_intermediate": { - "default": false, - "description": "Whether or not this is an intermediate invocation.", - "field_kind": "node_attribute", - "input": "direct", - "orig_required": true, - "title": "Is Intermediate", - "type": "boolean", - "ui_hidden": false, - "ui_type": "IsIntermediate" + "steps": { + "default": 40, + "description": "Number of steps to run", + "exclusiveMinimum": 0, + "field_kind": "input", + "input": "any", + "orig_default": 40, + "orig_required": false, + "title": "Steps", + "type": "integer" }, - "use_cache": { - "default": true, - "description": "Whether or not to use the cache", - "field_kind": "node_attribute", - "title": "Use Cache", - "type": "boolean" + "seed": { + "default": 0, + "description": "Randomness seed for reproducibility.", + "field_kind": "input", + "input": "any", + "orig_default": 0, + "orig_required": false, + "title": "Seed", + "type": "integer" }, - "style_preset": { + "shift": { "anyOf": [ { - "$ref": "#/components/schemas/StylePresetField" + "type": "number" }, { "type": "null" } ], "default": null, - "description": "The Style Preset to use as a template", - "field_kind": "input", - "input": "any", - "orig_required": true - }, - "positive_prompt": { - "default": "", - "description": "The positive prompt to insert into the template's {prompt} placeholder", - "field_kind": "input", - "input": "any", - "orig_default": "", - 
"orig_required": false, - "title": "Positive Prompt", - "type": "string", - "ui_component": "textarea" - }, - "negative_prompt": { - "default": "", - "description": "The negative prompt to insert into the template's {prompt} placeholder", + "description": "Override the sigma schedule shift. When set, uses a fixed shift (e.g. 3.0 for Lightning LoRAs) instead of the default dynamic shifting. Leave unset for the base model's default schedule.", "field_kind": "input", "input": "any", - "orig_default": "", + "orig_default": null, "orig_required": false, - "title": "Negative Prompt", - "type": "string", - "ui_component": "textarea" + "title": "Shift" }, "type": { - "const": "prompt_template", - "default": "prompt_template", + "const": "qwen_image_denoise", + "default": "qwen_image_denoise", "field_kind": "node_attribute", "title": "type", "type": "string" } }, "required": ["type", "id"], - "tags": ["prompt", "template", "style", "preset"], - "title": "Prompt Template", + "tags": ["image", "qwen_image"], + "title": "Denoise - Qwen Image", "type": "object", "version": "1.0.0", "output": { - "$ref": "#/components/schemas/PromptTemplateOutput" + "$ref": "#/components/schemas/LatentsOutput" } }, - "PromptTemplateOutput": { - "class": "output", - "description": "Output for the Prompt Template node", + "QwenImageImageToLatentsInvocation": { + "category": "image", + "class": "invocation", + "classification": "prototype", + "description": "Generates latents from an image using the Qwen Image VAE.", + "node_pack": "invokeai", "properties": { - "positive_prompt": { - "description": "The positive prompt with the template applied", - "field_kind": "output", - "title": "Positive Prompt", - "type": "string", + "board": { + "anyOf": [ + { + "$ref": "#/components/schemas/BoardField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The board to save the image to", + "field_kind": "internal", + "input": "direct", + "orig_required": false, "ui_hidden": false }, - 
"negative_prompt": { - "description": "The negative prompt with the template applied", - "field_kind": "output", - "title": "Negative Prompt", - "type": "string", + "metadata": { + "anyOf": [ + { + "$ref": "#/components/schemas/MetadataField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional metadata to be saved with the image", + "field_kind": "internal", + "input": "connection", + "orig_required": false, "ui_hidden": false }, - "type": { - "const": "prompt_template_output", - "default": "prompt_template_output", - "field_kind": "node_attribute", - "title": "type", - "type": "string" - } - }, - "required": ["output_meta", "positive_prompt", "negative_prompt", "type", "type"], - "title": "PromptTemplateOutput", - "type": "object" - }, - "PromptsFromFileInvocation": { - "category": "prompt", - "class": "invocation", - "classification": "stable", - "description": "Loads prompts from a text file", - "node_pack": "invokeai", - "properties": { "id": { "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", "field_kind": "node_attribute", @@ -49437,675 +55732,556 @@ "title": "Use Cache", "type": "boolean" }, - "file_path": { + "image": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/ImageField" }, { "type": "null" } ], "default": null, - "description": "Path to prompt text file", + "description": "The image to encode.", "field_kind": "input", "input": "any", - "orig_required": true, - "title": "File Path" + "orig_required": true }, - "pre_prompt": { + "vae": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/VAEField" }, { "type": "null" } ], "default": null, - "description": "String to prepend to each prompt", + "description": "VAE", "field_kind": "input", - "input": "any", - "orig_default": null, - "orig_required": false, - "title": "Pre Prompt", - "ui_component": "textarea" + "input": "connection", + "orig_required": true }, - "post_prompt": { + "width": { "anyOf": [ { - "type": "string" + "type": "integer" }, { "type": "null" } ], "default": null, - "description": "String to append to each prompt", + "description": "Resize the image to this width before encoding. If not set, encodes at the image's original size.", "field_kind": "input", "input": "any", "orig_default": null, "orig_required": false, - "title": "Post Prompt", - "ui_component": "textarea" - }, - "start_line": { - "default": 1, - "description": "Line in the file to start start from", - "field_kind": "input", - "input": "any", - "minimum": 1, - "orig_default": 1, - "orig_required": false, - "title": "Start Line", - "type": "integer" + "title": "Width" }, - "max_prompts": { - "default": 1, - "description": "Max lines to read from file (0=all)", + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Resize the image to this height before encoding. 
If not set, encodes at the image's original size.", "field_kind": "input", "input": "any", - "minimum": 0, - "orig_default": 1, + "orig_default": null, "orig_required": false, - "title": "Max Prompts", - "type": "integer" + "title": "Height" }, "type": { - "const": "prompt_from_file", - "default": "prompt_from_file", + "const": "qwen_image_i2l", + "default": "qwen_image_i2l", "field_kind": "node_attribute", "title": "type", "type": "string" } }, "required": ["type", "id"], - "tags": ["prompt", "file"], - "title": "Prompts from File", + "tags": ["image", "latents", "vae", "i2l", "qwen_image"], + "title": "Image to Latents - Qwen Image", "type": "object", - "version": "1.0.2", + "version": "1.0.0", "output": { - "$ref": "#/components/schemas/StringCollectionOutput" + "$ref": "#/components/schemas/LatentsOutput" } }, - "PruneResult": { - "properties": { - "deleted": { - "type": "integer", - "title": "Deleted", - "description": "Number of queue items deleted" - } - }, - "type": "object", - "required": ["deleted"], - "title": "PruneResult", - "description": "Result of pruning the session queue" - }, - "QueueClearedEvent": { - "description": "Event model for queue_cleared", - "properties": { - "timestamp": { - "description": "The timestamp of the event", - "title": "Timestamp", - "type": "integer" - }, - "queue_id": { - "description": "The ID of the queue", - "title": "Queue Id", - "type": "string" - } - }, - "required": ["timestamp", "queue_id"], - "title": "QueueClearedEvent", - "type": "object" - }, - "QueueItemStatusChangedEvent": { - "description": "Event model for queue_item_status_changed", + "QwenImageLatentsToImageInvocation": { + "category": "latents", + "class": "invocation", + "classification": "prototype", + "description": "Generates an image from latents using the Qwen Image VAE.", + "node_pack": "invokeai", "properties": { - "timestamp": { - "description": "The timestamp of the event", - "title": "Timestamp", - "type": "integer" - }, - "queue_id": { - 
"description": "The ID of the queue", - "title": "Queue Id", - "type": "string" - }, - "item_id": { - "description": "The ID of the queue item", - "title": "Item Id", - "type": "integer" - }, - "batch_id": { - "description": "The ID of the queue batch", - "title": "Batch Id", - "type": "string" - }, - "origin": { + "board": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/BoardField" }, { "type": "null" } ], "default": null, - "description": "The origin of the queue item", - "title": "Origin" + "description": "The board to save the image to", + "field_kind": "internal", + "input": "direct", + "orig_required": false, + "ui_hidden": false }, - "destination": { + "metadata": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/MetadataField" }, { "type": "null" } ], "default": null, - "description": "The destination of the queue item", - "title": "Destination" + "description": "Optional metadata to be saved with the image", + "field_kind": "internal", + "input": "connection", + "orig_required": false, + "ui_hidden": false }, - "status": { - "description": "The new status of the queue item", - "enum": ["pending", "in_progress", "completed", "failed", "canceled"], - "title": "Status", + "id": { + "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", "type": "string" }, - "error_type": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "The error type, if any", - "title": "Error Type" - }, - "error_message": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "The error message, if any", - "title": "Error Message" - }, - "error_traceback": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "The error traceback, if any", - "title": "Error Traceback" - }, - "created_at": { - "description": "The timestamp when the queue item was created", - "title": "Created At", - "type": "string" + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" }, - "updated_at": { - "description": "The timestamp when the queue item was last updated", - "title": "Updated At", - "type": "string" + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" }, - "started_at": { + "latents": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/LatentsField" }, { "type": "null" } ], "default": null, - "description": "The timestamp when the queue item was started", - "title": "Started At" + "description": "Latents tensor", + "field_kind": "input", + "input": "connection", + "orig_required": true }, - "completed_at": { + "vae": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/VAEField" }, { "type": "null" } ], "default": null, - "description": "The timestamp when the queue item was completed", - "title": 
"Completed At" - }, - "batch_status": { - "$ref": "#/components/schemas/BatchStatus", - "description": "The status of the batch" - }, - "queue_status": { - "$ref": "#/components/schemas/SessionQueueStatus", - "description": "The status of the queue" + "description": "VAE", + "field_kind": "input", + "input": "connection", + "orig_required": true }, - "session_id": { - "description": "The ID of the session (aka graph execution state)", - "title": "Session Id", + "type": { + "const": "qwen_image_l2i", + "default": "qwen_image_l2i", + "field_kind": "node_attribute", + "title": "type", "type": "string" } }, - "required": [ - "timestamp", - "queue_id", - "item_id", - "batch_id", - "origin", - "destination", - "status", - "error_type", - "error_message", - "error_traceback", - "created_at", - "updated_at", - "started_at", - "completed_at", - "batch_status", - "queue_status", - "session_id" - ], - "title": "QueueItemStatusChangedEvent", - "type": "object" + "required": ["type", "id"], + "tags": ["latents", "image", "vae", "l2i", "qwen_image"], + "title": "Latents to Image - Qwen Image", + "type": "object", + "version": "1.0.0", + "output": { + "$ref": "#/components/schemas/ImageOutput" + } }, - "QueueItemsRetriedEvent": { - "description": "Event model for queue_items_retried", + "QwenImageLoRACollectionLoader": { + "category": "model", + "class": "invocation", + "classification": "prototype", + "description": "Applies a collection of LoRAs to a Qwen Image transformer.", + "node_pack": "invokeai", "properties": { - "timestamp": { - "description": "The timestamp of the event", - "title": "Timestamp", - "type": "integer" - }, - "queue_id": { - "description": "The ID of the queue", - "title": "Queue Id", + "id": { + "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", "type": "string" }, - "retried_item_ids": { - "description": "The IDs of the queue items that were retried", - "items": { - "type": "integer" - }, - "title": "Retried Item Ids", - "type": "array" - } - }, - "required": ["timestamp", "queue_id", "retried_item_ids"], - "title": "QueueItemsRetriedEvent", - "type": "object" - }, - "Qwen3EncoderField": { - "description": "Field for Qwen3 text encoder used by Z-Image models.", - "properties": { - "tokenizer": { - "$ref": "#/components/schemas/ModelIdentifierField", - "description": "Info to load tokenizer submodel" - }, - "text_encoder": { - "$ref": "#/components/schemas/ModelIdentifierField", - "description": "Info to load text_encoder submodel" - }, - "loras": { - "description": "LoRAs to apply on model loading", - "items": { - "$ref": "#/components/schemas/LoRAField" - }, - "title": "Loras", - "type": "array" - } - }, - "required": ["tokenizer", "text_encoder"], - "title": "Qwen3EncoderField", - "type": "object" - }, - "Qwen3Encoder_Checkpoint_Config": { - "properties": { - "key": { - "type": "string", - "title": "Key", - "description": "A unique key for this model." - }, - "hash": { - "type": "string", - "title": "Hash", - "description": "The hash of the model file(s)." - }, - "path": { - "type": "string", - "title": "Path", - "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." - }, - "file_size": { - "type": "integer", - "title": "File Size", - "description": "The size of the model in bytes." - }, - "name": { - "type": "string", - "title": "Name", - "description": "Name of the model." 
- }, - "description": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "Description", - "description": "Model description" - }, - "source": { - "type": "string", - "title": "Source", - "description": "The original source of the model (path, URL or repo_id)." - }, - "source_type": { - "$ref": "#/components/schemas/ModelSourceType", - "description": "The type of source" + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" }, - "source_api_response": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "Source Api Response", - "description": "The original API response from the source, as stringified JSON." + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" }, - "cover_image": { + "loras": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/LoRAField" }, { - "type": "null" - } - ], - "title": "Cover Image", - "description": "Url for image to preview model" - }, - "config_path": { - "anyOf": [ - { - "type": "string" + "items": { + "$ref": "#/components/schemas/LoRAField" + }, + "type": "array" }, { "type": "null" } ], - "title": "Config Path", - "description": "Path to the config for this model, if any." - }, - "base": { - "type": "string", - "const": "any", - "title": "Base", - "default": "any" - }, - "type": { - "type": "string", - "const": "qwen3_encoder", - "title": "Type", - "default": "qwen3_encoder" - }, - "format": { - "type": "string", - "const": "checkpoint", - "title": "Format", - "default": "checkpoint" + "default": null, + "description": "LoRA models and weights. 
May be a single LoRA or collection.", + "field_kind": "input", + "input": "any", + "orig_default": null, + "orig_required": false, + "title": "LoRAs" }, - "cpu_only": { + "transformer": { "anyOf": [ { - "type": "boolean" + "$ref": "#/components/schemas/TransformerField" }, { "type": "null" } ], - "title": "Cpu Only", - "description": "Whether this model should run on CPU only" + "default": null, + "description": "Transformer", + "field_kind": "input", + "input": "connection", + "orig_default": null, + "orig_required": false, + "title": "Transformer" }, - "variant": { - "$ref": "#/components/schemas/Qwen3VariantType", - "description": "Qwen3 model size variant (4B or 8B)" + "type": { + "const": "qwen_image_lora_collection_loader", + "default": "qwen_image_lora_collection_loader", + "field_kind": "node_attribute", + "title": "type", + "type": "string" } }, + "required": ["type", "id"], + "tags": ["lora", "model", "qwen_image"], + "title": "Apply LoRA Collection - Qwen Image", "type": "object", - "required": [ - "key", - "hash", - "path", - "file_size", - "name", - "description", - "source", - "source_type", - "source_api_response", - "cover_image", - "config_path", - "base", - "type", - "format", - "cpu_only", - "variant" - ], - "title": "Qwen3Encoder_Checkpoint_Config", - "description": "Configuration for single-file Qwen3 Encoder models (safetensors)." + "version": "1.0.0", + "output": { + "$ref": "#/components/schemas/QwenImageLoRALoaderOutput" + } }, - "Qwen3Encoder_GGUF_Config": { + "QwenImageLoRALoaderInvocation": { + "category": "model", + "class": "invocation", + "classification": "prototype", + "description": "Apply a LoRA model to a Qwen Image transformer.", + "node_pack": "invokeai", "properties": { - "key": { - "type": "string", - "title": "Key", - "description": "A unique key for this model." - }, - "hash": { - "type": "string", - "title": "Hash", - "description": "The hash of the model file(s)." 
- }, - "path": { - "type": "string", - "title": "Path", - "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." + "id": { + "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" }, - "file_size": { - "type": "integer", - "title": "File Size", - "description": "The size of the model in bytes." + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" }, - "name": { - "type": "string", - "title": "Name", - "description": "Name of the model." + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" }, - "description": { + "lora": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/ModelIdentifierField" }, { "type": "null" } ], - "title": "Description", - "description": "Model description" - }, - "source": { - "type": "string", - "title": "Source", - "description": "The original source of the model (path, URL or repo_id)." 
+ "default": null, + "description": "LoRA model to load", + "field_kind": "input", + "input": "any", + "orig_required": true, + "title": "LoRA", + "ui_model_base": ["qwen-image"], + "ui_model_type": ["lora"] }, - "source_type": { - "$ref": "#/components/schemas/ModelSourceType", - "description": "The type of source" + "weight": { + "default": 1.0, + "description": "The weight at which the LoRA is applied to each model", + "field_kind": "input", + "input": "any", + "orig_default": 1.0, + "orig_required": false, + "title": "Weight", + "type": "number" }, - "source_api_response": { + "transformer": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/TransformerField" }, { "type": "null" } ], - "title": "Source Api Response", - "description": "The original API response from the source, as stringified JSON." + "default": null, + "description": "Transformer", + "field_kind": "input", + "input": "connection", + "orig_default": null, + "orig_required": false, + "title": "Transformer" }, - "cover_image": { + "type": { + "const": "qwen_image_lora_loader", + "default": "qwen_image_lora_loader", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["type", "id"], + "tags": ["lora", "model", "qwen_image"], + "title": "Apply LoRA - Qwen Image", + "type": "object", + "version": "1.0.0", + "output": { + "$ref": "#/components/schemas/QwenImageLoRALoaderOutput" + } + }, + "QwenImageLoRALoaderOutput": { + "class": "output", + "description": "Qwen Image LoRA Loader Output", + "properties": { + "transformer": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/TransformerField" }, { "type": "null" } ], - "title": "Cover Image", - "description": "Url for image to preview model" + "default": null, + "description": "Transformer", + "field_kind": "output", + "title": "Transformer", + "ui_hidden": false }, - "config_path": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "Config Path", - 
"description": "Path to the config for this model, if any." + "type": { + "const": "qwen_image_lora_loader_output", + "default": "qwen_image_lora_loader_output", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["output_meta", "transformer", "type", "type"], + "title": "QwenImageLoRALoaderOutput", + "type": "object" + }, + "QwenImageModelLoaderInvocation": { + "category": "model", + "class": "invocation", + "classification": "prototype", + "description": "Loads a Qwen Image model, outputting its submodels.\n\nThe transformer is always loaded from the main model (Diffusers or GGUF).\n\nFor GGUF quantized models, the VAE and Qwen VL encoder must come from a\nseparate Diffusers model specified in the \"Component Source\" field.\n\nFor Diffusers models, all components are extracted from the main model\nautomatically. The \"Component Source\" field is ignored.", + "node_pack": "invokeai", + "properties": { + "id": { + "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" }, - "base": { - "type": "string", - "const": "any", - "title": "Base", - "default": "any" + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" }, - "type": { - "type": "string", - "const": "qwen3_encoder", - "title": "Type", - "default": "qwen3_encoder" + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" }, - "format": { - "type": "string", - "const": "gguf_quantized", - "title": "Format", - "default": "gguf_quantized" + "model": { + "$ref": "#/components/schemas/ModelIdentifierField", + "description": "Qwen Image Edit model (Transformer) to load", + "field_kind": "input", + "input": "direct", + "orig_required": true, + "title": "Transformer", + "ui_model_base": ["qwen-image"], + "ui_model_type": ["main"] }, - "cpu_only": { + "component_source": { "anyOf": [ { - "type": "boolean" + "$ref": "#/components/schemas/ModelIdentifierField" }, { "type": "null" } ], - "title": "Cpu Only", - "description": "Whether this model should run on CPU only" + "default": null, + "description": "Diffusers Qwen Image model to extract the VAE and Qwen VL encoder from. Required when using a GGUF quantized transformer. 
Ignored when the main model is already in Diffusers format.", + "field_kind": "input", + "input": "direct", + "orig_default": null, + "orig_required": false, + "title": "Component Source (Diffusers)", + "ui_model_base": ["qwen-image"], + "ui_model_format": ["diffusers"], + "ui_model_type": ["main"] }, - "variant": { - "$ref": "#/components/schemas/Qwen3VariantType", - "description": "Qwen3 model size variant (4B or 8B)" + "type": { + "const": "qwen_image_model_loader", + "default": "qwen_image_model_loader", + "field_kind": "node_attribute", + "title": "type", + "type": "string" } }, + "required": ["model", "type", "id"], + "tags": ["model", "qwen_image"], + "title": "Main Model - Qwen Image", "type": "object", - "required": [ - "key", - "hash", - "path", - "file_size", - "name", - "description", - "source", - "source_type", - "source_api_response", - "cover_image", - "config_path", - "base", - "type", - "format", - "cpu_only", - "variant" - ], - "title": "Qwen3Encoder_GGUF_Config", - "description": "Configuration for GGUF-quantized Qwen3 Encoder models." + "version": "1.1.0", + "output": { + "$ref": "#/components/schemas/QwenImageModelLoaderOutput" + } }, - "Qwen3Encoder_Qwen3Encoder_Config": { + "QwenImageModelLoaderOutput": { + "class": "output", + "description": "Qwen Image model loader output.", "properties": { - "key": { - "type": "string", - "title": "Key", - "description": "A unique key for this model." - }, - "hash": { - "type": "string", - "title": "Hash", - "description": "The hash of the model file(s)." - }, - "path": { - "type": "string", - "title": "Path", - "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." 
+ "transformer": { + "$ref": "#/components/schemas/TransformerField", + "description": "Transformer", + "field_kind": "output", + "title": "Transformer", + "ui_hidden": false }, - "file_size": { - "type": "integer", - "title": "File Size", - "description": "The size of the model in bytes." + "qwen_vl_encoder": { + "$ref": "#/components/schemas/QwenVLEncoderField", + "description": "Qwen2.5-VL tokenizer, processor and text/vision encoder", + "field_kind": "output", + "title": "Qwen VL Encoder", + "ui_hidden": false }, - "name": { - "type": "string", - "title": "Name", - "description": "Name of the model." + "vae": { + "$ref": "#/components/schemas/VAEField", + "description": "VAE", + "field_kind": "output", + "title": "VAE", + "ui_hidden": false }, - "description": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "Description", - "description": "Model description" + "type": { + "const": "qwen_image_model_loader_output", + "default": "qwen_image_model_loader_output", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["output_meta", "transformer", "qwen_vl_encoder", "vae", "type", "type"], + "title": "QwenImageModelLoaderOutput", + "type": "object" + }, + "QwenImageTextEncoderInvocation": { + "category": "conditioning", + "class": "invocation", + "classification": "prototype", + "description": "Encodes text and reference images for Qwen Image using Qwen2.5-VL.", + "node_pack": "invokeai", + "properties": { + "id": { + "description": "The id of this instance of an invocation. Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" }, - "source": { - "type": "string", - "title": "Source", - "description": "The original source of the model (path, URL or repo_id)." 
+ "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" }, - "source_type": { - "$ref": "#/components/schemas/ModelSourceType", - "description": "The type of source" + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" }, - "source_api_response": { + "prompt": { "anyOf": [ { "type": "string" @@ -50114,82 +56290,92 @@ "type": "null" } ], - "title": "Source Api Response", - "description": "The original API response from the source, as stringified JSON." + "default": null, + "description": "Text prompt describing the desired edit.", + "field_kind": "input", + "input": "any", + "orig_required": true, + "title": "Prompt", + "ui_component": "textarea" }, - "cover_image": { + "reference_images": { + "default": [], + "description": "Reference images to guide the edit. The model can use multiple reference images.", + "field_kind": "input", + "input": "any", + "items": { + "$ref": "#/components/schemas/ImageField" + }, + "orig_default": [], + "orig_required": false, + "title": "Reference Images", + "type": "array" + }, + "qwen_vl_encoder": { "anyOf": [ { - "type": "string" + "$ref": "#/components/schemas/QwenVLEncoderField" }, { "type": "null" } ], - "title": "Cover Image", - "description": "Url for image to preview model" + "default": null, + "description": "Qwen2.5-VL tokenizer, processor and text/vision encoder", + "field_kind": "input", + "input": "connection", + "orig_required": true, + "title": "Qwen VL Encoder" }, - "base": { - "type": "string", - "const": "any", - "title": "Base", - "default": "any" + "quantization": { + "default": "none", + "description": "Quantize the Qwen VL encoder to reduce VRAM usage. 
'nf4' (4-bit) saves the most memory, 'int8' (8-bit) is a middle ground.", + "enum": ["none", "int8", "nf4"], + "field_kind": "input", + "input": "any", + "orig_default": "none", + "orig_required": false, + "title": "Quantization", + "type": "string" }, "type": { - "type": "string", - "const": "qwen3_encoder", - "title": "Type", - "default": "qwen3_encoder" - }, - "format": { - "type": "string", - "const": "qwen3_encoder", - "title": "Format", - "default": "qwen3_encoder" - }, - "cpu_only": { - "anyOf": [ - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "title": "Cpu Only", - "description": "Whether this model should run on CPU only" - }, - "variant": { - "$ref": "#/components/schemas/Qwen3VariantType", - "description": "Qwen3 model size variant (4B or 8B)" + "const": "qwen_image_text_encoder", + "default": "qwen_image_text_encoder", + "field_kind": "node_attribute", + "title": "type", + "type": "string" } }, + "required": ["type", "id"], + "tags": ["prompt", "conditioning", "qwen_image"], + "title": "Prompt - Qwen Image", "type": "object", - "required": [ - "key", - "hash", - "path", - "file_size", - "name", - "description", - "source", - "source_type", - "source_api_response", - "cover_image", - "base", - "type", - "format", - "cpu_only", - "variant" - ], - "title": "Qwen3Encoder_Qwen3Encoder_Config", - "description": "Configuration for Qwen3 Encoder models in a diffusers-like format.\n\nThe model weights are expected to be in a folder called text_encoder inside the model directory,\ncompatible with Qwen2VLForConditionalGeneration or similar architectures used by Z-Image." + "version": "1.2.0", + "output": { + "$ref": "#/components/schemas/QwenImageConditioningOutput" + } }, - "Qwen3VariantType": { + "QwenImageVariantType": { "type": "string", - "enum": ["qwen3_4b", "qwen3_8b"], - "title": "Qwen3VariantType", - "description": "Qwen3 text encoder variants based on model size." 
+ "enum": ["generate", "edit"], + "title": "QwenImageVariantType", + "description": "Qwen Image model variants." + }, + "QwenVLEncoderField": { + "description": "Field for Qwen2.5-VL encoder used by Qwen Image Edit models.", + "properties": { + "tokenizer": { + "$ref": "#/components/schemas/ModelIdentifierField", + "description": "Info to load tokenizer submodel" + }, + "text_encoder": { + "$ref": "#/components/schemas/ModelIdentifierField", + "description": "Info to load text_encoder submodel" + } + }, + "required": ["tokenizer", "text_encoder"], + "title": "QwenVLEncoderField", + "type": "object" }, "RandomFloatInvocation": { "category": "math", @@ -50584,6 +56770,377 @@ "$ref": "#/components/schemas/IntegerCollectionOutput" } }, + "RecallParameter": { + "properties": { + "positive_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Positive Prompt", + "description": "Positive prompt text" + }, + "negative_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Negative Prompt", + "description": "Negative prompt text" + }, + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Model", + "description": "Main model name/identifier" + }, + "refiner_model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Refiner Model", + "description": "Refiner model name/identifier" + }, + "vae_model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Vae Model", + "description": "VAE model name/identifier" + }, + "scheduler": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Scheduler", + "description": "Scheduler name" + }, + "steps": { + "anyOf": [ + { + "type": "integer", + "minimum": 1.0 + }, + { + "type": "null" + } + ], + "title": "Steps", + "description": "Number of generation steps" + }, + "refiner_steps": { + "anyOf": [ + { + "type": "integer", + 
"minimum": 0.0 + }, + { + "type": "null" + } + ], + "title": "Refiner Steps", + "description": "Number of refiner steps" + }, + "cfg_scale": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Cfg Scale", + "description": "CFG scale for guidance" + }, + "cfg_rescale_multiplier": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Cfg Rescale Multiplier", + "description": "CFG rescale multiplier" + }, + "refiner_cfg_scale": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Refiner Cfg Scale", + "description": "Refiner CFG scale" + }, + "guidance": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Guidance", + "description": "Guidance scale" + }, + "width": { + "anyOf": [ + { + "type": "integer", + "minimum": 64.0 + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "Image width in pixels" + }, + "height": { + "anyOf": [ + { + "type": "integer", + "minimum": 64.0 + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "Image height in pixels" + }, + "seed": { + "anyOf": [ + { + "type": "integer", + "minimum": 0.0 + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed" + }, + "denoise_strength": { + "anyOf": [ + { + "type": "number", + "maximum": 1.0, + "minimum": 0.0 + }, + { + "type": "null" + } + ], + "title": "Denoise Strength", + "description": "Denoising strength" + }, + "refiner_denoise_start": { + "anyOf": [ + { + "type": "number", + "maximum": 1.0, + "minimum": 0.0 + }, + { + "type": "null" + } + ], + "title": "Refiner Denoise Start", + "description": "Refiner denoising start" + }, + "clip_skip": { + "anyOf": [ + { + "type": "integer", + "minimum": 0.0 + }, + { + "type": "null" + } + ], + "title": "Clip Skip", + "description": "CLIP skip layers" + }, + "seamless_x": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "title": "Seamless 
X", + "description": "Enable seamless X tiling" + }, + "seamless_y": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "title": "Seamless Y", + "description": "Enable seamless Y tiling" + }, + "refiner_positive_aesthetic_score": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Refiner Positive Aesthetic Score", + "description": "Refiner positive aesthetic score" + }, + "refiner_negative_aesthetic_score": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Refiner Negative Aesthetic Score", + "description": "Refiner negative aesthetic score" + }, + "loras": { + "anyOf": [ + { + "items": { + "$ref": "#/components/schemas/LoRARecallParameter" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Loras", + "description": "List of LoRAs with their weights" + }, + "control_layers": { + "anyOf": [ + { + "items": { + "$ref": "#/components/schemas/ControlNetRecallParameter" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Control Layers", + "description": "List of control adapters (ControlNet, T2I Adapter, Control LoRA) with their settings" + }, + "ip_adapters": { + "anyOf": [ + { + "items": { + "$ref": "#/components/schemas/IPAdapterRecallParameter" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Ip Adapters", + "description": "List of IP Adapters with their settings" + }, + "reference_images": { + "anyOf": [ + { + "items": { + "$ref": "#/components/schemas/ReferenceImageRecallParameter" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Reference Images", + "description": "List of model-free reference images for architectures that consume reference images directly (FLUX.2 Klein, FLUX Kontext, Qwen Image Edit). The frontend picks the correct config type based on the currently-selected main model." 
+ } + }, + "additionalProperties": false, + "type": "object", + "title": "RecallParameter", + "description": "Request model for updating recallable parameters." + }, + "RecallParametersUpdatedEvent": { + "description": "Event model for recall_parameters_updated", + "properties": { + "timestamp": { + "description": "The timestamp of the event", + "title": "Timestamp", + "type": "integer" + }, + "queue_id": { + "description": "The ID of the queue", + "title": "Queue Id", + "type": "string" + }, + "user_id": { + "description": "The ID of the user whose recall parameters were updated", + "title": "User Id", + "type": "string" + }, + "parameters": { + "additionalProperties": true, + "description": "The recall parameters that were updated", + "title": "Parameters", + "type": "object" + } + }, + "required": ["timestamp", "queue_id", "user_id", "parameters"], + "title": "RecallParametersUpdatedEvent", + "type": "object" + }, "RectangleMaskInvocation": { "category": "conditioning", "class": "invocation", @@ -50744,6 +57301,19 @@ "$ref": "#/components/schemas/MaskOutput" } }, + "ReferenceImageRecallParameter": { + "properties": { + "image_name": { + "type": "string", + "title": "Image Name", + "description": "The filename of the reference image in outputs/images" + } + }, + "type": "object", + "required": ["image_name"], + "title": "ReferenceImageRecallParameter", + "description": "Global reference-image configuration for recall.\n\nUsed for reference images that feed directly into the main model rather\nthan through a separate IP-Adapter / ControlNet model \u2014 for example\nFLUX.2 Klein, FLUX Kontext, and Qwen Image Edit. The receiving frontend\npicks the correct config type (``flux2_reference_image`` /\n``qwen_image_reference_image`` / ``flux_kontext_reference_image``) based\non the currently-selected main model." 
+ }, "RemoteModelFile": { "properties": { "url": { @@ -53569,6 +60139,36 @@ "title": "Queue Id", "description": "The id of the queue with which this item is associated" }, + "user_id": { + "type": "string", + "title": "User Id", + "description": "The id of the user who created this queue item", + "default": "system" + }, + "user_display_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "User Display Name", + "description": "The display name of the user who created this queue item, if available" + }, + "user_email": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "User Email", + "description": "The email of the user who created this queue item, if available" + }, "field_values": { "anyOf": [ { @@ -53610,36 +60210,6 @@ } ], "description": "The workflow associated with this queue item" - }, - "user_id": { - "type": "string", - "title": "User Id", - "description": "The id of the user who created this queue item", - "default": "system" - }, - "user_display_name": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "User Display Name", - "description": "The display name of the user who created this queue item, if available" - }, - "user_email": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "User Email", - "description": "The email of the user who created this queue item, if available" } }, "type": "object", @@ -53730,46 +60300,104 @@ "type": "integer", "title": "Total", "description": "Total number of queue items" + } + }, + "type": "object", + "required": [ + "queue_id", + "item_id", + "batch_id", + "session_id", + "pending", + "in_progress", + "completed", + "failed", + "canceled", + "total" + ], + "title": "SessionQueueStatus" + }, + "SetupRequest": { + "properties": { + "email": { + "type": "string", + "title": "Email", + "description": "Admin email address" }, - "user_pending": { + "display_name": { "anyOf": [ { - "type": 
"integer" + "type": "string" }, { "type": "null" } ], - "title": "User Pending", - "description": "Number of queue items with status 'pending' for the current user" + "title": "Display Name", + "description": "Admin display name" + }, + "password": { + "type": "string", + "title": "Password", + "description": "Admin password" + } + }, + "type": "object", + "required": ["email", "password"], + "title": "SetupRequest", + "description": "Request body for initial admin setup." + }, + "SetupResponse": { + "properties": { + "success": { + "type": "boolean", + "title": "Success", + "description": "Whether setup was successful" + }, + "user": { + "$ref": "#/components/schemas/UserDTO", + "description": "Created admin user information" + } + }, + "type": "object", + "required": ["success", "user"], + "title": "SetupResponse", + "description": "Response from successful admin setup." + }, + "SetupStatusResponse": { + "properties": { + "setup_required": { + "type": "boolean", + "title": "Setup Required", + "description": "Whether initial setup is required" + }, + "multiuser_enabled": { + "type": "boolean", + "title": "Multiuser Enabled", + "description": "Whether multiuser mode is enabled" }, - "user_in_progress": { + "strict_password_checking": { + "type": "boolean", + "title": "Strict Password Checking", + "description": "Whether strict password requirements are enforced" + }, + "admin_email": { "anyOf": [ { - "type": "integer" + "type": "string" }, { "type": "null" } ], - "title": "User In Progress", - "description": "Number of queue items with status 'in_progress' for the current user" + "title": "Admin Email", + "description": "Email of the first active admin user, if any" } }, "type": "object", - "required": [ - "queue_id", - "item_id", - "batch_id", - "session_id", - "pending", - "in_progress", - "completed", - "failed", - "canceled", - "total" - ], - "title": "SessionQueueStatus" + "required": ["setup_required", "multiuser_enabled", "strict_password_checking"], + 
"title": "SetupStatusResponse", + "description": "Response for setup status check." }, "ShowImageInvocation": { "category": "image", @@ -54391,6 +61019,35 @@ } ] }, + "variant": { + "anyOf": [ + { + "$ref": "#/components/schemas/ModelVariantType" + }, + { + "$ref": "#/components/schemas/ClipVariantType" + }, + { + "$ref": "#/components/schemas/FluxVariantType" + }, + { + "$ref": "#/components/schemas/Flux2VariantType" + }, + { + "$ref": "#/components/schemas/ZImageVariantType" + }, + { + "$ref": "#/components/schemas/QwenImageVariantType" + }, + { + "$ref": "#/components/schemas/Qwen3VariantType" + }, + { + "type": "null" + } + ], + "title": "Variant" + }, "is_installed": { "type": "boolean", "title": "Is Installed", @@ -54492,6 +61149,35 @@ } ] }, + "variant": { + "anyOf": [ + { + "$ref": "#/components/schemas/ModelVariantType" + }, + { + "$ref": "#/components/schemas/ClipVariantType" + }, + { + "$ref": "#/components/schemas/FluxVariantType" + }, + { + "$ref": "#/components/schemas/Flux2VariantType" + }, + { + "$ref": "#/components/schemas/ZImageVariantType" + }, + { + "$ref": "#/components/schemas/QwenImageVariantType" + }, + { + "$ref": "#/components/schemas/Qwen3VariantType" + }, + { + "type": "null" + } + ], + "title": "Variant" + }, "is_installed": { "type": "boolean", "title": "Is Installed", @@ -55368,6 +62054,12 @@ { "$ref": "#/components/schemas/Flux2VariantType" }, + { + "$ref": "#/components/schemas/ZImageVariantType" + }, + { + "$ref": "#/components/schemas/QwenImageVariantType" + }, { "$ref": "#/components/schemas/Qwen3VariantType" }, @@ -57957,6 +64649,116 @@ "required": ["affected_boards", "unstarred_images"], "title": "UnstarredImagesResult" }, + "UserDTO": { + "properties": { + "user_id": { + "type": "string", + "title": "User Id", + "description": "Unique user identifier" + }, + "email": { + "type": "string", + "title": "Email", + "description": "User email address" + }, + "display_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": 
"null" + } + ], + "title": "Display Name", + "description": "Display name" + }, + "is_admin": { + "type": "boolean", + "title": "Is Admin", + "description": "Whether user has admin privileges", + "default": false + }, + "is_active": { + "type": "boolean", + "title": "Is Active", + "description": "Whether user account is active", + "default": true + }, + "created_at": { + "type": "string", + "format": "date-time", + "title": "Created At", + "description": "When the user was created" + }, + "updated_at": { + "type": "string", + "format": "date-time", + "title": "Updated At", + "description": "When the user was last updated" + }, + "last_login_at": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "Last Login At", + "description": "When user last logged in" + } + }, + "type": "object", + "required": ["user_id", "email", "created_at", "updated_at"], + "title": "UserDTO", + "description": "User data transfer object." + }, + "UserProfileUpdateRequest": { + "properties": { + "display_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Display Name", + "description": "New display name" + }, + "current_password": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Current Password", + "description": "Current password (required when changing password)" + }, + "new_password": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "New Password", + "description": "New password" + } + }, + "type": "object", + "title": "UserProfileUpdateRequest", + "description": "Request body for a user to update their own profile." + }, "VAEField": { "properties": { "vae": { @@ -58065,6 +64867,129 @@ "title": "VAEOutput", "type": "object" }, + "VAE_Checkpoint_Anima_Config": { + "properties": { + "key": { + "type": "string", + "title": "Key", + "description": "A unique key for this model." 
+ }, + "hash": { + "type": "string", + "title": "Hash", + "description": "The hash of the model file(s)." + }, + "path": { + "type": "string", + "title": "Path", + "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." + }, + "file_size": { + "type": "integer", + "title": "File Size", + "description": "The size of the model in bytes." + }, + "name": { + "type": "string", + "title": "Name", + "description": "Name of the model." + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Description", + "description": "Model description" + }, + "source": { + "type": "string", + "title": "Source", + "description": "The original source of the model (path, URL or repo_id)." + }, + "source_type": { + "$ref": "#/components/schemas/ModelSourceType", + "description": "The type of source" + }, + "source_api_response": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Source Api Response", + "description": "The original API response from the source, as stringified JSON." + }, + "cover_image": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Cover Image", + "description": "Url for image to preview model" + }, + "config_path": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Config Path", + "description": "Path to the config for this model, if any." 
+ }, + "type": { + "type": "string", + "const": "vae", + "title": "Type", + "default": "vae" + }, + "format": { + "type": "string", + "const": "checkpoint", + "title": "Format", + "default": "checkpoint" + }, + "base": { + "type": "string", + "const": "anima", + "title": "Base", + "default": "anima" + } + }, + "type": "object", + "required": [ + "key", + "hash", + "path", + "file_size", + "name", + "description", + "source", + "source_type", + "source_api_response", + "cover_image", + "config_path", + "type", + "format", + "base" + ], + "title": "VAE_Checkpoint_Anima_Config", + "description": "Model config for Anima QwenImage VAE checkpoint models (AutoencoderKLQwenImage)." + }, "VAE_Checkpoint_FLUX_Config": { "properties": { "key": { @@ -59263,10 +66188,6 @@ "title": "Opened At", "description": "The opened timestamp of the workflow." }, - "workflow": { - "$ref": "#/components/schemas/Workflow", - "description": "The workflow." - }, "user_id": { "type": "string", "title": "User Id", @@ -59276,10 +66197,14 @@ "type": "boolean", "title": "Is Public", "description": "Whether this workflow is shared with all users." + }, + "workflow": { + "$ref": "#/components/schemas/Workflow", + "description": "The workflow." } }, "type": "object", - "required": ["workflow_id", "name", "created_at", "updated_at", "workflow", "user_id", "is_public"], + "required": ["workflow_id", "name", "created_at", "updated_at", "user_id", "is_public", "workflow"], "title": "WorkflowRecordDTO" }, "WorkflowRecordListItemWithThumbnailDTO": { @@ -59336,6 +66261,16 @@ "title": "Opened At", "description": "The opened timestamp of the workflow." }, + "user_id": { + "type": "string", + "title": "User Id", + "description": "The id of the user who owns this workflow." + }, + "is_public": { + "type": "boolean", + "title": "Is Public", + "description": "Whether this workflow is shared with all users." 
+ }, "description": { "type": "string", "title": "Description", @@ -59361,16 +66296,6 @@ ], "title": "Thumbnail Url", "description": "The URL of the workflow thumbnail." - }, - "user_id": { - "type": "string", - "title": "User Id", - "description": "The id of the user who owns this workflow." - }, - "is_public": { - "type": "boolean", - "title": "Is Public", - "description": "Whether this workflow is shared with all users." } }, "type": "object", @@ -59379,11 +66304,11 @@ "name", "created_at", "updated_at", + "user_id", + "is_public", "description", "category", - "tags", - "user_id", - "is_public" + "tags" ], "title": "WorkflowRecordListItemWithThumbnailDTO" }, @@ -59447,6 +66372,16 @@ "title": "Opened At", "description": "The opened timestamp of the workflow." }, + "user_id": { + "type": "string", + "title": "User Id", + "description": "The id of the user who owns this workflow." + }, + "is_public": { + "type": "boolean", + "title": "Is Public", + "description": "Whether this workflow is shared with all users." + }, "workflow": { "$ref": "#/components/schemas/Workflow", "description": "The workflow." @@ -59462,20 +66397,10 @@ ], "title": "Thumbnail Url", "description": "The URL of the workflow thumbnail." - }, - "user_id": { - "type": "string", - "title": "User Id", - "description": "The id of the user who owns this workflow." - }, - "is_public": { - "type": "boolean", - "title": "Is Public", - "description": "Whether this workflow is shared with all users." } }, "type": "object", - "required": ["workflow_id", "name", "created_at", "updated_at", "workflow", "user_id", "is_public"], + "required": ["workflow_id", "name", "created_at", "updated_at", "user_id", "is_public", "workflow"], "title": "WorkflowRecordWithThumbnailDTO" }, "WorkflowWithoutID": { @@ -60052,302 +66977,338 @@ "orig_default": null, "orig_required": false }, - "scheduler": { - "default": "euler", - "description": "Scheduler (sampler) for the denoising process. 
Euler is the default and recommended for Z-Image-Turbo. Heun is 2nd-order (better quality, 2x slower). LCM is optimized for few steps.", - "enum": ["euler", "heun", "lcm"], - "field_kind": "input", - "input": "any", - "orig_default": "euler", - "orig_required": false, - "title": "Scheduler", - "type": "string", - "ui_choice_labels": { - "euler": "Euler", - "heun": "Heun (2nd order)", - "lcm": "LCM" - } - }, - "type": { - "const": "z_image_denoise", - "default": "z_image_denoise", - "field_kind": "node_attribute", - "title": "type", - "type": "string" - } - }, - "required": ["type", "id"], - "tags": ["image", "z-image"], - "title": "Denoise - Z-Image", - "type": "object", - "version": "1.4.0", - "output": { - "$ref": "#/components/schemas/LatentsOutput" - } - }, - "ZImageDenoiseMetaInvocation": { - "category": "latents", - "class": "invocation", - "classification": "stable", - "description": "Run denoising process with a Z-Image transformer model + metadata.", - "node_pack": "invokeai", - "properties": { - "metadata": { - "anyOf": [ - { - "$ref": "#/components/schemas/MetadataField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Optional metadata to be saved with the image", - "field_kind": "internal", - "input": "connection", - "orig_required": false, - "ui_hidden": false - }, - "id": { - "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", - "field_kind": "node_attribute", - "title": "Id", - "type": "string" - }, - "is_intermediate": { - "default": false, - "description": "Whether or not this is an intermediate invocation.", - "field_kind": "node_attribute", - "input": "direct", - "orig_required": true, - "title": "Is Intermediate", - "type": "boolean", - "ui_hidden": false, - "ui_type": "IsIntermediate" - }, - "use_cache": { - "default": true, - "description": "Whether or not to use the cache", - "field_kind": "node_attribute", - "title": "Use Cache", - "type": "boolean" - }, - "latents": { - "anyOf": [ - { - "$ref": "#/components/schemas/LatentsField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Latents tensor", - "field_kind": "input", - "input": "connection", - "orig_default": null, - "orig_required": false - }, - "denoise_mask": { + "shift": { + "anyOf": [ + { + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Override the timestep shift (mu) for the sigma schedule. Leave blank to auto-calculate based on image dimensions (recommended). Lower values (~0.5) produce less noise shifting, higher values (~1.15) produce more.", + "field_kind": "input", + "input": "any", + "orig_default": null, + "orig_required": false, + "title": "Shift" + }, + "scheduler": { + "default": "euler", + "description": "Scheduler (sampler) for the denoising process. Euler is the default and recommended. Heun is 2nd-order (better quality, 2x slower). 
LCM works with Turbo only (not Base).", + "enum": ["euler", "heun", "lcm"], + "field_kind": "input", + "input": "any", + "orig_default": "euler", + "orig_required": false, + "title": "Scheduler", + "type": "string", + "ui_choice_labels": { + "euler": "Euler", + "heun": "Heun (2nd order)", + "lcm": "LCM" + } + }, + "type": { + "const": "z_image_denoise", + "default": "z_image_denoise", + "field_kind": "node_attribute", + "title": "type", + "type": "string" + } + }, + "required": ["type", "id"], + "tags": ["image", "z-image"], + "title": "Denoise - Z-Image", + "type": "object", + "version": "1.5.0", + "output": { + "$ref": "#/components/schemas/LatentsOutput" + } + }, + "ZImageDenoiseMetaInvocation": { + "category": "latents", + "class": "invocation", + "classification": "stable", + "description": "Run denoising process with a Z-Image transformer model + metadata.", + "node_pack": "invokeai", + "properties": { + "metadata": { + "anyOf": [ + { + "$ref": "#/components/schemas/MetadataField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Optional metadata to be saved with the image", + "field_kind": "internal", + "input": "connection", + "orig_required": false, + "ui_hidden": false + }, + "id": { + "description": "The id of this instance of an invocation. 
Must be unique among all instances of invocations.", + "field_kind": "node_attribute", + "title": "Id", + "type": "string" + }, + "is_intermediate": { + "default": false, + "description": "Whether or not this is an intermediate invocation.", + "field_kind": "node_attribute", + "input": "direct", + "orig_required": true, + "title": "Is Intermediate", + "type": "boolean", + "ui_hidden": false, + "ui_type": "IsIntermediate" + }, + "use_cache": { + "default": true, + "description": "Whether or not to use the cache", + "field_kind": "node_attribute", + "title": "Use Cache", + "type": "boolean" + }, + "latents": { + "anyOf": [ + { + "$ref": "#/components/schemas/LatentsField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Latents tensor", + "field_kind": "input", + "input": "connection", + "orig_default": null, + "orig_required": false + }, + "denoise_mask": { + "anyOf": [ + { + "$ref": "#/components/schemas/DenoiseMaskField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "A mask of the region to apply the denoising process to. 
Values of 0.0 represent the regions to be fully denoised, and 1.0 represent the regions to be preserved.", + "field_kind": "input", + "input": "connection", + "orig_default": null, + "orig_required": false + }, + "denoising_start": { + "default": 0.0, + "description": "When to start denoising, expressed a percentage of total steps", + "field_kind": "input", + "input": "any", + "maximum": 1, + "minimum": 0, + "orig_default": 0.0, + "orig_required": false, + "title": "Denoising Start", + "type": "number" + }, + "denoising_end": { + "default": 1.0, + "description": "When to stop denoising, expressed a percentage of total steps", + "field_kind": "input", + "input": "any", + "maximum": 1, + "minimum": 0, + "orig_default": 1.0, + "orig_required": false, + "title": "Denoising End", + "type": "number" + }, + "add_noise": { + "default": true, + "description": "Add noise based on denoising start.", + "field_kind": "input", + "input": "any", + "orig_default": true, + "orig_required": false, + "title": "Add Noise", + "type": "boolean" + }, + "transformer": { + "anyOf": [ + { + "$ref": "#/components/schemas/TransformerField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Z-Image model (Transformer) to load", + "field_kind": "input", + "input": "connection", + "orig_required": true, + "title": "Transformer" + }, + "positive_conditioning": { + "anyOf": [ + { + "$ref": "#/components/schemas/ZImageConditioningField" + }, + { + "items": { + "$ref": "#/components/schemas/ZImageConditioningField" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Positive conditioning tensor", + "field_kind": "input", + "input": "connection", + "orig_required": true, + "title": "Positive Conditioning" + }, + "negative_conditioning": { + "anyOf": [ + { + "$ref": "#/components/schemas/ZImageConditioningField" + }, + { + "items": { + "$ref": "#/components/schemas/ZImageConditioningField" + }, + "type": "array" + }, + { + "type": "null" 
+ } + ], + "default": null, + "description": "Negative conditioning tensor", + "field_kind": "input", + "input": "connection", + "orig_default": null, + "orig_required": false, + "title": "Negative Conditioning" + }, + "guidance_scale": { + "default": 1.0, + "description": "Guidance scale for classifier-free guidance. 1.0 = no CFG (recommended for Z-Image-Turbo). Values > 1.0 amplify guidance.", + "field_kind": "input", + "input": "any", + "minimum": 1.0, + "orig_default": 1.0, + "orig_required": false, + "title": "Guidance Scale", + "type": "number" + }, + "width": { + "default": 1024, + "description": "Width of the generated image.", + "field_kind": "input", + "input": "any", + "multipleOf": 16, + "orig_default": 1024, + "orig_required": false, + "title": "Width", + "type": "integer" + }, + "height": { + "default": 1024, + "description": "Height of the generated image.", + "field_kind": "input", + "input": "any", + "multipleOf": 16, + "orig_default": 1024, + "orig_required": false, + "title": "Height", + "type": "integer" + }, + "steps": { + "default": 8, + "description": "Number of denoising steps. 
8 recommended for Z-Image-Turbo.", + "exclusiveMinimum": 0, + "field_kind": "input", + "input": "any", + "orig_default": 8, + "orig_required": false, + "title": "Steps", + "type": "integer" + }, + "seed": { + "default": 0, + "description": "Randomness seed for reproducibility.", + "field_kind": "input", + "input": "any", + "orig_default": 0, + "orig_required": false, + "title": "Seed", + "type": "integer" + }, + "control": { + "anyOf": [ + { + "$ref": "#/components/schemas/ZImageControlField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Z-Image control conditioning for spatial control (Canny, HED, Depth, Pose, MLSD).", + "field_kind": "input", + "input": "connection", + "orig_default": null, + "orig_required": false + }, + "vae": { + "anyOf": [ + { + "$ref": "#/components/schemas/VAEField" + }, + { + "type": "null" + } + ], + "default": null, + "description": "VAE Required for control conditioning.", + "field_kind": "input", + "input": "connection", + "orig_default": null, + "orig_required": false + }, + "shift": { "anyOf": [ { - "$ref": "#/components/schemas/DenoiseMaskField" + "minimum": 0.0, + "type": "number" }, { "type": "null" } ], "default": null, - "description": "A mask of the region to apply the denoising process to. Values of 0.0 represent the regions to be fully denoised, and 1.0 represent the regions to be preserved.", - "field_kind": "input", - "input": "connection", - "orig_default": null, - "orig_required": false - }, - "denoising_start": { - "default": 0.0, - "description": "When to start denoising, expressed a percentage of total steps", + "description": "Override the timestep shift (mu) for the sigma schedule. Leave blank to auto-calculate based on image dimensions (recommended). 
Lower values (~0.5) produce less noise shifting, higher values (~1.15) produce more.", "field_kind": "input", "input": "any", - "maximum": 1, - "minimum": 0, - "orig_default": 0.0, - "orig_required": false, - "title": "Denoising Start", - "type": "number" - }, - "denoising_end": { - "default": 1.0, - "description": "When to stop denoising, expressed a percentage of total steps", - "field_kind": "input", - "input": "any", - "maximum": 1, - "minimum": 0, - "orig_default": 1.0, - "orig_required": false, - "title": "Denoising End", - "type": "number" - }, - "add_noise": { - "default": true, - "description": "Add noise based on denoising start.", - "field_kind": "input", - "input": "any", - "orig_default": true, - "orig_required": false, - "title": "Add Noise", - "type": "boolean" - }, - "transformer": { - "anyOf": [ - { - "$ref": "#/components/schemas/TransformerField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Z-Image model (Transformer) to load", - "field_kind": "input", - "input": "connection", - "orig_required": true, - "title": "Transformer" - }, - "positive_conditioning": { - "anyOf": [ - { - "$ref": "#/components/schemas/ZImageConditioningField" - }, - { - "items": { - "$ref": "#/components/schemas/ZImageConditioningField" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Positive conditioning tensor", - "field_kind": "input", - "input": "connection", - "orig_required": true, - "title": "Positive Conditioning" - }, - "negative_conditioning": { - "anyOf": [ - { - "$ref": "#/components/schemas/ZImageConditioningField" - }, - { - "items": { - "$ref": "#/components/schemas/ZImageConditioningField" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Negative conditioning tensor", - "field_kind": "input", - "input": "connection", "orig_default": null, "orig_required": false, - "title": "Negative Conditioning" - }, - "guidance_scale": { - "default": 1.0, 
- "description": "Guidance scale for classifier-free guidance. 1.0 = no CFG (recommended for Z-Image-Turbo). Values > 1.0 amplify guidance.", - "field_kind": "input", - "input": "any", - "minimum": 1.0, - "orig_default": 1.0, - "orig_required": false, - "title": "Guidance Scale", - "type": "number" - }, - "width": { - "default": 1024, - "description": "Width of the generated image.", - "field_kind": "input", - "input": "any", - "multipleOf": 16, - "orig_default": 1024, - "orig_required": false, - "title": "Width", - "type": "integer" - }, - "height": { - "default": 1024, - "description": "Height of the generated image.", - "field_kind": "input", - "input": "any", - "multipleOf": 16, - "orig_default": 1024, - "orig_required": false, - "title": "Height", - "type": "integer" - }, - "steps": { - "default": 8, - "description": "Number of denoising steps. 8 recommended for Z-Image-Turbo.", - "exclusiveMinimum": 0, - "field_kind": "input", - "input": "any", - "orig_default": 8, - "orig_required": false, - "title": "Steps", - "type": "integer" - }, - "seed": { - "default": 0, - "description": "Randomness seed for reproducibility.", - "field_kind": "input", - "input": "any", - "orig_default": 0, - "orig_required": false, - "title": "Seed", - "type": "integer" - }, - "control": { - "anyOf": [ - { - "$ref": "#/components/schemas/ZImageControlField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Z-Image control conditioning for spatial control (Canny, HED, Depth, Pose, MLSD).", - "field_kind": "input", - "input": "connection", - "orig_default": null, - "orig_required": false - }, - "vae": { - "anyOf": [ - { - "$ref": "#/components/schemas/VAEField" - }, - { - "type": "null" - } - ], - "default": null, - "description": "VAE Required for control conditioning.", - "field_kind": "input", - "input": "connection", - "orig_default": null, - "orig_required": false + "title": "Shift" }, "scheduler": { "default": "euler", - "description": "Scheduler (sampler) 
for the denoising process. Euler is the default and recommended for Z-Image-Turbo. Heun is 2nd-order (better quality, 2x slower). LCM is optimized for few steps.", + "description": "Scheduler (sampler) for the denoising process. Euler is the default and recommended. Heun is 2nd-order (better quality, 2x slower). LCM works with Turbo only (not Base).", "enum": ["euler", "heun", "lcm"], "field_kind": "input", "input": "any", @@ -61207,44 +68168,17 @@ "$ref": "#/components/schemas/ZImageConditioningOutput" } }, - "UserDTO": { - "type": "object", - "required": ["user_id", "email", "is_admin", "is_active"], - "properties": { - "user_id": { - "type": "string", - "title": "User Id", - "description": "The user ID" - }, - "email": { - "type": "string", - "title": "Email", - "description": "The user email" - }, - "display_name": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "Display Name", - "description": "The user display name" - }, - "is_admin": { - "type": "boolean", - "title": "Is Admin", - "description": "Whether the user is an admin" - }, - "is_active": { - "type": "boolean", - "title": "Is Active", - "description": "Whether the user is active" - } - }, - "title": "UserDTO" + "ZImageVariantType": { + "type": "string", + "enum": ["turbo", "zbase"], + "title": "ZImageVariantType", + "description": "Z-Image model variants." + } + }, + "securitySchemes": { + "HTTPBearer": { + "type": "http", + "scheme": "bearer" } } } diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json index 75c5ad6671f..80cc0a6f4f7 100644 --- a/invokeai/frontend/web/public/locales/en.json +++ b/invokeai/frontend/web/public/locales/en.json @@ -378,7 +378,16 @@ "promptHistory": "Prompt History", "clearHistory": "Clear History", "usePrompt": "Use prompt", - "searchPrompts": "Search..." 
+ "searchPrompts": "Search...", + "imageToPrompt": "Image to Prompt", + "selectVisionModel": "Select Vision Model...", + "changeImage": "Change Image", + "uploadImage": "Upload Image", + "generatePrompt": "Generate Prompt", + "expandPromptWithLLM": "Expand Prompt with LLM", + "expandPrompt": "Expand Prompt", + "selectTextLLM": "Select Text LLM...", + "expand": "Expand" }, "queue": { "queue": "Queue", @@ -1256,6 +1265,7 @@ "settings": "Settings", "simpleModelPlaceholder": "URL or path to a local file or diffusers folder", "source": "Source", + "sourceUrl": "Source URL", "sigLip": "SigLIP", "spandrelImageToImage": "Image to Image (Spandrel)", "starterBundles": "Starter Bundles", @@ -1284,6 +1294,7 @@ }, "controlLora": "Control LoRA", "llavaOnevision": "LLaVA OneVision", + "textLLM": "Text LLM", "syncModels": "Sync Models", "syncModelsTooltip": "Identify and remove unused model files in the InvokeAI root directory.", "syncModelsDirectory": "Synchronize Models Directory", @@ -3111,6 +3122,26 @@ "switchOnStartDesc": "Switch on start", "switchOnFinish": "On Finish", "switchOnFinishDesc": "Switch on finish" + }, + "snapshot": { + "snapshots": "Save or Load Canvas Snapshot", + "saveSnapshot": "Save Snapshot", + "restoreSnapshot": "Restore Snapshot", + "snapshotNamePlaceholder": "Snapshot name", + "save": "Save", + "delete": "Delete", + "snapshotSaved": "Snapshot \"{{name}}\" saved", + "snapshotRestored": "Snapshot \"{{name}}\" restored", + "snapshotDeleted": "Snapshot \"{{name}}\" deleted", + "snapshotSaveFailed": "Failed to save snapshot", + "snapshotRestoreFailed": "Failed to restore snapshot", + "snapshotDeleteFailed": "Failed to delete snapshot", + "snapshotMissingImages_one": "{{count}} image referenced by this snapshot no longer exists and will appear as a placeholder", + "snapshotMissingImages_other": "{{count}} images referenced by this snapshot no longer exist and will appear as placeholders", + "snapshotIncompatible": "This snapshot was created with a different 
version and is no longer compatible", + "overwriteSnapshotTitle": "Overwrite snapshot?", + "overwriteSnapshotMessage": "A snapshot named \"{{name}}\" already exists. Overwrite it?", + "overwrite": "Overwrite" } }, "upscaling": { @@ -3197,6 +3228,8 @@ "queue": "Queue", "upscaling": "Upscaling", "upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)", + "customNodes": "Nodes", + "customNodesTab": "$t(ui.tabs.customNodes) $t(common.tab)", "gallery": "Gallery" }, "panels": { @@ -3324,6 +3357,10 @@ "whatsNew": { "whatsNewInInvoke": "What's New in Invoke", "items": [ + "LLM Prompt Tools: Use local language models to expand prompts or generate prompts from images. Install a Text LLM model (e.g. Qwen2.5-1.5B-Instruct) to get started.", + "FLUX.2 Klein Support: InvokeAI now supports the new FLUX.2 Klein models (4B and 9B variants) with GGUF, FP8, and Diffusers formats. Features include txt2img, img2img, inpainting, and outpainting. See 'Starter Models' to get started.", + "DyPE support for FLUX models improves high-resolution (>1536 px up to 4K) images. Go to the 'Advanced Options' section to activate.", + "Z-Image Turbo diversity: Active 'Seed Variance Enhancer' under 'Advanced Options' to add diversity to your ZiT gens.", "Multi-user mode supports multiple isolated users on the same server.", "Enhanced support for Z-Image and FLUX.2 Models.", "Multiple user interface enhancements and new canvas features." @@ -3360,5 +3397,35 @@ "description": "Deep dive sessions exploring advanced Invoke features, creative workflows, and community discussions." 
} } + }, + "customNodes": { + "title": "Custom Nodes", + "installTitle": "Install Node Pack", + "gitUrl": "Git Repository URL", + "gitUrlLabel": "Repository URL", + "gitUrlPlaceholder": "https://github.com/user/node-pack.git", + "install": "Install", + "installing": "Installing", + "installSuccess": "Node pack installed", + "installFailed": "Installation failed", + "installError": "An unexpected error occurred during installation.", + "securityWarning": "Custom nodes execute code on your system. Only install node packs from authors you trust. Malicious nodes could harm your system or compromise your data.", + "installDescription": "Clones the repository into your nodes directory. Workflow files (.json) are imported into your library. Python dependencies (requirements.txt or pyproject.toml) are NOT installed automatically — follow the node pack's documentation to install them manually.", + "dependenciesRequiredTitle": "Manual dependency install required", + "dependenciesRequiredDescription": "'{{name}}' includes a {{file}}. Follow the node pack's documentation to install its Python dependencies before using its nodes.", + "uninstall": "Uninstall", + "reload": "Reload", + "reloading": "Reloading", + "noNodePacks": "No custom node packs installed.", + "scanFolder": "Scan Folder", + "scanFolderDescription": "Node packs placed in the nodes directory are automatically detected at startup. 
Use the Reload button to detect newly added packs without restarting.", + "nodesDirectory": "Nodes directory", + "installQueue": "Install Log", + "queueEmpty": "No recent install activity.", + "name": "Name", + "message": "Message", + "nodeCount_one": "{{count}} node", + "nodeCount_other": "{{count}} nodes", + "uninstalled": "Uninstalled" } } diff --git a/invokeai/frontend/web/scripts/package.json b/invokeai/frontend/web/scripts/package.json index 3dbc1ca591c..985bcf7d652 100644 --- a/invokeai/frontend/web/scripts/package.json +++ b/invokeai/frontend/web/scripts/package.json @@ -1,3 +1,4 @@ { - "type": "module" + "type": "module", + "packageManager": "pnpm@10.12.4" } diff --git a/invokeai/frontend/web/src/features/controlLayers/components/Toolbar/CanvasToolbar.tsx b/invokeai/frontend/web/src/features/controlLayers/components/Toolbar/CanvasToolbar.tsx index fc34f4331c7..855659681d7 100644 --- a/invokeai/frontend/web/src/features/controlLayers/components/Toolbar/CanvasToolbar.tsx +++ b/invokeai/frontend/web/src/features/controlLayers/components/Toolbar/CanvasToolbar.tsx @@ -16,6 +16,7 @@ import { CanvasToolbarRedoButton } from 'features/controlLayers/components/Toolb import { CanvasToolbarResetViewButton } from 'features/controlLayers/components/Toolbar/CanvasToolbarResetViewButton'; import { CanvasToolbarSaveToGalleryButton } from 'features/controlLayers/components/Toolbar/CanvasToolbarSaveToGalleryButton'; import { CanvasToolbarScale } from 'features/controlLayers/components/Toolbar/CanvasToolbarScale'; +import { CanvasToolbarSnapshotMenuButton } from 'features/controlLayers/components/Toolbar/CanvasToolbarSnapshotMenuButton'; import { CanvasToolbarUndoButton } from 'features/controlLayers/components/Toolbar/CanvasToolbarUndoButton'; import { useCanvasDeleteLayerHotkey } from 'features/controlLayers/hooks/useCanvasDeleteLayerHotkey'; import { useCanvasEntityQuickSwitchHotkey } from 'features/controlLayers/hooks/useCanvasEntityQuickSwitchHotkey'; @@ -77,6 +78,7 @@ 
export const CanvasToolbar = memo(() => { + diff --git a/invokeai/frontend/web/src/features/controlLayers/components/Toolbar/CanvasToolbarSnapshotMenuButton.tsx b/invokeai/frontend/web/src/features/controlLayers/components/Toolbar/CanvasToolbarSnapshotMenuButton.tsx new file mode 100644 index 00000000000..6fe3b23a82f --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/components/Toolbar/CanvasToolbarSnapshotMenuButton.tsx @@ -0,0 +1,238 @@ +import { + ConfirmationAlertDialog, + Flex, + IconButton, + Input, + Menu, + MenuButton, + MenuDivider, + MenuGroup, + MenuItem, + MenuList, + Text, + useDisclosure, +} from '@invoke-ai/ui-library'; +import type { SnapshotInfo } from 'features/controlLayers/hooks/useCanvasSnapshots'; +import { useCanvasSnapshots } from 'features/controlLayers/hooks/useCanvasSnapshots'; +import { useCanvasIsStaging } from 'features/controlLayers/store/canvasStagingAreaSlice'; +import { toast } from 'features/toast/toast'; +import type { ChangeEvent, KeyboardEvent, MouseEvent } from 'react'; +import { memo, useCallback, useState } from 'react'; +import { useTranslation } from 'react-i18next'; +import { PiCameraBold, PiFloppyDiskBold, PiTrashBold } from 'react-icons/pi'; + +const SnapshotItem = memo( + ({ + snapshot, + onRestore, + onDelete, + isRestoreDisabled, + }: { + snapshot: SnapshotInfo; + onRestore: (key: string, name: string) => void; + onDelete: (e: MouseEvent, key: string, name: string) => void; + isRestoreDisabled: boolean; + }) => { + const handleClick = useCallback(() => { + onRestore(snapshot.key, snapshot.name); + }, [onRestore, snapshot.key, snapshot.name]); + + const handleDelete = useCallback( + (e: MouseEvent) => { + onDelete(e, snapshot.key, snapshot.name); + }, + [onDelete, snapshot.key, snapshot.name] + ); + + return ( + + + + {snapshot.name} + + } + size="xs" + variant="ghost" + colorScheme="error" + onClick={handleDelete} + isDisabled={isRestoreDisabled} + /> + + + ); + } +); + +SnapshotItem.displayName = 
'SnapshotItem'; + +const getDefaultSnapshotName = (): string => { + const now = new Date(); + const y = now.getFullYear(); + const mo = String(now.getMonth() + 1).padStart(2, '0'); + const d = String(now.getDate()).padStart(2, '0'); + const h = String(now.getHours()).padStart(2, '0'); + const mi = String(now.getMinutes()).padStart(2, '0'); + return `${y}-${mo}-${d} ${h}-${mi}`; +}; + +export const CanvasToolbarSnapshotMenuButton = memo(() => { + const { t } = useTranslation(); + const { snapshots, saveSnapshot, restoreSnapshot, deleteSnapshot } = useCanvasSnapshots(); + const isStaging = useCanvasIsStaging(); + const [snapshotName, setSnapshotName] = useState(''); + const overwriteDialog = useDisclosure(); + const [pendingOverwriteName, setPendingOverwriteName] = useState(null); + + const onNameChange = useCallback((e: ChangeEvent) => { + setSnapshotName(e.target.value); + }, []); + + const doSave = useCallback( + async (name: string) => { + const success = await saveSnapshot(name); + if (success) { + toast({ title: t('controlLayers.snapshot.snapshotSaved', { name }), status: 'info' }); + setSnapshotName(''); + } else { + toast({ title: t('controlLayers.snapshot.snapshotSaveFailed'), status: 'error' }); + } + }, + [saveSnapshot, t] + ); + + const onSave = useCallback(async () => { + const name = snapshotName.trim() || getDefaultSnapshotName(); + if (snapshots.some((s) => s.name === name)) { + setPendingOverwriteName(name); + overwriteDialog.onOpen(); + return; + } + await doSave(name); + }, [snapshotName, snapshots, doSave, overwriteDialog]); + + const onConfirmOverwrite = useCallback(() => { + if (pendingOverwriteName) { + doSave(pendingOverwriteName); + setPendingOverwriteName(null); + } + }, [pendingOverwriteName, doSave]); + + const onCloseOverwriteDialog = useCallback(() => { + setPendingOverwriteName(null); + overwriteDialog.onClose(); + }, [overwriteDialog]); + + const onKeyDown = useCallback( + (e: KeyboardEvent) => { + if (e.key === 'Enter') { + 
e.preventDefault(); + e.stopPropagation(); + onSave(); + } + }, + [onSave] + ); + + const onRestore = useCallback( + async (key: string, name: string) => { + const result = await restoreSnapshot(key); + if (result.success) { + if (result.missingImageCount && result.missingImageCount > 0) { + toast({ + title: t('controlLayers.snapshot.snapshotRestored', { name }), + description: t('controlLayers.snapshot.snapshotMissingImages', { count: result.missingImageCount }), + status: 'warning', + }); + } else { + toast({ title: t('controlLayers.snapshot.snapshotRestored', { name }), status: 'info' }); + } + } else if (result.error === 'incompatible') { + toast({ + title: t('controlLayers.snapshot.snapshotRestoreFailed'), + description: t('controlLayers.snapshot.snapshotIncompatible'), + status: 'error', + }); + } else { + toast({ title: t('controlLayers.snapshot.snapshotRestoreFailed'), status: 'error' }); + } + }, + [restoreSnapshot, t] + ); + + const onDelete = useCallback( + async (e: MouseEvent, key: string, name: string) => { + e.stopPropagation(); + const success = await deleteSnapshot(key); + if (success) { + toast({ title: t('controlLayers.snapshot.snapshotDeleted', { name }), status: 'info' }); + } else { + toast({ title: t('controlLayers.snapshot.snapshotDeleteFailed'), status: 'error' }); + } + }, + [deleteSnapshot, t] + ); + + return ( + <> + + } + variant="link" + alignSelf="stretch" + /> + + + + + } + size="sm" + onClick={onSave} + /> + + + {snapshots.length > 0 && ( + <> + + + {snapshots.map((snapshot) => ( + + ))} + + + )} + + + + {t('controlLayers.snapshot.overwriteSnapshotMessage', { name: pendingOverwriteName ?? 
'' })} + + + ); +}); + +CanvasToolbarSnapshotMenuButton.displayName = 'CanvasToolbarSnapshotMenuButton'; diff --git a/invokeai/frontend/web/src/features/controlLayers/hooks/useCanvasSnapshots.ts b/invokeai/frontend/web/src/features/controlLayers/hooks/useCanvasSnapshots.ts new file mode 100644 index 00000000000..6352e23f4f0 --- /dev/null +++ b/invokeai/frontend/web/src/features/controlLayers/hooks/useCanvasSnapshots.ts @@ -0,0 +1,181 @@ +import { logger } from 'app/logging/logger'; +import { useAppDispatch, useAppStore } from 'app/store/storeHooks'; +import { canvasSnapshotRestored } from 'features/controlLayers/store/canvasSlice'; +import { selectCanvasSlice } from 'features/controlLayers/store/selectors'; +import { zCanvasState } from 'features/controlLayers/store/types'; +import { collectImageNames } from 'features/controlLayers/util/canvasProjectFile'; +import { useCallback, useMemo } from 'react'; +import { serializeError } from 'serialize-error'; +import { appInfoApi } from 'services/api/endpoints/appInfo'; +import { + clientStateApi, + useDeleteClientStateByKeyMutation, + useGetClientStateKeysByPrefixQuery, + useSetClientStateByKeyMutation, +} from 'services/api/endpoints/clientState'; +import { getImageDTOSafe } from 'services/api/endpoints/images'; +import type { JsonObject } from 'type-fest'; +import { z } from 'zod'; + +const log = logger('canvas'); + +const SNAPSHOT_PREFIX = 'canvas_snapshot:'; + +/** + * Quick health check to determine if the backend is reachable. + * Uses the existing appInfoApi RTKQ endpoint for consistency. + */ +const isBackendReachable = async (dispatch: ReturnType): Promise => { + const req = dispatch(appInfoApi.endpoints.getAppVersion.initiate(undefined, { subscribe: false })); + try { + await req.unwrap(); + return true; + } catch { + return false; + } finally { + req.unsubscribe(); + } +}; + +/** + * Check which image_names still exist on the server. + * Returns the list of missing image names. 
If the backend is unreachable, + * skips all checks and returns an empty array to avoid false warnings. + */ +const findMissingImages = async ( + imageNames: string[], + dispatch: ReturnType +): Promise => { + // Pre-flight: verify backend is reachable before checking individual images + if (!(await isBackendReachable(dispatch))) { + log.warn('Backend unreachable — skipping missing image check'); + return []; + } + + const results = await Promise.all( + imageNames.map(async (name) => { + const dto = await getImageDTOSafe(name); + return dto === null ? name : null; + }) + ); + return results.filter((name): name is string => name !== null); +}; + +export type SnapshotInfo = { + key: string; + name: string; +}; + +type RestoreResult = { + success: boolean; + missingImageCount?: number; + error?: 'incompatible' | 'not_found' | 'unknown'; +}; + +export const useCanvasSnapshots = () => { + const dispatch = useAppDispatch(); + const store = useAppStore(); + + const { data: keys } = useGetClientStateKeysByPrefixQuery(SNAPSHOT_PREFIX); + const [setClientState] = useSetClientStateByKeyMutation(); + const [deleteClientState] = useDeleteClientStateByKeyMutation(); + + const snapshots: SnapshotInfo[] = useMemo( + () => + (keys ?? 
[]).map((key) => ({ + key, + name: key.slice(SNAPSHOT_PREFIX.length), + })), + [keys] + ); + + const saveSnapshot = useCallback( + async (name: string) => { + try { + const state = selectCanvasSlice(store.getState()); + const value = JSON.stringify(state); + const key = `${SNAPSHOT_PREFIX}${name}`; + await setClientState({ key, value }).unwrap(); + return true; + } catch (e) { + log.error({ error: serializeError(e) } as JsonObject, 'Failed to save snapshot'); + return false; + } + }, + [store, setClientState] + ); + + const restoreSnapshot = useCallback( + async (key: string): Promise => { + const req = dispatch(clientStateApi.endpoints.getClientStateByKey.initiate(key, { subscribe: false })); + try { + const raw = await req.unwrap(); + if (raw === null) { + throw new Error('Snapshot data not found'); + } + const parsed = JSON.parse(raw); + const canvasState = zCanvasState.parse(parsed); + + // Check for missing images before restoring + // Reuse the shared collectImageNames from canvasProjectFile — snapshots only + // contain canvas entities (no global ref images), so we pass an empty array. + const imageNames = collectImageNames( + { + rasterLayers: canvasState.rasterLayers.entities, + controlLayers: canvasState.controlLayers.entities, + inpaintMasks: canvasState.inpaintMasks.entities, + regionalGuidance: canvasState.regionalGuidance.entities, + bbox: canvasState.bbox, + selectedEntityIdentifier: canvasState.selectedEntityIdentifier, + bookmarkedEntityIdentifier: canvasState.bookmarkedEntityIdentifier, + }, + [] + ); + const imageNamesList = [...imageNames]; + const missingImages = imageNamesList.length > 0 ? 
await findMissingImages(imageNamesList, dispatch) : []; + + if (missingImages.length > 0) { + log.warn( + { missingCount: missingImages.length, total: imageNamesList.length } as unknown as JsonObject, + 'Snapshot references images that no longer exist' + ); + } + + dispatch(canvasSnapshotRestored(canvasState)); + return { success: true, missingImageCount: missingImages.length }; + } catch (e) { + log.error({ error: serializeError(e) } as JsonObject, 'Failed to restore snapshot'); + // Distinguish Zod validation errors (incompatible snapshot) from other failures + const isZodError = e instanceof z.ZodError; + const isNotFound = e instanceof Error && e.message === 'Snapshot data not found'; + return { + success: false, + error: isZodError ? 'incompatible' : isNotFound ? 'not_found' : 'unknown', + }; + } finally { + req.unsubscribe(); + } + }, + [dispatch] + ); + + const deleteSnapshot = useCallback( + async (key: string) => { + try { + await deleteClientState(key).unwrap(); + return true; + } catch (e) { + log.error({ error: serializeError(e) } as JsonObject, 'Failed to delete snapshot'); + return false; + } + }, + [deleteClientState] + ); + + return { + snapshots, + saveSnapshot, + restoreSnapshot, + deleteSnapshot, + }; +}; diff --git a/invokeai/frontend/web/src/features/controlLayers/store/canvasSlice.ts b/invokeai/frontend/web/src/features/controlLayers/store/canvasSlice.ts index dad599374ce..9e639c8e7af 100644 --- a/invokeai/frontend/web/src/features/controlLayers/store/canvasSlice.ts +++ b/invokeai/frontend/web/src/features/controlLayers/store/canvasSlice.ts @@ -1783,11 +1783,32 @@ const slice = createSlice({ state.controlLayers.entities = controlLayers; state.inpaintMasks.entities = inpaintMasks; state.regionalGuidance.entities = regionalGuidance; + // Preserve the current modelBase to avoid desync with the currently selected model + // (same pattern as canvasSnapshotRestored and resetState). 
+ const currentModelBase = state.bbox.modelBase; state.bbox = bbox; + state.bbox.modelBase = currentModelBase; + syncScaledSize(state); state.selectedEntityIdentifier = selectedEntityIdentifier; state.bookmarkedEntityIdentifier = bookmarkedEntityIdentifier; return state; }, + canvasSnapshotRestored: (state, action: PayloadAction) => { + const snapshot = action.payload; + state.controlLayers = snapshot.controlLayers; + state.inpaintMasks = snapshot.inpaintMasks; + state.rasterLayers = snapshot.rasterLayers; + state.regionalGuidance = snapshot.regionalGuidance; + // Restore bbox from snapshot but preserve the current modelBase to avoid desync + // with the currently selected model (same pattern as resetState). + const currentModelBase = state.bbox.modelBase; + state.bbox = snapshot.bbox; + state.bbox.modelBase = currentModelBase; + syncScaledSize(state); + state.selectedEntityIdentifier = snapshot.selectedEntityIdentifier; + state.bookmarkedEntityIdentifier = snapshot.bookmarkedEntityIdentifier; + return state; + }, canvasUndo: () => {}, canvasRedo: () => {}, canvasClearHistory: () => {}, @@ -1869,6 +1890,7 @@ const resetState = (state: CanvasState) => { export const { canvasMetadataRecalled, + canvasSnapshotRestored, canvasProjectRecalled, canvasUndo, canvasRedo, @@ -1997,6 +2019,10 @@ const canvasUndoableConfig: UndoableOptions = { if (!action.type.startsWith(slice.name)) { return false; } + // Snapshot restore and project load replace the canvas state and should not be undoable + if (action.type === canvasSnapshotRestored.type || action.type === canvasProjectRecalled.type) { + return false; + } // Throttle rapid actions of the same type filter = actionsThrottlingFilter(action); return filter; diff --git a/invokeai/frontend/web/src/features/customNodes/CustomNodesInstallLog.tsx b/invokeai/frontend/web/src/features/customNodes/CustomNodesInstallLog.tsx new file mode 100644 index 00000000000..d8e53f45cc8 --- /dev/null +++ 
b/invokeai/frontend/web/src/features/customNodes/CustomNodesInstallLog.tsx @@ -0,0 +1,130 @@ +import type { SystemStyleObject } from '@invoke-ai/ui-library'; +import { Badge, Box, Button, Flex, Heading, Table, Tbody, Td, Text, Th, Thead, Tr } from '@invoke-ai/ui-library'; +import ScrollableContent from 'common/components/OverlayScrollbars/ScrollableContent'; +import type { TFunction } from 'i18next'; +import { memo } from 'react'; +import { useTranslation } from 'react-i18next'; +import { PiBroomBold } from 'react-icons/pi'; + +import type { InstallLogEntry } from './useCustomNodesInstallLog'; +import { useCustomNodesInstallLog } from './useCustomNodesInstallLog'; + +const tableSx: SystemStyleObject = { + '& tbody tr:nth-of-type(odd)': { + backgroundColor: 'rgba(255, 255, 255, 0.04)', + }, + '& tbody tr:nth-of-type(even)': { + backgroundColor: 'transparent', + }, + 'td, th': { + borderColor: 'base.700', + }, + th: { + position: 'sticky', + top: 0, + zIndex: 1, + backgroundColor: 'base.800', + py: 2, + }, + 'th:first-of-type': { + borderTopLeftRadius: 'base', + }, + 'th:last-of-type': { + borderTopRightRadius: 'base', + }, + 'tr:last-of-type td:first-of-type': { + borderBottomLeftRadius: 'base', + }, + 'tr:last-of-type td:last-of-type': { + borderBottomRightRadius: 'base', + }, +}; + +const getStatusColor = (status: InstallLogEntry['status']) => { + switch (status) { + case 'installing': + return 'invokeBlue'; + case 'completed': + return 'invokeGreen'; + case 'error': + return 'error'; + case 'uninstalled': + return 'invokeYellow'; + default: + return 'base'; + } +}; + +const getStatusLabel = (status: InstallLogEntry['status'], t: TFunction) => { + switch (status) { + case 'installing': + return t('customNodes.installing'); + case 'completed': + return t('queue.completed'); + case 'error': + return t('common.error'); + case 'uninstalled': + return t('customNodes.uninstalled'); + default: + return status; + } +}; + +export const CustomNodesInstallLog = memo(() => { 
+ const { t } = useTranslation(); + const { log, clearLog } = useCustomNodesInstallLog(); + + return ( + + + {t('customNodes.installQueue')} + + + + + + + + + + + + + + + {log.length === 0 ? ( + + + + ) : ( + log.map((entry) => ( + + + + + + )) + )} + +
{t('customNodes.name')}{t('queue.status')}{t('customNodes.message')}
+ {t('customNodes.queueEmpty')} +
+ + {entry.name} + + + {getStatusLabel(entry.status, t)} + + + {entry.message} + +
+
+
+
+ ); +}); + +CustomNodesInstallLog.displayName = 'CustomNodesInstallLog'; diff --git a/invokeai/frontend/web/src/features/customNodes/CustomNodesInstallPane.tsx b/invokeai/frontend/web/src/features/customNodes/CustomNodesInstallPane.tsx new file mode 100644 index 00000000000..f1b488c0a4e --- /dev/null +++ b/invokeai/frontend/web/src/features/customNodes/CustomNodesInstallPane.tsx @@ -0,0 +1,66 @@ +import type { SystemStyleObject } from '@invoke-ai/ui-library'; +import { Box, Divider, Flex, Heading, Tab, TabList, TabPanel, TabPanels, Tabs } from '@invoke-ai/ui-library'; +import { memo, useState } from 'react'; +import { useTranslation } from 'react-i18next'; +import { PiFolderOpenBold, PiLinkSimpleBold } from 'react-icons/pi'; + +import { CustomNodesInstallLog } from './CustomNodesInstallLog'; +import { InstallFromGitForm } from './InstallFromGitForm'; +import { ScanNodesForm } from './ScanNodesForm'; + +const paneSx: SystemStyleObject = { + layerStyle: 'first', + p: 4, + borderRadius: 'base', + w: { + base: '50%', + lg: '75%', + '2xl': '85%', + }, + h: 'full', + minWidth: '300px', + overflow: 'auto', +}; + +const installTabSx: SystemStyleObject = { + display: 'flex', + gap: 2, + px: 2, +}; + +export const CustomNodesInstallPane = memo(() => { + const { t } = useTranslation(); + const [tabIndex, setTabIndex] = useState(0); + + return ( + + {t('customNodes.installTitle')} + + + + + {t('customNodes.gitUrl')} + + + + {t('customNodes.scanFolder')} + + + + + + + + + + + + + + + + + ); +}); + +CustomNodesInstallPane.displayName = 'CustomNodesInstallPane'; diff --git a/invokeai/frontend/web/src/features/customNodes/CustomNodesList.tsx b/invokeai/frontend/web/src/features/customNodes/CustomNodesList.tsx new file mode 100644 index 00000000000..e7d1f518be0 --- /dev/null +++ b/invokeai/frontend/web/src/features/customNodes/CustomNodesList.tsx @@ -0,0 +1,109 @@ +import type { SystemStyleObject } from '@invoke-ai/ui-library'; +import { Badge, Button, Flex, Heading, Spinner, Text 
} from '@invoke-ai/ui-library'; +import { memo, useCallback } from 'react'; +import { useTranslation } from 'react-i18next'; +import { PiArrowClockwiseBold } from 'react-icons/pi'; +import { + useListCustomNodePacksQuery, + useReloadCustomNodesMutation, + useUninstallCustomNodePackMutation, +} from 'services/api/endpoints/customNodes'; + +const listSx: SystemStyleObject = { + flexDir: 'column', + p: 4, + gap: 4, + borderRadius: 'base', + w: '50%', + minWidth: '360px', + h: 'full', +}; + +type NodePackInfo = { + name: string; + path: string; + node_count: number; + node_types: string[]; +}; + +const NodePackItem = memo(({ pack }: { pack: NodePackInfo }) => { + const { t } = useTranslation(); + const [uninstallPack] = useUninstallCustomNodePackMutation(); + + const handleUninstall = useCallback(() => { + uninstallPack(pack.name); + }, [uninstallPack, pack.name]); + + return ( + + + {pack.name} + + + + {t('customNodes.nodeCount', { count: pack.node_count })} + {pack.node_types.map((nodeType) => ( + + {nodeType} + + ))} + + + {pack.path} + + + ); +}); + +NodePackItem.displayName = 'NodePackItem'; + +export const CustomNodesList = memo(() => { + const { t } = useTranslation(); + const { data, isLoading } = useListCustomNodePacksQuery(); + const [reloadNodes, { isLoading: isReloading }] = useReloadCustomNodesMutation(); + + const handleReload = useCallback(() => { + reloadNodes(); + }, [reloadNodes]); + + return ( + + + + {t('customNodes.title')} + + + + + + {isLoading && ( + + + + )} + + {data && data.node_packs.length === 0 && ( + + {t('customNodes.noNodePacks')} + + )} + + {data?.node_packs.map((pack) => ( + + ))} + + + ); +}); + +CustomNodesList.displayName = 'CustomNodesList'; diff --git a/invokeai/frontend/web/src/features/customNodes/InstallFromGitForm.tsx b/invokeai/frontend/web/src/features/customNodes/InstallFromGitForm.tsx new file mode 100644 index 00000000000..1c1e9f5bf13 --- /dev/null +++ 
b/invokeai/frontend/web/src/features/customNodes/InstallFromGitForm.tsx @@ -0,0 +1,99 @@ +import { + Alert, + AlertDescription, + AlertIcon, + Button, + Flex, + FormControl, + FormHelperText, + FormLabel, + Input, +} from '@invoke-ai/ui-library'; +import { toast } from 'features/toast/toast'; +import type { ChangeEvent, KeyboardEvent } from 'react'; +import { memo, useCallback, useState } from 'react'; +import { useTranslation } from 'react-i18next'; +import { useInstallCustomNodePackMutation } from 'services/api/endpoints/customNodes'; + +import { useCustomNodesInstallLog } from './useCustomNodesInstallLog'; + +export const InstallFromGitForm = memo(() => { + const { t } = useTranslation(); + const [source, setSource] = useState(''); + const [installPack, { isLoading }] = useInstallCustomNodePackMutation(); + const { addLogEntry } = useCustomNodesInstallLog(); + + const handleInstall = useCallback(async () => { + if (!source.trim()) { + return; + } + + const trimmedSource = source.trim(); + addLogEntry({ name: trimmedSource, status: 'installing' }); + + try { + const result = await installPack({ source: trimmedSource }).unwrap(); + if (result.success) { + addLogEntry({ name: result.name, status: 'completed', message: result.message }); + setSource(''); + if (result.requires_dependencies) { + toast({ + id: `custom-nodes-deps-${result.name}`, + title: t('customNodes.dependenciesRequiredTitle'), + description: t('customNodes.dependenciesRequiredDescription', { + name: result.name, + file: result.dependency_file ?? 
'requirements.txt', + }), + status: 'warning', + duration: null, + isClosable: true, + }); + } + } else { + addLogEntry({ name: result.name, status: 'error', message: result.message }); + } + } catch { + addLogEntry({ name: trimmedSource, status: 'error', message: t('customNodes.installError') }); + } + }, [source, installPack, addLogEntry, t]); + + const handleSourceChange = useCallback((e: ChangeEvent) => { + setSource(e.target.value); + }, []); + + const handleKeyDown = useCallback( + (e: KeyboardEvent) => { + if (e.key === 'Enter') { + handleInstall(); + } + }, + [handleInstall] + ); + + return ( + + + + {t('customNodes.securityWarning')} + + + + {t('customNodes.gitUrlLabel')} + + + + + {t('customNodes.installDescription')} + + + ); +}); + +InstallFromGitForm.displayName = 'InstallFromGitForm'; diff --git a/invokeai/frontend/web/src/features/customNodes/ScanNodesForm.tsx b/invokeai/frontend/web/src/features/customNodes/ScanNodesForm.tsx new file mode 100644 index 00000000000..e53b64f2514 --- /dev/null +++ b/invokeai/frontend/web/src/features/customNodes/ScanNodesForm.tsx @@ -0,0 +1,24 @@ +import { Flex, Text } from '@invoke-ai/ui-library'; +import { memo } from 'react'; +import { useTranslation } from 'react-i18next'; +import { useListCustomNodePacksQuery } from 'services/api/endpoints/customNodes'; + +export const ScanNodesForm = memo(() => { + const { t } = useTranslation(); + const { data } = useListCustomNodePacksQuery(); + + return ( + + + {t('customNodes.scanFolderDescription')} + + {data?.custom_nodes_path && ( + + {t('customNodes.nodesDirectory')}: {data.custom_nodes_path} + + )} + + ); +}); + +ScanNodesForm.displayName = 'ScanNodesForm'; diff --git a/invokeai/frontend/web/src/features/customNodes/useCustomNodesInstallLog.test.ts b/invokeai/frontend/web/src/features/customNodes/useCustomNodesInstallLog.test.ts new file mode 100644 index 00000000000..6e485e19dcb --- /dev/null +++ 
b/invokeai/frontend/web/src/features/customNodes/useCustomNodesInstallLog.test.ts @@ -0,0 +1,87 @@ +import { beforeEach, describe, expect, it } from 'vitest'; + +import { _resetIdCounter, $installLog, addInstallLogEntry, clearInstallLog } from './useCustomNodesInstallLog'; + +describe('Install Log Store', () => { + beforeEach(() => { + clearInstallLog(); + _resetIdCounter(); + }); + + it('starts with an empty log', () => { + expect($installLog.get()).toEqual([]); + }); + + it('adds an entry to the log', () => { + addInstallLogEntry({ name: 'test-pack', status: 'installing' }); + const log = $installLog.get(); + expect(log).toHaveLength(1); + expect(log[0]!.name).toBe('test-pack'); + expect(log[0]!.status).toBe('installing'); + expect(log[0]!.id).toBe('0'); + }); + + it('assigns incrementing IDs', () => { + addInstallLogEntry({ name: 'pack-1', status: 'installing' }); + addInstallLogEntry({ name: 'pack-2', status: 'completed' }); + const log = $installLog.get(); + // Newest first + expect(log[0]!.id).toBe('1'); + expect(log[1]!.id).toBe('0'); + }); + + it('prepends new entries (newest first)', () => { + addInstallLogEntry({ name: 'first', status: 'installing' }); + addInstallLogEntry({ name: 'second', status: 'completed' }); + addInstallLogEntry({ name: 'third', status: 'error' }); + const log = $installLog.get(); + expect(log[0]!.name).toBe('third'); + expect(log[1]!.name).toBe('second'); + expect(log[2]!.name).toBe('first'); + }); + + it('includes a timestamp', () => { + const before = Date.now(); + addInstallLogEntry({ name: 'pack', status: 'installing' }); + const after = Date.now(); + const entry = $installLog.get()[0]!; + expect(entry.timestamp).toBeGreaterThanOrEqual(before); + expect(entry.timestamp).toBeLessThanOrEqual(after); + }); + + it('preserves the message field', () => { + addInstallLogEntry({ name: 'pack', status: 'error', message: 'Something went wrong' }); + expect($installLog.get()[0]!.message).toBe('Something went wrong'); + }); + + it('allows 
message to be undefined', () => { + addInstallLogEntry({ name: 'pack', status: 'completed' }); + expect($installLog.get()[0]!.message).toBeUndefined(); + }); + + it('clears the log', () => { + addInstallLogEntry({ name: 'pack-1', status: 'installing' }); + addInstallLogEntry({ name: 'pack-2', status: 'completed' }); + expect($installLog.get()).toHaveLength(2); + + clearInstallLog(); + expect($installLog.get()).toEqual([]); + }); + + it('supports all status types', () => { + const statuses = ['installing', 'completed', 'error', 'uninstalled'] as const; + for (const status of statuses) { + addInstallLogEntry({ name: `pack-${status}`, status }); + } + const log = $installLog.get(); + expect(log).toHaveLength(4); + expect(log.map((e) => e.status).sort()).toEqual(['completed', 'error', 'installing', 'uninstalled']); + }); + + it('returns the created entry', () => { + const entry = addInstallLogEntry({ name: 'my-pack', status: 'installing' }); + expect(entry.name).toBe('my-pack'); + expect(entry.id).toBeDefined(); + expect(entry.timestamp).toBeDefined(); + }); +}); diff --git a/invokeai/frontend/web/src/features/customNodes/useCustomNodesInstallLog.ts b/invokeai/frontend/web/src/features/customNodes/useCustomNodesInstallLog.ts new file mode 100644 index 00000000000..65a37edb133 --- /dev/null +++ b/invokeai/frontend/web/src/features/customNodes/useCustomNodesInstallLog.ts @@ -0,0 +1,46 @@ +import { useStore } from '@nanostores/react'; +import { atom } from 'nanostores'; +import { useCallback } from 'react'; + +export type InstallLogEntry = { + id: string; + name: string; + status: 'installing' | 'completed' | 'error' | 'uninstalled'; + message?: string; + timestamp: number; +}; + +export const $installLog = atom([]); + +let nextId = 0; + +/** + * Resets the internal ID counter. Only for testing. 
+ */ +export const _resetIdCounter = () => { + nextId = 0; +}; + +export const addInstallLogEntry = (entry: Omit): InstallLogEntry => { + const newEntry: InstallLogEntry = { + ...entry, + id: String(nextId++), + timestamp: Date.now(), + }; + $installLog.set([newEntry, ...$installLog.get()]); + return newEntry; +}; + +export const clearInstallLog = () => { + $installLog.set([]); +}; + +export const useCustomNodesInstallLog = () => { + const log = useStore($installLog); + + const addLogEntry = useCallback((entry: Omit) => { + addInstallLogEntry(entry); + }, []); + + return { log, addLogEntry, clearLog: clearInstallLog }; +}; diff --git a/invokeai/frontend/web/src/features/customNodes/useIsCustomNodesEnabled.test.ts b/invokeai/frontend/web/src/features/customNodes/useIsCustomNodesEnabled.test.ts new file mode 100644 index 00000000000..5f9a882de14 --- /dev/null +++ b/invokeai/frontend/web/src/features/customNodes/useIsCustomNodesEnabled.test.ts @@ -0,0 +1,88 @@ +import { describe, expect, it } from 'vitest'; + +import { deriveCustomNodesPermission, getIsCustomNodesEnabled } from './useIsCustomNodesEnabled'; + +describe('getIsCustomNodesEnabled', () => { + it('returns true in single-user mode regardless of admin status', () => { + expect(getIsCustomNodesEnabled(false, false)).toBe(true); + expect(getIsCustomNodesEnabled(false, true)).toBe(true); + expect(getIsCustomNodesEnabled(false, undefined)).toBe(true); + }); + + it('returns true in multiuser mode for admin users', () => { + expect(getIsCustomNodesEnabled(true, true)).toBe(true); + }); + + it('returns false in multiuser mode for non-admin users', () => { + expect(getIsCustomNodesEnabled(true, false)).toBe(false); + }); + + it('returns false in multiuser mode when user is not yet loaded', () => { + expect(getIsCustomNodesEnabled(true, undefined)).toBe(false); + }); +}); + +/** + * Permission-state tests. 
+ * + * These call deriveCustomNodesPermission directly — the same function the hook + * uses internally — so the test and the hook can never drift. The contract is: + * loading (setupStatus undefined) -> { isKnown: false, isAllowed: false } + * resolved -> { isKnown: true, isAllowed: getIsCustomNodesEnabled(...) } + * + * Consumers read this state: + * VerticalNavBar shows tab only when isAllowed + * AppContent redirects only when isKnown && !isAllowed + */ +describe('deriveCustomNodesPermission', () => { + it('returns unknown/denied while setupStatus is still loading', () => { + expect(deriveCustomNodesPermission(undefined, undefined)).toEqual({ isKnown: false, isAllowed: false }); + expect(deriveCustomNodesPermission(undefined, null)).toEqual({ isKnown: false, isAllowed: false }); + expect(deriveCustomNodesPermission(undefined, { is_admin: false })).toEqual({ isKnown: false, isAllowed: false }); + expect(deriveCustomNodesPermission(undefined, { is_admin: true })).toEqual({ isKnown: false, isAllowed: false }); + }); + + it('resolves to known/allowed in single-user mode regardless of user', () => { + expect(deriveCustomNodesPermission({ multiuser_enabled: false }, undefined)).toEqual({ + isKnown: true, + isAllowed: true, + }); + expect(deriveCustomNodesPermission({ multiuser_enabled: false }, null)).toEqual({ + isKnown: true, + isAllowed: true, + }); + expect(deriveCustomNodesPermission({ multiuser_enabled: false }, { is_admin: false })).toEqual({ + isKnown: true, + isAllowed: true, + }); + }); + + it('resolves to known/allowed for multiuser admin', () => { + expect(deriveCustomNodesPermission({ multiuser_enabled: true }, { is_admin: true })).toEqual({ + isKnown: true, + isAllowed: true, + }); + }); + + it('resolves to known/denied for multiuser non-admin or missing user', () => { + expect(deriveCustomNodesPermission({ multiuser_enabled: true }, { is_admin: false })).toEqual({ + isKnown: true, + isAllowed: false, + }); + expect(deriveCustomNodesPermission({ 
multiuser_enabled: true }, null)).toEqual({ + isKnown: true, + isAllowed: false, + }); + expect(deriveCustomNodesPermission({ multiuser_enabled: true }, undefined)).toEqual({ + isKnown: true, + isAllowed: false, + }); + }); + + it('non-admin multiuser user never sees isAllowed=true in any state', () => { + // Regression: during loading AND after resolution, a non-admin in multiuser + // mode must never get isAllowed=true, so the tab never renders content. + expect(deriveCustomNodesPermission(undefined, { is_admin: false }).isAllowed).toBe(false); + expect(deriveCustomNodesPermission({ multiuser_enabled: true }, { is_admin: false }).isAllowed).toBe(false); + }); +}); diff --git a/invokeai/frontend/web/src/features/customNodes/useIsCustomNodesEnabled.ts b/invokeai/frontend/web/src/features/customNodes/useIsCustomNodesEnabled.ts new file mode 100644 index 00000000000..926e51d5474 --- /dev/null +++ b/invokeai/frontend/web/src/features/customNodes/useIsCustomNodesEnabled.ts @@ -0,0 +1,65 @@ +import { useAppSelector } from 'app/store/storeHooks'; +import { selectCurrentUser } from 'features/auth/store/authSlice'; +import { useMemo } from 'react'; +import { useGetSetupStatusQuery } from 'services/api/endpoints/auth'; + +/** + * Pure decision function: determines whether custom node management is enabled. + * + * Returns true if: + * - Multiuser mode is disabled (single-user mode = always admin) + * - Multiuser mode is enabled AND user is an admin + * + * Returns false if: + * - Multiuser mode is enabled AND user is not an admin + */ +export const getIsCustomNodesEnabled = (multiuserEnabled: boolean, isAdmin: boolean | undefined): boolean => { + if (!multiuserEnabled) { + return true; + } + return isAdmin ?? false; +}; + +type CustomNodesPermission = { + /** Whether setup status has loaded and a permission decision can be made. */ + isKnown: boolean; + /** Whether the current user is allowed to access custom node management. 
+ * Only meaningful when isKnown is true; defaults to false while loading. */ + isAllowed: boolean; +}; + +/** Minimal shapes the derivation needs — matches the runtime types from auth slice + RTK Query. */ +type SetupStatusLike = { multiuser_enabled: boolean } | undefined; +type UserLike = { is_admin: boolean } | null | undefined; + +/** + * Pure derivation of the permission state from the raw inputs the hook reads. + * Both the hook and the tests consume this directly so the two can never drift. + * + * - loading (setupStatus undefined) -> { isKnown: false, isAllowed: false } + * - resolved (setupStatus defined) -> { isKnown: true, isAllowed: getIsCustomNodesEnabled(...) } + */ +export const deriveCustomNodesPermission = (setupStatus: SetupStatusLike, user: UserLike): CustomNodesPermission => { + if (!setupStatus) { + return { isKnown: false, isAllowed: false }; + } + return { isKnown: true, isAllowed: getIsCustomNodesEnabled(setupStatus.multiuser_enabled, user?.is_admin) }; +}; + +/** + * Hook that returns two-state permission info for custom node management. + * + * - `isKnown`: false while setupStatus is still loading; true once resolved. + * - `isAllowed`: the actual permission decision (only trustworthy when isKnown is true). + * + * Consumers use these separately: + * - **VerticalNavBar**: show the tab only when `isAllowed` (conservative — hidden while loading). + * - **AppContent redirect**: only redirect away once `isKnown && !isAllowed` (avoids kicking + * a legitimate single-user session off a persisted customNodes tab before the query resolves). 
+ */ +export const useIsCustomNodesEnabled = (): CustomNodesPermission => { + const user = useAppSelector(selectCurrentUser); + const { data: setupStatus } = useGetSetupStatusQuery(); + + return useMemo(() => deriveCustomNodesPermission(setupStatus, user), [setupStatus, user]); +}; diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardsList.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardsList.tsx index 2d37a03f69f..c05a2df84fa 100644 --- a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardsList.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardsList.tsx @@ -14,6 +14,7 @@ import { useListAllBoardsQuery } from 'services/api/endpoints/boards'; import AddBoardButton from './AddBoardButton'; import GalleryBoard from './GalleryBoard'; import NoBoardBoard from './NoBoardBoard'; +import { VirtualBoardSection } from './VirtualBoardSection'; export const BoardsList = memo(() => { const { t } = useTranslation(); @@ -40,6 +41,7 @@ export const BoardsList = memo(() => { if (!boardSearchText.length) { elements.push(); + elements.push(); } filteredBoards.forEach((board) => { diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/VirtualBoardItem.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/VirtualBoardItem.tsx new file mode 100644 index 00000000000..d85c90f7dc1 --- /dev/null +++ b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/VirtualBoardItem.tsx @@ -0,0 +1,96 @@ +import type { SystemStyleObject } from '@invoke-ai/ui-library'; +import { Box, Flex, Icon, Image, Text, Tooltip } from '@invoke-ai/ui-library'; +import { skipToken } from '@reduxjs/toolkit/query'; +import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import { selectSelectedBoardId } from 'features/gallery/store/gallerySelectors'; +import { boardIdSelected } from 
'features/gallery/store/gallerySlice'; +import { memo, useCallback } from 'react'; +import { PiCalendarBold, PiImageSquare } from 'react-icons/pi'; +import { useGetImageDTOQuery } from 'services/api/endpoints/images'; +import type { VirtualSubBoard } from 'services/api/endpoints/virtual_boards'; + +const _hover: SystemStyleObject = { + bg: 'base.850', +}; + +interface VirtualBoardItemProps { + board: VirtualSubBoard; +} + +const VirtualBoardItem = ({ board }: VirtualBoardItemProps) => { + const dispatch = useAppDispatch(); + const selectedBoardId = useAppSelector(selectSelectedBoardId); + const isSelected = selectedBoardId === board.virtual_board_id; + + const onClick = useCallback(() => { + if (selectedBoardId !== board.virtual_board_id) { + dispatch(boardIdSelected({ boardId: board.virtual_board_id })); + } + }, [selectedBoardId, board.virtual_board_id, dispatch]); + + return ( + + + + + + + {board.board_name} + + + + + + {board.image_count} | {board.asset_count} + + + + + + ); +}; + +export default memo(VirtualBoardItem); + +const CoverImage = ({ coverImageName }: { coverImageName: string | null }) => { + const { currentData: coverImage } = useGetImageDTOQuery(coverImageName ?? 
skipToken); + + if (coverImage) { + return ( + + ); + } + + return ( + + + + ); +}; diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/VirtualBoardSection.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/VirtualBoardSection.tsx new file mode 100644 index 00000000000..bdadaf77fda --- /dev/null +++ b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/VirtualBoardSection.tsx @@ -0,0 +1,62 @@ +import { Collapse, Flex, Icon, IconButton, Text } from '@invoke-ai/ui-library'; +import { createSelector } from '@reduxjs/toolkit'; +import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import { selectGallerySlice, virtualBoardsSectionOpenChanged } from 'features/gallery/store/gallerySlice'; +import { memo, useCallback } from 'react'; +import { PiCalendarBold, PiCaretDownBold, PiCaretRightBold } from 'react-icons/pi'; +import { useListVirtualBoardsByDateQuery } from 'services/api/endpoints/virtual_boards'; + +import VirtualBoardItem from './VirtualBoardItem'; + +const selectShowVirtualBoards = createSelector(selectGallerySlice, (gallery) => gallery.showVirtualBoards); +const selectVirtualBoardsSectionOpen = createSelector( + selectGallerySlice, + (gallery) => gallery.virtualBoardsSectionOpen +); + +export const VirtualBoardSection = memo(() => { + const dispatch = useAppDispatch(); + const showVirtualBoards = useAppSelector(selectShowVirtualBoards); + const isOpen = useAppSelector(selectVirtualBoardsSectionOpen); + + const { data: virtualBoards } = useListVirtualBoardsByDateQuery(undefined, { + skip: !showVirtualBoards, + }); + + const toggleOpen = useCallback(() => { + dispatch(virtualBoardsSectionOpenChanged(!isOpen)); + }, [dispatch, isOpen]); + + if (!showVirtualBoards || !virtualBoards?.length) { + return null; + } + + return ( + + + + + + By Date + + + : } + onClick={toggleOpen} + /> + + + + {virtualBoards.map((board) => ( + + ))} + + + + ); +}); + 
+VirtualBoardSection.displayName = 'VirtualBoardSection'; diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsSettingsPopover.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsSettingsPopover.tsx index 3fef611f99b..814595e7f2e 100644 --- a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsSettingsPopover.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsSettingsPopover.tsx @@ -13,6 +13,7 @@ import { import BoardAutoAddSelect from 'features/gallery/components/Boards/BoardAutoAddSelect'; import AutoAssignBoardCheckbox from 'features/gallery/components/GallerySettingsPopover/AutoAssignBoardCheckbox'; import ShowArchivedBoardsCheckbox from 'features/gallery/components/GallerySettingsPopover/ShowArchivedBoardsCheckbox'; +import ShowVirtualBoardsCheckbox from 'features/gallery/components/GallerySettingsPopover/ShowVirtualBoardsCheckbox'; import { memo } from 'react'; import { useTranslation } from 'react-i18next'; import { PiGearSixFill } from 'react-icons/pi'; @@ -47,6 +48,7 @@ export const BoardsSettingsPopover = memo(() => { + diff --git a/invokeai/frontend/web/src/features/gallery/components/GallerySettingsPopover/ShowVirtualBoardsCheckbox.tsx b/invokeai/frontend/web/src/features/gallery/components/GallerySettingsPopover/ShowVirtualBoardsCheckbox.tsx new file mode 100644 index 00000000000..29e3e7ab3ce --- /dev/null +++ b/invokeai/frontend/web/src/features/gallery/components/GallerySettingsPopover/ShowVirtualBoardsCheckbox.tsx @@ -0,0 +1,29 @@ +import { Checkbox, FormControl, FormLabel } from '@invoke-ai/ui-library'; +import { createSelector } from '@reduxjs/toolkit'; +import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import { selectGallerySlice, showVirtualBoardsChanged } from 'features/gallery/store/gallerySlice'; +import type { ChangeEvent } from 'react'; +import { memo, useCallback } from 'react'; + +const selectShowVirtualBoards = 
createSelector(selectGallerySlice, (gallery) => gallery.showVirtualBoards); + +const ShowVirtualBoardsCheckbox = () => { + const dispatch = useAppDispatch(); + const showVirtualBoards = useAppSelector(selectShowVirtualBoards); + + const onChange = useCallback( + (e: ChangeEvent) => { + dispatch(showVirtualBoardsChanged(e.target.checked)); + }, + [dispatch] + ); + + return ( + + Virtual Boards + + + ); +}; + +export default memo(ShowVirtualBoardsCheckbox); diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx index e123d0ebd06..105ad3dfd67 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx @@ -63,6 +63,8 @@ export const ImageMetadataActions = memo((props: Props) => { + +
); diff --git a/invokeai/frontend/web/src/features/gallery/components/use-gallery-image-names.ts b/invokeai/frontend/web/src/features/gallery/components/use-gallery-image-names.ts index c81728a1b21..487c5609062 100644 --- a/invokeai/frontend/web/src/features/gallery/components/use-gallery-image-names.ts +++ b/invokeai/frontend/web/src/features/gallery/components/use-gallery-image-names.ts @@ -1,21 +1,61 @@ +import { skipToken } from '@reduxjs/toolkit/query'; import { EMPTY_ARRAY } from 'app/store/constants'; import { useAppSelector } from 'app/store/storeHooks'; -import { selectGetImageNamesQueryArgs } from 'features/gallery/store/gallerySelectors'; +import { selectGetImageNamesQueryArgs, selectSelectedBoardId } from 'features/gallery/store/gallerySelectors'; +import { getDateFromVirtualBoardId, isVirtualBoardId } from 'features/gallery/store/types'; import { useGetImageNamesQuery } from 'services/api/endpoints/images'; +import { useGetVirtualBoardImageNamesByDateQuery } from 'services/api/endpoints/virtual_boards'; import { useDebounce } from 'use-debounce'; -const getImageNamesQueryOptions = { +const selectFromResult = ({ + currentData, + isLoading, + isFetching, +}: { + currentData?: { image_names: string[] }; + isLoading: boolean; + isFetching: boolean; +}) => ({ + imageNames: currentData?.image_names ?? EMPTY_ARRAY, + isLoading, + isFetching, +}); + +const queryOptions = { refetchOnReconnect: true, - selectFromResult: ({ currentData, isLoading, isFetching }) => ({ - imageNames: currentData?.image_names ?? 
EMPTY_ARRAY, - isLoading, - isFetching, - }), -} satisfies Parameters[1]; + selectFromResult, +}; export const useGalleryImageNames = () => { + const selectedBoardId = useAppSelector(selectSelectedBoardId); const _queryArgs = useAppSelector(selectGetImageNamesQueryArgs); const [queryArgs] = useDebounce(_queryArgs, 300); - const { imageNames, isLoading, isFetching } = useGetImageNamesQuery(queryArgs, getImageNamesQueryOptions); - return { imageNames, isLoading, isFetching, queryArgs }; + const isVirtual = isVirtualBoardId(selectedBoardId); + + // Regular board query + const regularResult = useGetImageNamesQuery(isVirtual ? skipToken : queryArgs, queryOptions); + + // Virtual board query + const date = isVirtual ? getDateFromVirtualBoardId(selectedBoardId) : ''; + const virtualResult = useGetVirtualBoardImageNamesByDateQuery( + isVirtual + ? { + date, + categories: queryArgs.categories ?? undefined, + search_term: queryArgs.search_term || undefined, + order_dir: queryArgs.order_dir, + starred_first: queryArgs.starred_first, + } + : skipToken, + queryOptions + ); + + const result = isVirtual ? 
virtualResult : regularResult; + + return { + imageNames: result.imageNames, + isLoading: result.isLoading, + isFetching: result.isFetching, + queryArgs, + }; }; diff --git a/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts b/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts index 6a25caadce4..e4894b60766 100644 --- a/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts +++ b/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts @@ -12,6 +12,7 @@ import { type ComparisonMode, type GalleryState, type GalleryView, + isVirtualBoardId, type OrderDir, zGalleryState, } from './types'; @@ -33,6 +34,8 @@ const getInitialState = (): GalleryState => ({ comparisonMode: 'slider', comparisonFit: 'fill', shouldShowArchivedBoards: false, + showVirtualBoards: false, + virtualBoardsSectionOpen: true, boardsListOrderBy: 'created_at', boardsListOrderDir: 'DESC', }); @@ -103,6 +106,10 @@ const slice = createSlice({ state.autoAddBoardId = 'none'; return; } + // Virtual boards cannot be auto-add targets + if (isVirtualBoardId(action.payload)) { + return; + } state.autoAddBoardId = action.payload; }, galleryViewChanged: (state, action: PayloadAction) => { @@ -127,6 +134,17 @@ const slice = createSlice({ shouldShowArchivedBoardsChanged: (state, action: PayloadAction) => { state.shouldShowArchivedBoards = action.payload; }, + showVirtualBoardsChanged: (state, action: PayloadAction) => { + state.showVirtualBoards = action.payload; + // If virtual boards are hidden and a virtual board is selected, reset to 'none' + if (!action.payload && isVirtualBoardId(state.selectedBoardId)) { + state.selectedBoardId = 'none'; + state.selection = []; + } + }, + virtualBoardsSectionOpenChanged: (state, action: PayloadAction) => { + state.virtualBoardsSectionOpen = action.payload; + }, starredFirstChanged: (state, action: PayloadAction) => { state.starredFirst = action.payload; }, @@ -172,6 +190,8 @@ export const { orderDirChanged, starredFirstChanged, 
shouldShowArchivedBoardsChanged, + showVirtualBoardsChanged, + virtualBoardsSectionOpenChanged, searchTermChanged, boardsListOrderByChanged, boardsListOrderDirChanged, @@ -189,6 +209,13 @@ export const gallerySliceConfig: SliceConfig = { if (!('_version' in state)) { state._version = 1; } + // Add virtual boards fields if missing (added in virtual boards feature) + if (!('showVirtualBoards' in state)) { + state.showVirtualBoards = false; + } + if (!('virtualBoardsSectionOpen' in state)) { + state.virtualBoardsSectionOpen = true; + } return zGalleryState.parse(state); }, persistDenylist: ['selection', 'galleryView', 'imageToCompare'], diff --git a/invokeai/frontend/web/src/features/gallery/store/types.ts b/invokeai/frontend/web/src/features/gallery/store/types.ts index addeefe870f..c040e5834d7 100644 --- a/invokeai/frontend/web/src/features/gallery/store/types.ts +++ b/invokeai/frontend/web/src/features/gallery/store/types.ts @@ -35,8 +35,16 @@ export const zGalleryState = z.object({ comparisonMode: zComparisonMode, comparisonFit: zComparisonFit, shouldShowArchivedBoards: z.boolean(), + showVirtualBoards: z.boolean(), + virtualBoardsSectionOpen: z.boolean(), boardsListOrderBy: zBoardRecordOrderBy, boardsListOrderDir: zOrderDir, }); export type GalleryState = z.infer; + +const VIRTUAL_BOARD_ID_PREFIX = 'by_date:'; + +export const isVirtualBoardId = (id: string): boolean => id.startsWith(VIRTUAL_BOARD_ID_PREFIX); + +export const getDateFromVirtualBoardId = (id: string): string => id.replace(VIRTUAL_BOARD_ID_PREFIX, ''); diff --git a/invokeai/frontend/web/src/features/metadata/parsing.test.tsx b/invokeai/frontend/web/src/features/metadata/parsing.test.tsx new file mode 100644 index 00000000000..bb295303273 --- /dev/null +++ b/invokeai/frontend/web/src/features/metadata/parsing.test.tsx @@ -0,0 +1,174 @@ +import type { AppStore } from 'app/store/store'; +import type * as paramsSliceModule from 'features/controlLayers/store/paramsSlice'; +import { ImageMetadataHandlers } 
from 'features/metadata/parsing'; +import type * as modelsApiModule from 'services/api/endpoints/models'; +import { beforeEach, describe, expect, it, vi } from 'vitest'; + +// --------------------------------------------------------------------------- +// Module mocks +// +// We are testing only the *gating* logic of the model-related metadata +// handlers (`VAEModel`, `KleinVAEModel`, `KleinQwen3EncoderModel`). The actual +// model lookup goes through `parseModelIdentifier`, which dispatches RTK +// Query thunks. We stub the models endpoint so that any lookup resolves to a +// canned model identifier — the parse step then succeeds and the assertions +// inside each handler become observable. +// --------------------------------------------------------------------------- + +let currentBase: string | null = 'flux2'; + +vi.mock('features/controlLayers/store/paramsSlice', async (importOriginal) => { + const mod = await importOriginal(); + return { ...mod, selectBase: () => currentBase }; +}); + +const fakeModel = (type: 'vae' | 'qwen3_encoder', base: string) => ({ + key: `${type}-key`, + hash: 'hash', + name: `Some ${type}`, + base, + type, +}); + +let nextResolved: ReturnType = fakeModel('vae', 'flux2'); + +vi.mock('services/api/endpoints/models', async (importOriginal) => { + const mod = await importOriginal(); + return { + ...mod, + modelsApi: { + ...mod.modelsApi, + endpoints: { + ...mod.modelsApi.endpoints, + getModelConfig: { initiate: (key: string) => ({ type: 'rtkq/initiate', key }) }, + }, + }, + }; +}); + +const makeStore = (): AppStore => + ({ + dispatch: vi.fn(() => ({ + unwrap: () => Promise.resolve(nextResolved), + })), + getState: () => ({}), + }) as unknown as AppStore; + +beforeEach(() => { + currentBase = 'flux2'; + nextResolved = fakeModel('vae', 'flux2'); +}); + +describe('ImageMetadataHandlers — Klein recall gating', () => { + describe('KleinVAEModel', () => { + it('parses metadata.vae when the current main model is FLUX.2 Klein', async () => { + 
currentBase = 'flux2'; + nextResolved = fakeModel('vae', 'flux2'); + const store = makeStore(); + + const parsed = await ImageMetadataHandlers.KleinVAEModel.parse({ vae: nextResolved }, store); + + expect(parsed.key).toBe('vae-key'); + expect(parsed.type).toBe('vae'); + }); + + it('rejects parsing when the current main model is not FLUX.2 Klein', async () => { + currentBase = 'sdxl'; + nextResolved = fakeModel('vae', 'flux2'); + const store = makeStore(); + + await expect(ImageMetadataHandlers.KleinVAEModel.parse({ vae: nextResolved }, store)).rejects.toThrow(); + }); + }); + + describe('KleinQwen3EncoderModel', () => { + it('parses metadata.qwen3_encoder when the current main model is FLUX.2 Klein', async () => { + currentBase = 'flux2'; + nextResolved = fakeModel('qwen3_encoder', 'flux2'); + const store = makeStore(); + + const parsed = await ImageMetadataHandlers.KleinQwen3EncoderModel.parse({ qwen3_encoder: nextResolved }, store); + + expect(parsed.key).toBe('qwen3_encoder-key'); + expect(parsed.type).toBe('qwen3_encoder'); + }); + + it('rejects parsing when the current main model is not FLUX.2 Klein', async () => { + currentBase = 'sdxl'; + nextResolved = fakeModel('qwen3_encoder', 'flux2'); + const store = makeStore(); + + await expect( + ImageMetadataHandlers.KleinQwen3EncoderModel.parse({ qwen3_encoder: nextResolved }, store) + ).rejects.toThrow(); + }); + }); + + describe('VAEModel (generic)', () => { + // The generic VAEModel handler must NOT also fire for FLUX.2 / Z-Image + // images, otherwise the metadata viewer renders duplicate VAE rows next + // to the dedicated KleinVAEModel / ZImageVAEModel handlers. 
+ it.each(['flux2', 'z-image'])('rejects parsing when current base is %s', async (base) => { + currentBase = base; + nextResolved = fakeModel('vae', base); + const store = makeStore(); + + await expect(ImageMetadataHandlers.VAEModel.parse({ vae: nextResolved }, store)).rejects.toThrow(); + }); + + it('parses successfully for non-Klein, non-Z-Image bases', async () => { + currentBase = 'sdxl'; + nextResolved = fakeModel('vae', 'sdxl'); + const store = makeStore(); + + const parsed = await ImageMetadataHandlers.VAEModel.parse({ vae: nextResolved }, store); + expect(parsed.key).toBe('vae-key'); + }); + }); + + describe('Guidance (legacy FLUX.2 gating)', () => { + // Prior to the Klein guidance cleanup, FLUX.2 images wrote a `guidance` + // field into metadata. The guidance scalar is inert for all current Klein + // variants, so legacy values must not be recalled into the shared guidance + // state — otherwise they leak back into FLUX.1 when the user switches + // models. + it('rejects parsing when the image was generated with a FLUX.2 model', async () => { + const store = makeStore(); + + await expect( + Promise.resolve().then(() => + ImageMetadataHandlers.Guidance.parse( + { + model: { key: 'k', hash: 'h', name: 'Klein 9B Base', base: 'flux2', type: 'main' }, + guidance: 3.5, + }, + store + ) + ) + ).rejects.toThrow(); + }); + + it('parses successfully for FLUX.1 metadata', async () => { + const store = makeStore(); + + const parsed = await ImageMetadataHandlers.Guidance.parse( + { + model: { key: 'k', hash: 'h', name: 'FLUX Dev', base: 'flux', type: 'main' }, + guidance: 3.5, + }, + store + ); + + expect(parsed).toBe(3.5); + }); + + it('parses successfully when no model metadata is present', async () => { + // Metadata without a model field should still parse (back-compat for + // images where only scalar params were saved). 
+ const store = makeStore(); + + const parsed = await ImageMetadataHandlers.Guidance.parse({ guidance: 3.5 }, store); + expect(parsed).toBe(3.5); + }); + }); +}); diff --git a/invokeai/frontend/web/src/features/metadata/parsing.tsx b/invokeai/frontend/web/src/features/metadata/parsing.tsx index 24b643da319..cf55f378106 100644 --- a/invokeai/frontend/web/src/features/metadata/parsing.tsx +++ b/invokeai/frontend/web/src/features/metadata/parsing.tsx @@ -379,6 +379,15 @@ const Guidance: SingleMetadataHandler = { [SingleMetadataKey]: true, type: 'Guidance', parse: (metadata, _store) => { + // Legacy FLUX.2 images may still carry a `guidance` field, but guidance_embeds + // is inert for all current Klein variants. Reject parsing for FLUX.2 metadata + // so the handler is skipped on both display and recall - avoids leaking a stale + // value into the shared guidance param (which is still used by FLUX.1). + const rawModel = getProperty(metadata, 'model'); + const modelBase = (rawModel as { base?: unknown } | undefined)?.base; + if (modelBase === 'flux2') { + throw new Error('Guidance is not used for FLUX.2 Klein models.'); + } const raw = getProperty(metadata, 'guidance'); const parsed = zParameterGuidance.parse(raw); return Promise.resolve(parsed); @@ -957,6 +966,9 @@ const VAEModel: SingleMetadataHandler = { const parsed = await parseModelIdentifier(raw, store, 'vae'); assert(parsed.type === 'vae'); assert(isCompatibleWithMainModel(parsed, store)); + // Z-Image and FLUX.2 Klein have dedicated VAE handlers; avoid rendering a duplicate row. 
+ const base = selectBase(store.getState()); + assert(base !== 'z-image' && base !== 'flux2', 'VAEModel handler does not apply to Z-Image or FLUX.2 Klein'); return Promise.resolve(parsed); }, recall: (value, store) => { diff --git a/invokeai/frontend/web/src/features/modelManagerV2/hooks/useEncoderModelSettings.ts b/invokeai/frontend/web/src/features/modelManagerV2/hooks/useEncoderModelSettings.ts index b1521f55fce..6b3e9d71010 100644 --- a/invokeai/frontend/web/src/features/modelManagerV2/hooks/useEncoderModelSettings.ts +++ b/invokeai/frontend/web/src/features/modelManagerV2/hooks/useEncoderModelSettings.ts @@ -7,6 +7,7 @@ import type { Qwen3EncoderModelConfig, SigLIPModelConfig, T5EncoderModelConfig, + TextLLMModelConfig, } from 'services/api/types'; type EncoderModelConfig = @@ -15,7 +16,8 @@ type EncoderModelConfig = | Qwen3EncoderModelConfig | CLIPVisionModelConfig | SigLIPModelConfig - | LlavaOnevisionModelConfig; + | LlavaOnevisionModelConfig + | TextLLMModelConfig; export const useEncoderModelSettings = (modelConfig: EncoderModelConfig) => { const encoderModelSettingsDefaults = useMemo(() => { diff --git a/invokeai/frontend/web/src/features/modelManagerV2/models.ts b/invokeai/frontend/web/src/features/modelManagerV2/models.ts index 7cdba474bbf..20f44850014 100644 --- a/invokeai/frontend/web/src/features/modelManagerV2/models.ts +++ b/invokeai/frontend/web/src/features/modelManagerV2/models.ts @@ -17,6 +17,7 @@ import { isSpandrelImageToImageModelConfig, isT2IAdapterModelConfig, isT5EncoderModelConfig, + isTextLLMModelConfig, isTIModelConfig, isUnknownModelConfig, isVAEModelConfig, @@ -122,6 +123,11 @@ const MODEL_CATEGORIES: Record = { i18nKey: 'modelManager.llavaOnevision', filter: isLLaVAModelConfig, }, + text_llm: { + category: 'text_llm', + i18nKey: 'modelManager.textLLM', + filter: isTextLLMModelConfig, + }, external_image_generator: { category: 'external_image_generator', i18nKey: 'modelManager.externalImageGenerator', @@ -176,6 +182,7 @@ export 
const MODEL_TYPE_TO_LONG_NAME: Record = { clip_embed: 'CLIP Embed', siglip: 'SigLIP', flux_redux: 'FLUX Redux', + text_llm: 'Text LLM', external_image_generator: 'External Image Generator', unknown: 'Unknown', }; @@ -228,6 +235,7 @@ export const MODEL_VARIANT_TO_LONG_NAME: Record = { dev_fill: 'FLUX Dev - Fill', schnell: 'FLUX Schnell', klein_4b: 'FLUX.2 Klein 4B', + klein_4b_base: 'FLUX.2 Klein 4B Base', klein_9b: 'FLUX.2 Klein 9B', klein_9b_base: 'FLUX.2 Klein 9B Base', turbo: 'Z-Image Turbo', diff --git a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/EncoderModelSettings/EncoderModelSettings.tsx b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/EncoderModelSettings/EncoderModelSettings.tsx index e10766214f4..9bfe3974ddf 100644 --- a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/EncoderModelSettings/EncoderModelSettings.tsx +++ b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/EncoderModelSettings/EncoderModelSettings.tsx @@ -19,6 +19,7 @@ import type { Qwen3EncoderModelConfig, SigLIPModelConfig, T5EncoderModelConfig, + TextLLMModelConfig, } from 'services/api/types'; export type EncoderModelSettingsFormData = { @@ -31,7 +32,8 @@ type EncoderModelConfig = | Qwen3EncoderModelConfig | CLIPVisionModelConfig | SigLIPModelConfig - | LlavaOnevisionModelConfig; + | LlavaOnevisionModelConfig + | TextLLMModelConfig; type Props = { modelConfig: EncoderModelConfig; diff --git a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/ModelEdit.tsx b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/ModelEdit.tsx index 66880e2361e..0d46fc3605a 100644 --- a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/ModelEdit.tsx +++ b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/ModelEdit.tsx @@ -172,6 +172,12 @@ export const ModelEdit = memo(({ modelConfig }: Props) => {