diff --git a/.github/workflows/deploy-docs.yml b/.github/workflows/deploy-docs.yml
new file mode 100644
index 0000000..d12cda8
--- /dev/null
+++ b/.github/workflows/deploy-docs.yml
@@ -0,0 +1,70 @@
+name: Deploy Documentation
+
+on:
+ push:
+ branches:
+ - main
+ - docs-website
+ pull_request:
+ branches:
+ - main
+ workflow_dispatch:
+
+# Sets permissions for GitHub Pages deployment
+permissions:
+ contents: read
+ pages: write
+ id-token: write
+
+# Allow one concurrent deployment
+concurrency:
+ group: "pages"
+ cancel-in-progress: true
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: '3.11'
+
+ - name: Install uv
+ uses: astral-sh/setup-uv@v4
+
+ - name: Install dependencies
+ run: |
+ uv sync --group docs
+
+ - name: Install Pandoc (required for nbsphinx)
+ run: |
+ sudo apt-get update
+ sudo apt-get install -y pandoc
+
+ - name: Build documentation
+ run: |
+ uv run sphinx-build -b html docs/source build/html
+
+ - name: Upload artifact
+ uses: actions/upload-pages-artifact@v3
+ with:
+ path: build/html
+
+ deploy:
+ needs: build
+ if: github.event_name == 'push' && ( github.ref == 'refs/heads/main' || github.ref == 'refs/heads/docs-website' )
+ runs-on: ubuntu-latest
+
+ environment:
+ name: github-pages
+ url: ${{ steps.deployment.outputs.page_url }}
+
+ steps:
+ - name: Deploy to GitHub Pages
+ id: deployment
+ uses: actions/deploy-pages@v4
diff --git a/.gitignore b/.gitignore
index 31f2232..5853749 100644
--- a/.gitignore
+++ b/.gitignore
@@ -70,6 +70,8 @@ instance/
# Sphinx documentation
docs/_build/
+docs/build/
+docs/source/_autosummary/
# PyBuilder
.pybuilder/
@@ -165,8 +167,7 @@ lightning_logs/
# Training data
data/training_data.txt
-# Docs
-docs/
+# Other
fastTextAttention.py
*.pth
diff --git a/README.md b/README.md
index 6772d79..804b293 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,7 @@
# torchTextClassifiers
+[📖 Documentation](https://inseefrlab.github.io/torchTextClassifiers/)
+
A unified, extensible framework for text classification with categorical variables built on [PyTorch](https://pytorch.org/) and [PyTorch Lightning](https://lightning.ai/docs/pytorch/stable/).
## 🚀 Features
@@ -30,6 +32,16 @@ uv sync
pip install -e .
```
+## 📖 Documentation
+
+Full documentation is available at: **https://inseefrlab.github.io/torchTextClassifiers/**
+
+The documentation includes:
+- **Getting Started**: Installation and quick start guide
+- **Architecture**: Understanding the 3-layer design
+- **Tutorials**: Step-by-step guides for different use cases
+- **API Reference**: Complete API documentation
+
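+To build the documentation locally, you can run the same commands as the deploy workflow (Pandoc is required for the notebook pages):
+
+```bash
+# Install the docs dependency group
+uv sync --group docs
+
+# Build the HTML documentation
+uv run sphinx-build -b html docs/source build/html
+```
+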
## 📝 Usage
Checkout the [notebook](notebooks/example.ipynb) for a quick start.
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 0000000..88b4154
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,19 @@
+# Minimal makefile for Sphinx documentation
+
+# You can set these variables from the command line, and also
+# from the environment for the first two.
+SPHINXOPTS ?=
+SPHINXBUILD ?= sphinx-build
+SOURCEDIR = source
+BUILDDIR = build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+ @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+ @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/docs/make.bat b/docs/make.bat
new file mode 100644
index 0000000..747ffb7
--- /dev/null
+++ b/docs/make.bat
@@ -0,0 +1,35 @@
+@ECHO OFF
+
+pushd %~dp0
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+ set SPHINXBUILD=sphinx-build
+)
+set SOURCEDIR=source
+set BUILDDIR=build
+
+%SPHINXBUILD% >NUL 2>NUL
+if errorlevel 9009 (
+ echo.
+ echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+ echo.installed, then set the SPHINXBUILD environment variable to point
+ echo.to the full path of the 'sphinx-build' executable. Alternatively you
+ echo.may add the Sphinx directory to PATH.
+ echo.
+ echo.If you don't have Sphinx installed, grab it from
+ echo.https://www.sphinx-doc.org/
+ exit /b 1
+)
+
+if "%1" == "" goto help
+
+%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+goto end
+
+:help
+%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+
+:end
+popd
diff --git a/docs/source/_static/custom.css b/docs/source/_static/custom.css
new file mode 100644
index 0000000..7c43b9e
--- /dev/null
+++ b/docs/source/_static/custom.css
@@ -0,0 +1,148 @@
+/* Custom styling for torchTextClassifiers documentation with pydata-sphinx-theme */
+
+/* Improve code block styling */
+div.highlight {
+ border-radius: 6px;
+ border: 1px solid var(--pst-color-border);
+ margin: 1em 0;
+}
+
+div.highlight pre {
+ padding: 12px;
+ overflow-x: auto;
+}
+
+/* Better admonition styling */
+.admonition {
+ border-radius: 6px;
+ padding: 1rem;
+ margin: 1rem 0;
+}
+
+.admonition-title {
+ font-weight: 600;
+ margin-bottom: 0.5rem;
+}
+
+/* Improve table styling */
+table.docutils {
+ border-collapse: collapse;
+ width: 100%;
+ margin: 1em 0;
+}
+
+table.docutils td,
+table.docutils th {
+ border: 1px solid var(--pst-color-border);
+ padding: 0.5rem;
+}
+
+table.docutils th {
+ background-color: var(--pst-color-surface);
+ font-weight: 600;
+}
+
+/* Navigation improvements */
+.bd-sidebar {
+ font-size: 0.9rem;
+}
+
+.bd-sidebar .nav-link {
+ padding: 0.25rem 0.5rem;
+}
+
+/* Logo text styling */
+.navbar-brand {
+ font-weight: 600;
+ font-size: 1.25rem;
+}
+
+/* Improve inline code styling */
+code.docutils.literal {
+ background-color: var(--pst-color-surface);
+ padding: 0.1em 0.4em;
+ border-radius: 3px;
+ font-size: 0.9em;
+}
+
+/* Better spacing for content */
+.bd-content {
+ padding: 2rem;
+}
+
+/* Improve heading spacing */
+.bd-content h1 {
+ margin-top: 0;
+ margin-bottom: 1.5rem;
+ padding-bottom: 0.5rem;
+ border-bottom: 2px solid var(--pst-color-border);
+}
+
+.bd-content h2 {
+ margin-top: 2rem;
+ margin-bottom: 1rem;
+}
+
+.bd-content h3 {
+ margin-top: 1.5rem;
+ margin-bottom: 0.75rem;
+}
+
+/* Cards and grids (from sphinx-design) */
+.sd-card {
+ border-radius: 8px;
+ box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
+ transition: box-shadow 0.3s ease;
+}
+
+.sd-card:hover {
+ box-shadow: 0 4px 8px rgba(0, 0, 0, 0.15);
+}
+
+/* Improve footer spacing */
+footer.bd-footer {
+ margin-top: 3rem;
+ padding-top: 2rem;
+ border-top: 1px solid var(--pst-color-border);
+}
+
+/* Better responsive images */
+img {
+ max-width: 100%;
+ height: auto;
+ border-radius: 4px;
+}
+
+/* Improve API documentation layout */
+dl.py.class,
+dl.py.function,
+dl.py.method {
+ margin-bottom: 2rem;
+}
+
+dt.sig {
+ background-color: var(--pst-color-surface);
+ padding: 0.5rem 1rem;
+ border-radius: 4px;
+ border-left: 3px solid var(--pst-color-primary);
+}
+
+dd {
+ margin-left: 2rem;
+ margin-top: 0.5rem;
+}
+
+/* Parameter list styling */
+dl.field-list {
+ margin-top: 1rem;
+}
+
+dl.field-list dt {
+ font-weight: 600;
+ margin-bottom: 0.25rem;
+}
+
+dl.field-list dd {
+ margin-left: 1.5rem;
+ margin-bottom: 0.5rem;
+}
diff --git a/docs/source/_static/logo-ttc-dark.svg b/docs/source/_static/logo-ttc-dark.svg
new file mode 100644
index 0000000..90f761f
--- /dev/null
+++ b/docs/source/_static/logo-ttc-dark.svg
@@ -0,0 +1,16 @@
+
diff --git a/docs/source/_static/logo-ttc-light.svg b/docs/source/_static/logo-ttc-light.svg
new file mode 100644
index 0000000..5c5f446
--- /dev/null
+++ b/docs/source/_static/logo-ttc-light.svg
@@ -0,0 +1,16 @@
+
diff --git a/docs/source/api/components.rst b/docs/source/api/components.rst
new file mode 100644
index 0000000..5ac2f9f
--- /dev/null
+++ b/docs/source/api/components.rst
@@ -0,0 +1,273 @@
+Model Components
+================
+
+Modular torch.nn.Module components for building custom architectures.
+
+.. currentmodule:: torchTextClassifiers.model.components
+
+Text Embedding
+--------------
+
+TextEmbedder
+~~~~~~~~~~~~
+
+Embeds text tokens with optional self-attention.
+
+.. autoclass:: torchTextClassifiers.model.components.text_embedder.TextEmbedder
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+TextEmbedderConfig
+~~~~~~~~~~~~~~~~~~
+
+Configuration for TextEmbedder.
+
+.. autoclass:: torchTextClassifiers.model.components.text_embedder.TextEmbedderConfig
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Example:
+
+.. code-block:: python
+
+ from torchTextClassifiers.model.components import TextEmbedder, TextEmbedderConfig
+
+ # Simple text embedder
+ config = TextEmbedderConfig(
+ vocab_size=5000,
+ embedding_dim=128,
+ attention_config=None
+ )
+ embedder = TextEmbedder(config)
+
+ # With self-attention
+ from torchTextClassifiers.model.components import AttentionConfig
+
+ attention_config = AttentionConfig(
+ n_embd=128,
+ n_head=4,
+ n_layer=2,
+ dropout=0.1
+ )
+ config = TextEmbedderConfig(
+ vocab_size=5000,
+ embedding_dim=128,
+ attention_config=attention_config
+ )
+ embedder = TextEmbedder(config)
+
+Categorical Features
+--------------------
+
+CategoricalVariableNet
+~~~~~~~~~~~~~~~~~~~~~~
+
+Handles categorical features alongside text.
+
+.. autoclass:: torchTextClassifiers.model.components.categorical_var_net.CategoricalVariableNet
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+CategoricalForwardType
+~~~~~~~~~~~~~~~~~~~~~~
+
+Enum for categorical feature combination strategies.
+
+.. autoclass:: torchTextClassifiers.model.components.categorical_var_net.CategoricalForwardType
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+ .. attribute:: SUM_TO_TEXT
+
+ Sum categorical embeddings, concatenate with text.
+
+ .. attribute:: AVERAGE_AND_CONCAT
+
+ Average categorical embeddings, concatenate with text.
+
+ .. attribute:: CONCATENATE_ALL
+
+ Concatenate all embeddings (text + each categorical).
+
+Example:
+
+.. code-block:: python
+
+ from torchTextClassifiers.model.components import (
+ CategoricalVariableNet,
+ CategoricalForwardType
+ )
+
+ # 3 categorical variables with different vocab sizes
+ cat_net = CategoricalVariableNet(
+ vocabulary_sizes=[10, 5, 20],
+ embedding_dims=[8, 4, 16],
+ forward_type=CategoricalForwardType.AVERAGE_AND_CONCAT
+ )
+
+ # Forward pass
+ cat_embeddings = cat_net(categorical_data)
+
+Classification Head
+-------------------
+
+ClassificationHead
+~~~~~~~~~~~~~~~~~~
+
+Linear classification layer(s).
+
+.. autoclass:: torchTextClassifiers.model.components.classification_head.ClassificationHead
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Example:
+
+.. code-block:: python
+
+ from torchTextClassifiers.model.components import ClassificationHead
+
+ # Simple linear classifier
+ head = ClassificationHead(
+ input_dim=128,
+ num_classes=5
+ )
+
+ # Custom classifier with nested nn.Module
+ import torch.nn as nn
+
+ custom_head_module = nn.Sequential(
+ nn.Linear(128, 64),
+ nn.ReLU(),
+ nn.Dropout(0.2),
+ nn.Linear(64, 5)
+ )
+
+ head = ClassificationHead(net=custom_head_module)
+
+Attention Mechanism
+-------------------
+
+AttentionConfig
+~~~~~~~~~~~~~~~
+
+Configuration for transformer-style self-attention.
+
+.. autoclass:: torchTextClassifiers.model.components.attention.AttentionConfig
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+ .. rubric:: Attributes
+
+ .. attribute:: n_embd
+ :type: int
+
+ Embedding dimension.
+
+ .. attribute:: n_head
+ :type: int
+
+ Number of attention heads.
+
+ .. attribute:: n_layer
+ :type: int
+
+ Number of transformer blocks.
+
+ .. attribute:: dropout
+ :type: float
+
+ Dropout rate (default: 0.0).
+
+ .. attribute:: bias
+ :type: bool
+
+ Use bias in linear layers (default: False).
+
+Block
+~~~~~
+
+Single transformer block with self-attention + MLP.
+
+.. autoclass:: torchTextClassifiers.model.components.attention.Block
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+SelfAttentionLayer
+~~~~~~~~~~~~~~~~~~
+
+Multi-head self-attention layer.
+
+.. autoclass:: torchTextClassifiers.model.components.attention.SelfAttentionLayer
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+MLP
+~~~
+
+Feed-forward network.
+
+.. autoclass:: torchTextClassifiers.model.components.attention.MLP
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Example:
+
+.. code-block:: python
+
+ from torchTextClassifiers.model.components import AttentionConfig, Block
+
+ # Configure attention
+ config = AttentionConfig(
+ n_embd=128,
+ n_head=4,
+ n_layer=3,
+ dropout=0.1
+ )
+
+ # Create transformer block
+ block = Block(config)
+
+ # Forward pass (requires rotary embeddings cos, sin)
+ output = block(embeddings, cos, sin)
+
+Composing Components
+--------------------
+
+Components can be composed to create custom architectures:
+
+.. code-block:: python
+
+    import torch
+    import torch.nn as nn
+ from torchTextClassifiers.model.components import (
+ TextEmbedder, CategoricalVariableNet, ClassificationHead
+ )
+
+ class CustomModel(nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.text_embedder = TextEmbedder(text_config)
+ self.cat_net = CategoricalVariableNet(...)
+ self.head = ClassificationHead(...)
+
+ def forward(self, input_ids, categorical_data):
+ text_features = self.text_embedder(input_ids)
+ cat_features = self.cat_net(categorical_data)
+ combined = torch.cat([text_features, cat_features], dim=1)
+ return self.head(combined)
+
+See Also
+--------
+
+* :doc:`model` - How components are used in models
+* :doc:`../architecture/overview` - Architecture explanation
+* :doc:`configs` - ModelConfig for component configuration
+
diff --git a/docs/source/api/configs.rst b/docs/source/api/configs.rst
new file mode 100644
index 0000000..0e8b5a4
--- /dev/null
+++ b/docs/source/api/configs.rst
@@ -0,0 +1,192 @@
+Configuration Classes
+=====================
+
+Configuration dataclasses for model and training setup.
+
+.. currentmodule:: torchTextClassifiers.torchTextClassifiers
+
+ModelConfig
+-----------
+
+Configuration for model architecture.
+
+.. autoclass:: ModelConfig
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+ .. rubric:: Attributes
+
+ .. attribute:: embedding_dim
+ :type: int
+
+ Dimension of text embeddings.
+
+ .. attribute:: categorical_vocabulary_sizes
+ :type: Optional[List[int]]
+
+ Vocabulary sizes for categorical variables (optional).
+
+ .. attribute:: categorical_embedding_dims
+ :type: Optional[Union[List[int], int]]
+
+ Embedding dimensions for categorical variables (optional).
+
+ .. attribute:: num_classes
+ :type: Optional[int]
+
+ Number of output classes (optional, inferred from data if not provided).
+
+ .. attribute:: attention_config
+ :type: Optional[AttentionConfig]
+
+ Configuration for attention mechanism (optional).
+
+Example
+~~~~~~~
+
+.. code-block:: python
+
+ from torchTextClassifiers import ModelConfig
+ from torchTextClassifiers.model.components import AttentionConfig
+
+ # Simple configuration
+ config = ModelConfig(
+ embedding_dim=128,
+ num_classes=3
+ )
+
+ # With categorical features
+ config = ModelConfig(
+ embedding_dim=128,
+ num_classes=5,
+ categorical_vocabulary_sizes=[10, 20, 5], # 3 categorical variables
+ categorical_embedding_dims=[8, 16, 4] # Their embedding dimensions
+ )
+
+ # With attention
+ attention_config = AttentionConfig(
+ n_embd=128,
+ n_head=4,
+ n_layer=2,
+ dropout=0.1
+ )
+ config = ModelConfig(
+ embedding_dim=128,
+ num_classes=2,
+ attention_config=attention_config
+ )
+
+TrainingConfig
+--------------
+
+Configuration for training process.
+
+.. autoclass:: TrainingConfig
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+ .. rubric:: Attributes
+
+ .. attribute:: num_epochs
+ :type: int
+
+ Number of training epochs.
+
+ .. attribute:: batch_size
+ :type: int
+
+ Batch size for training.
+
+ .. attribute:: lr
+ :type: float
+
+ Learning rate.
+
+ .. attribute:: loss
+ :type: torch.nn.Module
+
+ Loss function (default: CrossEntropyLoss).
+
+ .. attribute:: optimizer
+ :type: Type[torch.optim.Optimizer]
+
+ Optimizer class (default: Adam).
+
+ .. attribute:: scheduler
+ :type: Optional[Type[torch.optim.lr_scheduler._LRScheduler]]
+
+ Learning rate scheduler class (optional).
+
+ .. attribute:: accelerator
+ :type: str
+
+ Accelerator type: "auto", "cpu", "gpu", or "mps" (default: "auto").
+
+ .. attribute:: num_workers
+ :type: int
+
+ Number of data loading workers (default: 12).
+
+ .. attribute:: patience_early_stopping
+ :type: int
+
+ Early stopping patience in epochs (default: 3).
+
+ .. attribute:: dataloader_params
+ :type: Optional[dict]
+
+ Additional DataLoader parameters (optional).
+
+ .. attribute:: trainer_params
+ :type: Optional[dict]
+
+ Additional PyTorch Lightning Trainer parameters (optional).
+
+ .. attribute:: optimizer_params
+ :type: Optional[dict]
+
+ Additional optimizer parameters (optional).
+
+ .. attribute:: scheduler_params
+ :type: Optional[dict]
+
+ Additional scheduler parameters (optional).
+
+Example
+~~~~~~~
+
+.. code-block:: python
+
+ from torchTextClassifiers import TrainingConfig
+ import torch.nn as nn
+ import torch.optim as optim
+
+ # Basic configuration
+ config = TrainingConfig(
+ num_epochs=20,
+ batch_size=32,
+ lr=1e-3
+ )
+
+ # Advanced configuration
+ config = TrainingConfig(
+ num_epochs=50,
+ batch_size=64,
+ lr=5e-4,
+ loss=nn.CrossEntropyLoss(weight=torch.tensor([1.0, 2.0, 1.5])),
+ optimizer=optim.AdamW,
+ scheduler=optim.lr_scheduler.CosineAnnealingLR,
+ accelerator="gpu",
+ patience_early_stopping=10,
+ optimizer_params={"weight_decay": 0.01},
+ scheduler_params={"T_max": 50}
+ )
+
+See Also
+--------
+
+* :doc:`wrapper` - Using configurations with the wrapper
+* :doc:`components` - AttentionConfig for attention mechanism
+* :doc:`model` - How configurations affect the model
diff --git a/docs/source/api/dataset.rst b/docs/source/api/dataset.rst
new file mode 100644
index 0000000..32fdecc
--- /dev/null
+++ b/docs/source/api/dataset.rst
@@ -0,0 +1,267 @@
+Dataset
+=======
+
+PyTorch Dataset classes for data loading.
+
+.. currentmodule:: torchTextClassifiers.dataset
+
+TextClassificationDataset
+-------------------------
+
+PyTorch Dataset for text classification with optional categorical features.
+
+.. autoclass:: torchTextClassifiers.dataset.dataset.TextClassificationDataset
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+ **Features:**
+
+ - Support for text data
+ - Optional categorical variables
+ - Optional labels (for inference)
+ - Multilabel support with ragged arrays
+ - Integration with tokenizers
+
+Parameters
+----------
+
+.. class:: TextClassificationDataset(X_text, y, tokenizer, X_categorical=None)
+
+ :param X_text: Text samples (list or array of strings)
+ :type X_text: Union[List[str], np.ndarray]
+
+ :param y: Labels (optional for inference)
+ :type y: Optional[Union[List[int], np.ndarray]]
+
+ :param tokenizer: Tokenizer instance
+ :type tokenizer: BaseTokenizer
+
+ :param X_categorical: Categorical features (optional)
+ :type X_categorical: Optional[np.ndarray]
+
+Example Usage
+-------------
+
+Basic Text Dataset
+~~~~~~~~~~~~~~~~~~
+
+.. code-block:: python
+
+ from torchTextClassifiers.dataset import TextClassificationDataset
+ from torchTextClassifiers.tokenizers import WordPieceTokenizer
+ import numpy as np
+
+ # Prepare data
+ texts = ["Text sample 1", "Text sample 2", "Text sample 3"]
+ labels = [0, 1, 0]
+
+ # Create tokenizer
+ tokenizer = WordPieceTokenizer()
+ tokenizer.train(texts, vocab_size=1000)
+
+ # Create dataset
+ dataset = TextClassificationDataset(
+ X_text=texts,
+ y=labels,
+ tokenizer=tokenizer
+ )
+
+ # Use with DataLoader
+ from torch.utils.data import DataLoader
+
+ dataloader = DataLoader(dataset, batch_size=2, shuffle=True)
+
+ for batch in dataloader:
+ input_ids, labels_batch = batch
+ # Train model...
+
+Mixed Features Dataset
+~~~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: python
+
+ import numpy as np
+
+ # Text data
+ texts = ["Sample 1", "Sample 2", "Sample 3"]
+ labels = [0, 1, 2]
+
+ # Categorical data (3 samples, 2 categorical variables)
+ categorical = np.array([
+ [5, 2], # Sample 1: cat1=5, cat2=2
+ [3, 1], # Sample 2: cat1=3, cat2=1
+ [7, 0], # Sample 3: cat1=7, cat2=0
+ ])
+
+ # Create dataset
+ dataset = TextClassificationDataset(
+ X_text=texts,
+ y=labels,
+ tokenizer=tokenizer,
+ X_categorical=categorical
+ )
+
+    # Batch returns: (input_ids, categorical_features, labels)
+    dataloader = DataLoader(dataset, batch_size=2, shuffle=True)
+    for batch in dataloader:
+        input_ids, cat_features, labels_batch = batch
+        # Train model with mixed features...
+
+Inference Dataset
+~~~~~~~~~~~~~~~~~
+
+For inference without labels:
+
+.. code-block:: python
+
+ # Create dataset without labels
+ inference_dataset = TextClassificationDataset(
+ X_text=test_texts,
+ y=None, # No labels for inference
+ tokenizer=tokenizer
+ )
+
+    # Batch returns only features (no labels)
+    dataloader = DataLoader(inference_dataset, batch_size=32)
+    for batch in dataloader:
+        input_ids = batch
+        # Make predictions...
+
+Multilabel Dataset
+~~~~~~~~~~~~~~~~~~
+
+For multilabel classification:
+
+.. code-block:: python
+
+ # Multilabel targets (ragged arrays supported)
+ texts = ["Sample 1", "Sample 2", "Sample 3"]
+ labels = [
+ [0, 1], # Sample 1 has labels 0 and 1
+ [2], # Sample 2 has only label 2
+ [0, 1, 2], # Sample 3 has all three labels
+ ]
+
+ # Create dataset
+ dataset = TextClassificationDataset(
+ X_text=texts,
+ y=labels,
+ tokenizer=tokenizer
+ )
+
+ # Dataset handles ragged label arrays automatically
+
+DataLoader Integration
+----------------------
+
+The dataset integrates seamlessly with PyTorch DataLoader:
+
+.. code-block:: python
+
+ from torch.utils.data import DataLoader
+
+ # Create dataset
+ dataset = TextClassificationDataset(X_text, y, tokenizer)
+
+ # Create dataloader
+ dataloader = DataLoader(
+ dataset,
+ batch_size=32,
+ shuffle=True,
+ num_workers=4,
+ pin_memory=True # For GPU training
+ )
+
+ # Iterate
+ for batch_idx, batch in enumerate(dataloader):
+ # Process batch...
+ pass
+
+Batch Format
+------------
+
+The dataset returns different batch formats depending on configuration:
+
+**Text only:**
+
+.. code-block:: python
+
+ input_ids = batch
+ # Shape: (batch_size, seq_len)
+
+**Text + labels:**
+
+.. code-block:: python
+
+ input_ids, labels = batch
+ # input_ids shape: (batch_size, seq_len)
+ # labels shape: (batch_size,)
+
+**Text + categorical + labels:**
+
+.. code-block:: python
+
+ input_ids, categorical_features, labels = batch
+ # input_ids shape: (batch_size, seq_len)
+ # categorical_features shape: (batch_size, num_categorical_vars)
+ # labels shape: (batch_size,)
+
+Custom Collation
+----------------
+
+For advanced use cases, you can provide a custom collate function:
+
+.. code-block:: python
+
+ def custom_collate_fn(batch):
+ # Custom batching logic
+ ...
+ return custom_batch
+
+ dataloader = DataLoader(
+ dataset,
+ batch_size=32,
+ collate_fn=custom_collate_fn
+ )
+
+Memory Considerations
+---------------------
+
+For large datasets:
+
+**1. Use generators:**
+
+.. code-block:: python
+
+ def text_generator():
+ for text in large_text_file:
+ yield text.strip()
+
+ X_text = list(text_generator())
+
+**2. Increase num_workers:**
+
+.. code-block:: python
+
+ dataloader = DataLoader(
+ dataset,
+ batch_size=32,
+ num_workers=8 # Parallel data loading
+ )
+
+**3. Pin memory for GPU:**
+
+.. code-block:: python
+
+ dataloader = DataLoader(
+ dataset,
+ batch_size=32,
+ pin_memory=True # Faster GPU transfer
+ )
+
+See Also
+--------
+
+* :doc:`tokenizers` - Tokenizer options
+* :doc:`model` - Using datasets with models
+* :doc:`wrapper` - High-level API handling datasets automatically
+* :doc:`../tutorials/basic_classification` - Dataset usage examples
diff --git a/docs/source/api/index.rst b/docs/source/api/index.rst
new file mode 100644
index 0000000..5a7b468
--- /dev/null
+++ b/docs/source/api/index.rst
@@ -0,0 +1,63 @@
+API Reference
+=============
+
+Complete API documentation for torchTextClassifiers, auto-generated from source code docstrings.
+
+Overview
+--------
+
+The API is organized into several modules:
+
+* :doc:`wrapper` - High-level torchTextClassifiers wrapper class
+* :doc:`configs` - Configuration classes (ModelConfig, TrainingConfig)
+* :doc:`tokenizers` - Text tokenization (NGram, WordPiece, HuggingFace)
+* :doc:`components` - Model components (TextEmbedder, CategoricalVariableNet, etc.)
+* :doc:`model` - Core PyTorch models
+* :doc:`dataset` - Dataset classes for data loading
+
+Quick Links
+-----------
+
+Most Used Classes
+~~~~~~~~~~~~~~~~~
+
+* :class:`torchTextClassifiers.torchTextClassifiers.torchTextClassifiers` - Main wrapper class
+* :class:`torchTextClassifiers.torchTextClassifiers.ModelConfig` - Model configuration
+* :class:`torchTextClassifiers.torchTextClassifiers.TrainingConfig` - Training configuration
+* :class:`torchTextClassifiers.tokenizers.WordPieceTokenizer` - WordPiece tokenizer
+* :class:`torchTextClassifiers.tokenizers.NGramTokenizer` - N-gram tokenizer
+
+Architecture Components
+~~~~~~~~~~~~~~~~~~~~~~~
+
+* :class:`torchTextClassifiers.model.components.TextEmbedder` - Text embedding layer
+* :class:`torchTextClassifiers.model.components.CategoricalVariableNet` - Categorical features
+* :class:`torchTextClassifiers.model.components.ClassificationHead` - Classification layer
+* :class:`torchTextClassifiers.model.components.AttentionConfig` - Attention configuration
+
+Core Models
+~~~~~~~~~~~
+
+* :class:`torchTextClassifiers.model.model.TextClassificationModel` - Core PyTorch model
+* :class:`torchTextClassifiers.model.lightning.TextClassificationModule` - PyTorch Lightning module
+* :class:`torchTextClassifiers.dataset.dataset.TextClassificationDataset` - PyTorch Dataset
+
+Module Documentation
+--------------------
+
+.. toctree::
+ :maxdepth: 2
+
+ wrapper
+ configs
+ tokenizers
+ components
+ model
+ dataset
+
+Indices
+-------
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
diff --git a/docs/source/api/model.rst b/docs/source/api/model.rst
new file mode 100644
index 0000000..e31eac0
--- /dev/null
+++ b/docs/source/api/model.rst
@@ -0,0 +1,199 @@
+Core Models
+===========
+
+Core PyTorch and PyTorch Lightning models.
+
+.. currentmodule:: torchTextClassifiers.model
+
+PyTorch Model
+-------------
+
+TextClassificationModel
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Core PyTorch nn.Module combining all components.
+
+.. autoclass:: torchTextClassifiers.model.model.TextClassificationModel
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+ **Architecture:**
+
+ The model combines three main components:
+
+ 1. **TextEmbedder**: Converts tokens to embeddings
+ 2. **CategoricalVariableNet** (optional): Handles categorical features
+ 3. **ClassificationHead**: Produces class logits
+
+Example:
+
+.. code-block:: python
+
+ from torchTextClassifiers.model import TextClassificationModel
+ from torchTextClassifiers.model.components import (
+ TextEmbedder, TextEmbedderConfig,
+ CategoricalVariableNet, CategoricalForwardType,
+ ClassificationHead
+ )
+
+ # Create components
+ text_embedder = TextEmbedder(TextEmbedderConfig(
+ vocab_size=5000,
+ embedding_dim=128
+ ))
+
+ cat_net = CategoricalVariableNet(
+ vocabulary_sizes=[10, 20],
+ embedding_dims=[8, 16],
+ forward_type=CategoricalForwardType.AVERAGE_AND_CONCAT
+ )
+
+ classification_head = ClassificationHead(
+ input_dim=128 + 24, # text_dim + cat_dim
+ num_classes=5
+ )
+
+ # Combine into model
+ model = TextClassificationModel(
+ text_embedder=text_embedder,
+ categorical_variable_net=cat_net,
+ classification_head=classification_head
+ )
+
+ # Forward pass
+ logits = model(input_ids, categorical_data)
+
+PyTorch Lightning Module
+-------------------------
+
+TextClassificationModule
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+PyTorch Lightning LightningModule for automated training.
+
+.. autoclass:: torchTextClassifiers.model.lightning.TextClassificationModule
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+ **Features:**
+
+ - Automated training/validation/test steps
+ - Metrics tracking (accuracy)
+ - Optimizer and scheduler management
+ - Logging integration
+ - PyTorch Lightning callbacks support
+
+Example:
+
+.. code-block:: python
+
+ from torchTextClassifiers.model import (
+ TextClassificationModel,
+ TextClassificationModule
+ )
+ import torch.nn as nn
+ import torch.optim as optim
+ from pytorch_lightning import Trainer
+
+ # Create PyTorch model
+ model = TextClassificationModel(...)
+
+ # Wrap in Lightning module
+ lightning_module = TextClassificationModule(
+ model=model,
+ loss=nn.CrossEntropyLoss(),
+ optimizer=optim.Adam,
+ lr=1e-3,
+ scheduler=optim.lr_scheduler.StepLR,
+ scheduler_params={"step_size": 10, "gamma": 0.1}
+ )
+
+ # Train with Lightning Trainer
+ trainer = Trainer(
+ max_epochs=20,
+ accelerator="auto",
+ devices=1
+ )
+
+ trainer.fit(
+ lightning_module,
+ train_dataloaders=train_dataloader,
+ val_dataloaders=val_dataloader
+ )
+
+ # Test
+ trainer.test(lightning_module, dataloaders=test_dataloader)
+
+Training Steps
+--------------
+
+The TextClassificationModule implements standard training/validation/test steps:
+
+**Training Step:**
+
+.. code-block:: python
+
+ def training_step(self, batch, batch_idx):
+ input_ids, cat_features, labels = batch
+ logits = self.model(input_ids, cat_features)
+ loss = self.loss(logits, labels)
+ acc = self.compute_accuracy(logits, labels)
+
+ self.log("train_loss", loss)
+ self.log("train_acc", acc)
+
+ return loss
+
+**Validation Step:**
+
+.. code-block:: python
+
+ def validation_step(self, batch, batch_idx):
+ input_ids, cat_features, labels = batch
+ logits = self.model(input_ids, cat_features)
+ loss = self.loss(logits, labels)
+ acc = self.compute_accuracy(logits, labels)
+
+ self.log("val_loss", loss)
+ self.log("val_acc", acc)
+
+Custom Training
+---------------
+
+For custom training loops, use the PyTorch model directly:
+
+.. code-block:: python
+
+ from torchTextClassifiers.model import TextClassificationModel
+ import torch.nn as nn
+ import torch.optim as optim
+
+ model = TextClassificationModel(...)
+ loss_fn = nn.CrossEntropyLoss()
+ optimizer = optim.Adam(model.parameters(), lr=1e-3)
+
+ # Custom training loop
+ for epoch in range(num_epochs):
+ for batch in dataloader:
+ input_ids, cat_features, labels = batch
+
+ # Forward pass
+ logits = model(input_ids, cat_features)
+ loss = loss_fn(logits, labels)
+
+ # Backward pass
+ optimizer.zero_grad()
+ loss.backward()
+ optimizer.step()
+
+ print(f"Epoch {epoch}, Loss: {loss.item()}")
+
+See Also
+--------
+
+* :doc:`components` - Model components
+* :doc:`wrapper` - High-level wrapper using these models
+* :doc:`dataset` - Data loading for models
+* :doc:`configs` - Model and training configuration
diff --git a/docs/source/api/tokenizers.rst b/docs/source/api/tokenizers.rst
new file mode 100644
index 0000000..a871fec
--- /dev/null
+++ b/docs/source/api/tokenizers.rst
@@ -0,0 +1,231 @@
+Tokenizers
+==========
+
+Text tokenization classes for converting text to numerical tokens.
+
+.. currentmodule:: torchTextClassifiers.tokenizers
+
+Base Classes
+------------
+
+BaseTokenizer
+~~~~~~~~~~~~~
+
+Abstract base class for all tokenizers.
+
+.. autoclass:: torchTextClassifiers.tokenizers.base.BaseTokenizer
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+TokenizerOutput
+~~~~~~~~~~~~~~~
+
+Output dataclass from tokenization.
+
+.. autoclass:: torchTextClassifiers.tokenizers.base.TokenizerOutput
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+ .. rubric:: Attributes
+
+ .. attribute:: input_ids
+ :type: torch.Tensor
+
+ Token indices (batch_size, seq_len).
+
+ .. attribute:: attention_mask
+ :type: torch.Tensor
+
+ Attention mask tensor (batch_size, seq_len).
+
+ .. attribute:: offset_mapping
+ :type: Optional[List[List[Tuple[int, int]]]]
+
+ Byte offsets for each token (optional, for explainability).
+
+ .. attribute:: word_ids
+ :type: Optional[List[List[Optional[int]]]]
+
+ Word-level indices for each token (optional).
+
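+Example (a minimal sketch; the ``WordPieceTokenizer`` training call follows the pattern used elsewhere in these docs):
+
+.. code-block:: python
+
+    from torchTextClassifiers.tokenizers import WordPieceTokenizer
+
+    tokenizer = WordPieceTokenizer()
+    tokenizer.train(["Hello world!", "Text classification"], vocab_size=100)
+
+    output = tokenizer(["Hello world!", "Text classification"])
+
+    # input_ids and attention_mask are aligned tensors
+    print(output.input_ids.shape)       # (batch_size, seq_len)
+    print(output.attention_mask.shape)  # (batch_size, seq_len)
+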
+Concrete Tokenizers
+-------------------
+
+NGramTokenizer
+~~~~~~~~~~~~~~
+
+FastText-style character n-gram tokenizer.
+
+.. autoclass:: torchTextClassifiers.tokenizers.ngram.NGramTokenizer
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+ **Features:**
+
+ - Character n-gram generation (customizable min/max n)
+ - Subword caching for performance
+ - Text cleaning and normalization (FastText style)
+ - Hash-based tokenization
+ - Support for special tokens, padding, truncation
+
+Example:
+
+.. code-block:: python
+
+ from torchTextClassifiers.tokenizers import NGramTokenizer
+
+ # Create tokenizer
+ tokenizer = NGramTokenizer(
+ vocab_size=10000,
+ min_n=3, # Minimum n-gram size
+ max_n=6, # Maximum n-gram size
+ output_dim=128
+ )
+
+ # Train on corpus
+ tokenizer.train(training_texts)
+
+ # Tokenize
+ output = tokenizer(["Hello world!", "Text classification"])
+
+WordPieceTokenizer
+~~~~~~~~~~~~~~~~~~
+
+WordPiece subword tokenization.
+
+.. autoclass:: torchTextClassifiers.tokenizers.WordPiece.WordPieceTokenizer
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+ **Features:**
+
+ - Subword tokenization strategy
+ - Vocabulary learning from corpus
+ - Handles unknown words gracefully
+ - Efficient encoding/decoding
+
+Example:
+
+.. code-block:: python
+
+ from torchTextClassifiers.tokenizers import WordPieceTokenizer
+
+ # Create tokenizer
+ tokenizer = WordPieceTokenizer(
+ vocab_size=5000,
+ output_dim=128
+ )
+
+ # Train on corpus
+ tokenizer.train(training_texts)
+
+ # Tokenize
+ output = tokenizer(["Hello world!", "Text classification"])
+
+HuggingFaceTokenizer
+~~~~~~~~~~~~~~~~~~~~
+
+Wrapper for HuggingFace tokenizers.
+
+.. autoclass:: torchTextClassifiers.tokenizers.base.HuggingFaceTokenizer
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+ **Features:**
+
+ - Access to HuggingFace pre-trained tokenizers
+ - Compatible with transformer models
+ - Support for special tokens
+
+Example:
+
+.. code-block:: python
+
+ from torchTextClassifiers.tokenizers import HuggingFaceTokenizer
+ from transformers import AutoTokenizer
+
+ # Load pre-trained tokenizer
+ hf_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+
+ # Wrap in our interface
+ tokenizer = HuggingFaceTokenizer(
+ tokenizer=hf_tokenizer,
+ output_dim=128
+ )
+
+ # Tokenize
+ output = tokenizer(["Hello world!", "Text classification"])
+
+Choosing a Tokenizer
+---------------------
+
+**NGramTokenizer (FastText-style)**
+
+Use when:
+
+* You want character-level features
+* Your text has many misspellings or variations
+* You need fast training
+* You have limited vocabulary
+
+**WordPieceTokenizer**
+
+Use when:
+
+* You want subword-level features
+* Your vocabulary is large but manageable
+* You need good coverage with reasonable vocab size
+* You're doing standard text classification
+
+**HuggingFaceTokenizer**
+
+Use when:
+
+* You want to use pre-trained tokenizers
+* You're working with transformer models
+* You need specific language support
+* You want to fine-tune on top of BERT/RoBERTa/etc.
+
+Tokenizer Comparison
+--------------------
+
+.. list-table::
+ :widths: 25 25 25 25
+ :header-rows: 1
+
+ * - Feature
+ - NGramTokenizer
+ - WordPieceTokenizer
+ - HuggingFaceTokenizer
+ * - Granularity
+ - Character n-grams
+ - Subwords
+ - Subwords/Words
+ * - Training Speed
+ - Fast
+ - Medium
+ - Pre-trained
+ * - Vocab Size
+ - Configurable
+ - Configurable
+ - Pre-defined
+ * - OOV Handling
+ - Excellent (char-level)
+ - Good (subwords)
+ - Good (subwords)
+ * - Memory
+ - Efficient
+ - Medium
+ - Larger
+
+See Also
+--------
+
+* :doc:`wrapper` - Using tokenizers with the wrapper
+* :doc:`dataset` - How tokenizers are used in datasets
+* :doc:`../tutorials/basic_classification` - Tokenizer tutorial
diff --git a/docs/source/api/wrapper.rst b/docs/source/api/wrapper.rst
new file mode 100644
index 0000000..1290cf7
--- /dev/null
+++ b/docs/source/api/wrapper.rst
@@ -0,0 +1,59 @@
+torchTextClassifiers Wrapper
+=============================
+
+The main wrapper class for text classification tasks.
+
+.. currentmodule:: torchTextClassifiers.torchTextClassifiers
+
+Main Class
+----------
+
+.. autoclass:: torchTextClassifiers
+ :members:
+ :undoc-members:
+ :show-inheritance:
+ :inherited-members:
+ :special-members: __init__
+
+ .. rubric:: Methods
+
+ .. autosummary::
+ :nosignatures:
+
+ ~torchTextClassifiers.train
+ ~torchTextClassifiers.predict
+ ~torchTextClassifiers.predict_proba
+ ~torchTextClassifiers.get_explanations
+ ~torchTextClassifiers.save
+ ~torchTextClassifiers.load
+
+Usage Example
+-------------
+
+.. code-block:: python
+
+ from torchTextClassifiers import torchTextClassifiers, ModelConfig, TrainingConfig
+ from torchTextClassifiers.tokenizers import WordPieceTokenizer
+
+ # Create tokenizer
+ tokenizer = WordPieceTokenizer()
+ tokenizer.train(texts, vocab_size=1000)
+
+ # Configure model
+ model_config = ModelConfig(embedding_dim=64, num_classes=2)
+ training_config = TrainingConfig(num_epochs=10, batch_size=16, lr=1e-3)
+
+ # Create and train classifier
+ classifier = torchTextClassifiers(tokenizer=tokenizer, model_config=model_config)
+ classifier.train(X_text=texts, y=labels, training_config=training_config)
+
+ # Make predictions
+ predictions = classifier.predict(new_texts)
+ probabilities = classifier.predict_proba(new_texts)
+
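+The wrapper also exposes ``save`` and ``load`` (listed in the method summary above). A hedged sketch, assuming both accept a filesystem path; check the generated API signatures for the exact arguments:
+
+.. code-block:: python
+
+    # Persist the trained classifier (path argument assumed)
+    classifier.save("my_classifier")
+
+    # Restore it later and reuse
+    restored = torchTextClassifiers.load("my_classifier")
+    predictions = restored.predict(new_texts)
+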
+See Also
+--------
+
+* :doc:`configs` - Configuration classes
+* :doc:`tokenizers` - Tokenizer options
+* :doc:`model` - Underlying PyTorch models
diff --git a/docs/source/architecture/diagrams/NN.drawio.png b/docs/source/architecture/diagrams/NN.drawio.png
new file mode 100644
index 0000000..ed86a7d
Binary files /dev/null and b/docs/source/architecture/diagrams/NN.drawio.png differ
diff --git a/docs/source/architecture/diagrams/avg_concat.png b/docs/source/architecture/diagrams/avg_concat.png
new file mode 100644
index 0000000..ecea862
Binary files /dev/null and b/docs/source/architecture/diagrams/avg_concat.png differ
diff --git a/docs/source/architecture/diagrams/full_concat.png b/docs/source/architecture/diagrams/full_concat.png
new file mode 100644
index 0000000..0848261
Binary files /dev/null and b/docs/source/architecture/diagrams/full_concat.png differ
diff --git a/docs/source/architecture/diagrams/ttc_architecture.png b/docs/source/architecture/diagrams/ttc_architecture.png
new file mode 100644
index 0000000..ff4da85
Binary files /dev/null and b/docs/source/architecture/diagrams/ttc_architecture.png differ
diff --git a/docs/source/architecture/index.md b/docs/source/architecture/index.md
new file mode 100644
index 0000000..c457efe
--- /dev/null
+++ b/docs/source/architecture/index.md
@@ -0,0 +1,54 @@
+# Architecture
+
+This section explains the design and architecture of torchTextClassifiers.
+
+```{toctree}
+:maxdepth: 2
+
+overview
+```
+
+## Overview
+
+torchTextClassifiers is built on a **modular, component-based pipeline** that balances simplicity for beginners with flexibility for advanced users.
+
+The core pipeline consists of four main components:
+
+1. **Tokenizer**: Converts text strings into numerical tokens
+2. **Text Embedder**: Creates semantic embeddings from tokens (with optional attention)
+3. **Categorical Handler**: Processes additional categorical features (optional)
+4. **Classification Head**: Produces final class predictions
+
+This design allows you to:
+
+- Understand the clear data flow through the model
+- Mix and match components for your specific needs
+- Start simple and add complexity as required
+- Use the high-level API or drop down to PyTorch for full control
+
+## Quick Links
+
+- {doc}`overview`: Complete architecture explanation with examples
+- {doc}`../api/index`: API reference for all components
+
+## Design Philosophy
+
+The architecture follows these principles:
+
+**Modularity**
+: Each component (Tokenizer, Embedder, Categorical Handler, Classification Head) is independent and can be used separately or replaced with custom implementations
+
+**Clear Data Flow**
+: The pipeline shows exactly how data moves from text input through embeddings to predictions, making the model transparent and understandable
+
+**Composability**
+: Components can be mixed and matched to create custom architectures—use text-only, add categorical features, or build entirely custom combinations
+
+**Flexibility**
+: Start with the high-level `torchTextClassifiers` wrapper for simplicity, or compose components directly with PyTorch for maximum control
+
+**Type Safety**
+: Extensive use of type hints and dataclasses for better IDE support and fewer runtime errors
+
+**Framework Integration**
+: All components are standard `torch.nn.Module` objects with seamless PyTorch and PyTorch Lightning integration
diff --git a/docs/source/architecture/overview.md b/docs/source/architecture/overview.md
new file mode 100644
index 0000000..ae1538b
--- /dev/null
+++ b/docs/source/architecture/overview.md
@@ -0,0 +1,613 @@
+# Architecture Overview
+
+torchTextClassifiers is a **modular, component-based framework** for text classification. Rather than a black box, it provides clear, reusable components that you can understand, configure, and compose.
+
+## The Pipeline
+
+At its core, torchTextClassifiers processes data through a simple pipeline:
+
+```{thumbnail} diagrams/ttc_architecture.png
+:alt: Package Architecture
+```
+
+**Data Flow:**
+1. **Text** is tokenized into numerical tokens
+2. **Tokens** are embedded into dense vectors (with optional attention)
+3. **Categorical variables** (optional) are embedded separately
+4. **All embeddings** are combined
+5. **Classification head** produces final predictions
+
+## Component 1: Tokenizer
+
+**Purpose:** Convert text strings into numerical tokens that the model can process.
+
+### Available Tokenizers
+
+torchTextClassifiers supports three tokenization strategies:
+
+#### NGramTokenizer (FastText-style)
+
+Character n-gram tokenization for robustness to typos and rare words.
+
+```python
+from torchTextClassifiers.tokenizers import NGramTokenizer
+
+tokenizer = NGramTokenizer(
+ vocab_size=10000,
+ min_n=3, # Minimum n-gram size
+ max_n=6, # Maximum n-gram size
+)
+    import torch
+    import torch.nn as nn
+```
+
+**When to use:**
+- Text with typos or non-standard spellings
+- Morphologically rich languages
+- Limited training data
+
+#### WordPieceTokenizer
+
+Subword tokenization for balanced vocabulary coverage.
+
+```python
+from torchTextClassifiers.tokenizers import WordPieceTokenizer
+
+tokenizer = WordPieceTokenizer(vocab_size=5000)
+tokenizer.train(training_texts)
+```
+
+**When to use:**
+- Standard text classification
+- Moderate vocabulary size
+- Good balance of coverage and granularity
+
+#### HuggingFaceTokenizer
+
+Use pre-trained tokenizers from HuggingFace.
+
+```python
+from torchTextClassifiers.tokenizers import HuggingFaceTokenizer
+from transformers import AutoTokenizer
+
+hf_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+tokenizer = HuggingFaceTokenizer(tokenizer=hf_tokenizer)
+```
+
+**When to use:**
+- Transfer learning from pre-trained models
+- Need specific language support
+- Want to leverage existing tokenizers
+
+### Tokenizer Output
+
+All tokenizers produce the same output format:
+
+```python
+output = tokenizer(["Hello world!", "Text classification"])
+# output.input_ids: Token indices (batch_size, seq_len)
+# output.attention_mask: Attention mask (batch_size, seq_len)
+```
+
+## Component 2: Text Embedder
+
+**Purpose:** Convert tokens into dense, semantic embeddings that capture meaning.
+
+### Basic Text Embedding
+
+```python
+from torchTextClassifiers.model.components import TextEmbedder, TextEmbedderConfig
+
+config = TextEmbedderConfig(
+ vocab_size=5000,
+ embedding_dim=128,
+)
+embedder = TextEmbedder(config)
+
+# Forward pass
+text_features = embedder(token_ids) # Shape: (batch_size, 128)
+```
+
+**How it works:**
+1. Looks up embedding for each token
+2. Averages embeddings across the sequence
+3. Produces a fixed-size vector per sample
+
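+The sketch below illustrates the lookup-and-average step with plain PyTorch (illustrative only: `TextEmbedder` encapsulates the equivalent logic, and the tensor shapes here are assumptions):
+
+```python
+import torch
+import torch.nn as nn
+
+embedding = nn.Embedding(num_embeddings=5000, embedding_dim=128)
+
+token_ids = torch.randint(0, 5000, (2, 10))  # (batch_size=2, seq_len=10)
+token_vectors = embedding(token_ids)         # (2, 10, 128)
+text_features = token_vectors.mean(dim=1)    # (2, 128): one vector per sample
+```
+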
+### With Self-Attention (Optional)
+
+Add transformer-style self-attention for better contextual understanding:
+
+```python
+from torchTextClassifiers.model.components import AttentionConfig
+
+attention_config = AttentionConfig(
+ n_embd=128,
+ n_head=4, # Number of attention heads
+ n_layer=2, # Number of transformer blocks
+ dropout=0.1,
+)
+
+config = TextEmbedderConfig(
+ vocab_size=5000,
+ embedding_dim=128,
+ attention_config=attention_config, # Add attention
+)
+embedder = TextEmbedder(config)
+```
+
+**When to use attention:**
+- Long documents where context matters
+- Tasks requiring understanding of word relationships
+- When you have sufficient training data
+
+**Configuration:**
+- `embedding_dim`: Size of embedding vectors (e.g., 64, 128, 256)
+- `n_head`: Number of attention heads (typically 4, 8, or 16)
+- `n_layer`: Depth of transformer (start with 2-3)
+
+## Component 3: Categorical Variable Handler
+
+**Purpose:** Process categorical features (like user demographics, product categories) alongside text.
+
+### When to Use
+
+Add categorical features when you have structured data that complements text:
+- User age, location, or demographics
+- Product categories or attributes
+- Document metadata (source, type, etc.)
+
+### Setup
+
+```python
+from torchTextClassifiers.model.components import (
+ CategoricalVariableNet,
+ CategoricalForwardType
+)
+
+# Example: 3 categorical variables
+# - Variable 1: 10 possible values
+# - Variable 2: 5 possible values
+# - Variable 3: 20 possible values
+
+cat_handler = CategoricalVariableNet(
+ vocabulary_sizes=[10, 5, 20],
+ embedding_dims=[8, 4, 16], # Embedding size for each
+ forward_type=CategoricalForwardType.AVERAGE_AND_CONCAT
+)
+```
+
+### Combination Strategies
+
+The `forward_type` controls how categorical embeddings are combined:
+
+#### AVERAGE_AND_CONCAT
+
+Average all categorical embeddings, then concatenate with text:
+
+```{thumbnail} diagrams/avg_concat.png
+:alt: Average and Concatenate
+```
+
+```python
+forward_type=CategoricalForwardType.AVERAGE_AND_CONCAT
+```
+
+**Output size:** `text_embedding_dim + sum(categorical_embedding_dims)/n_categoricals`
+
+**When to use:** When categorical variables are equally important
+
+#### CONCATENATE_ALL
+
+Concatenate each categorical embedding separately:
+
+```{thumbnail} diagrams/full_concat.png
+:alt: Full Concatenation
+```
+
+```python
+forward_type=CategoricalForwardType.CONCATENATE_ALL
+```
+
+**Output size:** `text_embedding_dim + sum(categorical_embedding_dims)`
+
+**When to use:** When each categorical variable has unique importance
+
+#### SUM_TO_TEXT
+
+Sum all categorical embeddings, then concatenate:
+
+```python
+forward_type=CategoricalForwardType.SUM_TO_TEXT
+```
+
+**Output size:** `text_embedding_dim + categorical_embedding_dim`
+
+**When to use:** To minimize output dimension
+
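+To compare the strategies concretely, here is a hedged sketch of the resulting feature sizes (equal categorical embedding sizes are used so that averaging is well-defined; the arithmetic follows the formulas above):
+
+```python
+text_dim = 128
+cat_dims = [8, 8, 8]  # three categorical variables, equal embedding size
+
+# CONCATENATE_ALL: text plus every categorical embedding
+concat_all_dim = text_dim + sum(cat_dims)                   # 128 + 24 = 152
+
+# AVERAGE_AND_CONCAT: text plus the averaged categorical embedding
+avg_concat_dim = text_dim + sum(cat_dims) // len(cat_dims)  # 128 + 8 = 136
+
+# SUM_TO_TEXT: text plus the summed categorical embedding
+sum_to_text_dim = text_dim + cat_dims[0]                    # 128 + 8 = 136
+```
+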
+### Example with Data
+
+```python
+# Text data
+texts = ["Sample 1", "Sample 2"]
+
+# Categorical data: (n_samples, n_categorical_variables)
+import torch
+
+categorical = torch.tensor([
+    [5, 2, 14],  # Sample 1: cat1=5, cat2=2, cat3=14
+    [3, 1, 8],   # Sample 2: cat1=3, cat2=1, cat3=8
+])
+
+# Process (the component is an nn.Module and expects a tensor)
+cat_features = cat_handler(categorical)  # Shape: (2, total_emb_dim)
+```
+
+## Component 4: Classification Head
+
+**Purpose:** Take the combined features and produce class predictions.
+
+### Simple Classification
+
+```python
+from torchTextClassifiers.model.components import ClassificationHead
+
+head = ClassificationHead(
+ input_dim=152, # 128 (text) + 24 (categorical)
+ num_classes=5, # Number of output classes
+)
+
+logits = head(combined_features) # Shape: (batch_size, 5)
+```
+
+### Custom Classification Head
+
+For more complex classification, provide your own architecture:
+
+```python
+import torch.nn as nn
+
+custom_head = nn.Sequential(
+ nn.Linear(152, 64),
+ nn.ReLU(),
+ nn.Dropout(0.2),
+ nn.Linear(64, 5)
+)
+
+head = ClassificationHead(net=custom_head)
+```
+
+## Complete Architecture
+
+```{thumbnail} diagrams/NN.drawio.png
+:alt: Complete neural network architecture
+```
+
+### Full Model Assembly
+
+The framework automatically combines all components:
+
+```python
+from torchTextClassifiers.model import TextClassificationModel
+
+model = TextClassificationModel(
+ text_embedder=text_embedder,
+ categorical_variable_net=cat_handler, # Optional
+ classification_head=head,
+)
+
+# Forward pass
+logits = model(token_ids, categorical_data)
+```
+
+## Usage Examples
+
+### Example 1: Text-Only Classification
+
+Simple sentiment analysis with just text:
+
+```python
+from torchTextClassifiers import torchTextClassifiers, ModelConfig, TrainingConfig
+from torchTextClassifiers.tokenizers import WordPieceTokenizer
+
+# 1. Create tokenizer
+tokenizer = WordPieceTokenizer(vocab_size=5000)
+tokenizer.train(texts)
+
+# 2. Configure model
+model_config = ModelConfig(
+ embedding_dim=128,
+ num_classes=2, # Binary classification
+)
+
+# 3. Train
+classifier = torchTextClassifiers(tokenizer=tokenizer, model_config=model_config)
+training_config = TrainingConfig(num_epochs=10, batch_size=32, lr=1e-3)
+classifier.train(texts, labels, training_config=training_config)
+
+# 4. Predict
+predictions = classifier.predict(new_texts)
+```
+
+### Example 2: Mixed Features (Text + Categorical)
+
+Product classification using both description and category:
+
+```python
+import numpy as np
+
+# Text + categorical data
+texts = ["Product description...", "Another product..."]
+categorical = np.array([
+ [3, 1], # Product 1: category=3, brand=1
+ [5, 2], # Product 2: category=5, brand=2
+])
+labels = [0, 1]
+
+# Configure model with categorical features
+model_config = ModelConfig(
+ embedding_dim=128,
+ num_classes=3,
+ categorical_vocabulary_sizes=[10, 5], # 10 categories, 5 brands
+ categorical_embedding_dims=[8, 4],
+)
+
+# Train
+classifier = torchTextClassifiers(tokenizer=tokenizer, model_config=model_config)
+classifier.train(
+ X_text=texts,
+ y=labels,
+ X_categorical=categorical,
+ training_config=training_config
+)
+```
+
+### Example 3: With Attention
+
+For longer documents or complex text:
+
+```python
+from torchTextClassifiers.model.components import AttentionConfig
+
+# Add attention for better understanding
+attention_config = AttentionConfig(
+ n_embd=128,
+ n_head=8,
+ n_layer=3,
+ dropout=0.1,
+)
+
+model_config = ModelConfig(
+ embedding_dim=128,
+ num_classes=5,
+ attention_config=attention_config, # Enable attention
+)
+
+classifier = torchTextClassifiers(tokenizer=tokenizer, model_config=model_config)
+```
+
+### Example 4: Custom Components
+
+For maximum flexibility, compose components manually:
+
+```python
+from torch import nn
+from torchTextClassifiers.model.components import TextEmbedder, ClassificationHead
+
+# Create custom model
+class CustomClassifier(nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.text_embedder = TextEmbedder(text_config)
+ self.custom_layer = nn.Linear(128, 64)
+ self.head = ClassificationHead(64, num_classes)
+
+ def forward(self, input_ids):
+ text_features = self.text_embedder(input_ids)
+ custom_features = self.custom_layer(text_features)
+ return self.head(custom_features)
+```
+
+## Using the High-Level API
+
+For most users, the `torchTextClassifiers` wrapper handles all the complexity:
+
+```python
+from torchTextClassifiers import torchTextClassifiers, ModelConfig, TrainingConfig
+
+# Simple 3-step process:
+# 1. Create tokenizer and train it
+# 2. Configure model architecture
+# 3. Train and predict
+
+classifier = torchTextClassifiers(tokenizer=tokenizer, model_config=model_config)
+classifier.train(texts, labels, training_config=training_config)
+predictions = classifier.predict(new_texts)
+```
+
+**What the wrapper does:**
+- Creates all components automatically
+- Sets up PyTorch Lightning training
+- Handles data loading and batching
+- Provides simple train/predict interface
+- Manages configurations
+
+**When to use the wrapper:**
+- Standard classification tasks
+- Quick experimentation
+- Don't need custom architecture
+- Want simplicity over control
+
+## For Advanced Users
+
+### Direct PyTorch Usage
+
+All components are standard `torch.nn.Module` objects:
+
+```python
+import torch
+from torch import nn
+
+# All components work with standard PyTorch
+isinstance(text_embedder, nn.Module) # True
+isinstance(cat_handler, nn.Module) # True
+isinstance(head, nn.Module) # True
+
+# Use in any PyTorch code
+model = TextClassificationModel(text_embedder, cat_handler, head)
+optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
+
+# Standard PyTorch training loop
+for batch in dataloader:
+ optimizer.zero_grad()
+ logits = model(batch.input_ids, batch.categorical)
+ loss = criterion(logits, batch.labels)
+ loss.backward()
+ optimizer.step()
+```
+
+### PyTorch Lightning Integration
+
+For automated training with advanced features:
+
+```python
+from torchTextClassifiers.model import TextClassificationModule
+from pytorch_lightning import Trainer
+from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
+import torch
+from torch import nn
+
+# Wrap model in Lightning module
+lightning_module = TextClassificationModule(
+ model=model,
+ loss=nn.CrossEntropyLoss(),
+ optimizer=torch.optim.Adam,
+ lr=1e-3,
+)
+
+# Use Lightning Trainer
+trainer = Trainer(
+ max_epochs=20,
+ accelerator="gpu",
+ devices=4, # Multi-GPU
+ callbacks=[EarlyStopping(), ModelCheckpoint()],
+)
+trainer.fit(lightning_module, train_dataloader, val_dataloader)
+```
+
+## Design Philosophy
+
+### Modularity
+
+Each component is independent and can be used separately:
+
+```python
+# Use just the tokenizer
+tokenizer = NGramTokenizer()
+
+# Use just the embedder
+embedder = TextEmbedder(config)
+
+# Use just the classifier head
+head = ClassificationHead(input_dim, num_classes)
+```
+
+### Flexibility
+
+Mix and match components for your use case:
+
+```python
+# Text only
+model = TextClassificationModel(text_embedder, None, head)
+
+# Text + categorical
+model = TextClassificationModel(text_embedder, cat_handler, head)
+
+# Custom combination
+model = MyCustomModel(text_embedder, my_layer, head)
+```
+
+### Simplicity
+
+Sensible defaults for quick starts:
+
+```python
+# Minimal configuration
+model_config = ModelConfig(embedding_dim=128, num_classes=2)
+
+# Or detailed configuration
+model_config = ModelConfig(
+ embedding_dim=256,
+ num_classes=10,
+ categorical_vocabulary_sizes=[50, 20, 100],
+ categorical_embedding_dims=[32, 16, 64],
+ attention_config=AttentionConfig(n_embd=256, n_head=8, n_layer=4),
+)
+```
+
+### Extensibility
+
+Easy to add custom components:
+
+```python
+class MyCustomEmbedder(nn.Module):
+ def __init__(self):
+ super().__init__()
+ # Your custom implementation
+
+ def forward(self, input_ids):
+ # Your custom forward pass
+ return embeddings
+
+# Use with existing components
+model = TextClassificationModel(
+ text_embedder=MyCustomEmbedder(),
+ classification_head=head,
+)
+```
+
+## Configuration Guide
+
+### Choosing Embedding Dimension
+
+| Task Complexity | Data Size | Recommended embedding_dim |
+|----------------|-----------|---------------------------|
+| Simple (binary) | < 1K samples | 32-64 |
+| Medium (3-5 classes) | 1K-10K samples | 64-128 |
+| Complex (10+ classes) | 10K-100K samples | 128-256 |
+| Very complex | > 100K samples | 256-512 |
+
+### Attention Configuration
+
+| Document Length | Recommended Setup |
+|----------------|-------------------|
+| Short (< 50 tokens) | No attention needed |
+| Medium (50-200 tokens) | n_layer=2, n_head=4 |
+| Long (200-512 tokens) | n_layer=3-4, n_head=8 |
+| Very long (> 512 tokens) | n_layer=4-6, n_head=8-16 |
+
+### Categorical Embedding Size
+
+Rule of thumb: `embedding_dim ≈ min(50, vocabulary_size // 2)`
+
+```python
+# For a categorical variable with 100 unique values:
+categorical_embedding_dim = min(50, 100 // 2)  # -> 50
+
+# For a categorical variable with 10 unique values:
+categorical_embedding_dim = min(50, 10 // 2)   # -> 5
+```
+
+## Summary
+
+torchTextClassifiers provides a **component-based pipeline** for text classification:
+
+1. **Tokenizer** → Converts text to tokens
+2. **Text Embedder** → Creates semantic embeddings (with optional attention)
+3. **Categorical Handler** → Processes additional features (optional)
+4. **Classification Head** → Produces predictions
+
+**Key Benefits:**
+- Clear data flow through intuitive components
+- Mix and match for your specific needs
+- Start simple, add complexity as needed
+- Full PyTorch compatibility
+
+## Next Steps
+
+- **Tutorials**: See {doc}`../tutorials/index` for step-by-step guides
+- **API Reference**: Check {doc}`../api/index` for detailed documentation
+- **Examples**: Explore complete examples in the repository
+
+Ready to build your classifier? Start with {doc}`../getting_started/quickstart`!
+
diff --git a/docs/source/conf.py b/docs/source/conf.py
new file mode 100644
index 0000000..569cb02
--- /dev/null
+++ b/docs/source/conf.py
@@ -0,0 +1,140 @@
+# Configuration file for the Sphinx documentation builder.
+#
+# For the full list of built-in configuration values, see the documentation:
+# https://www.sphinx-doc.org/en/master/usage/configuration.html
+
+# -- Path setup --------------------------------------------------------------
+
+import os
+import sys
+sys.path.insert(0, os.path.abspath('../..'))
+
+# -- Project information -----------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
+
+project = 'torchTextClassifiers'
+copyright = '2024-2025, Cédric Couralet, Meilame Tayebjee'
+author = 'Cédric Couralet, Meilame Tayebjee'
+
+# -- General configuration ---------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
+
+extensions = [
+ 'sphinx.ext.autodoc', # Auto-generate API docs from docstrings
+ 'sphinx.ext.napoleon', # Support Google/NumPy style docstrings
+ 'sphinx.ext.viewcode', # Add links to highlighted source code
+ 'sphinx.ext.intersphinx', # Link to other project documentation
+ 'sphinx.ext.autosummary', # Generate summary tables
+ 'sphinx_autodoc_typehints', # Include type hints in documentation
+ 'sphinx_copybutton', # Add copy button to code blocks
+ 'myst_parser', # Parse Markdown files
+ 'sphinx_design', # Modern UI components (cards, grids, etc.)
+ 'nbsphinx', # Render Jupyter notebooks
+    'sphinxcontrib.images',      # Allow zooming on images
+    'sphinxcontrib.mermaid',     # Render Mermaid diagrams (used in tutorials/index.md)
+]
+
+
+
+templates_path = ['_templates']
+exclude_patterns = []
+
+# The suffix(es) of source filenames.
+source_suffix = {
+ '.rst': 'restructuredtext',
+ '.md': 'markdown',
+}
+
+
+# -- Options for HTML output -------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
+
+html_theme = 'pydata_sphinx_theme'
+html_static_path = ['_static']
+html_css_files = ['custom.css']
+
+html_theme_options = {
+ "github_url": "https://github.com/InseeFrLab/torchTextClassifiers",
+ "logo": {
+ "image_light": "_static/logo-ttc-light.svg",
+ "image_dark": "_static/logo-ttc-dark.svg",
+ "text": "torchTextClassifiers",
+ },
+ "show_nav_level": 2,
+ "navigation_depth": 3,
+ "show_toc_level": 2,
+ "navbar_align": "left",
+ "navbar_end": ["theme-switcher", "navbar-icon-links"],
+ "footer_start": ["copyright"],
+ "footer_end": ["sphinx-version"],
+ "secondary_sidebar_items": ["page-toc", "edit-this-page"],
+ "collapse_navigation": False,
+ "navigation_with_keys": True,
+}
+
+# -- Extension configuration -------------------------------------------------
+
+# Autodoc configuration
+autodoc_default_options = {
+ 'members': True,
+ 'member-order': 'bysource',
+ 'special-members': '__init__',
+ 'undoc-members': True,
+ 'exclude-members': '__weakref__'
+}
+
+autodoc_typehints = 'description'
+autodoc_typehints_description_target = 'documented'
+
+# Mock imports for documentation (packages that aren't installed)
+autodoc_mock_imports = ['transformers', 'tokenizers', 'datasets', 'captum']
+
+# Napoleon configuration (for Google/NumPy style docstrings)
+napoleon_google_docstring = True
+napoleon_numpy_docstring = True
+napoleon_include_init_with_doc = True
+napoleon_include_private_with_doc = False
+napoleon_include_special_with_doc = True
+napoleon_use_admonition_for_examples = True
+napoleon_use_admonition_for_notes = True
+napoleon_use_admonition_for_references = False
+napoleon_use_ivar = False
+napoleon_use_param = True
+napoleon_use_rtype = True
+napoleon_preprocess_types = False
+napoleon_type_aliases = None
+napoleon_attr_annotations = True
+
+# Intersphinx configuration (link to other documentation)
+intersphinx_mapping = {
+ 'python': ('https://docs.python.org/3', None),
+ 'torch': ('https://pytorch.org/docs/stable/', None),
+ 'numpy': ('https://numpy.org/doc/stable/', None),
+ 'lightning': ('https://lightning.ai/docs/pytorch/stable/', None),
+}
+
+# MyST parser configuration (for Markdown)
+myst_enable_extensions = [
+ "colon_fence", # ::: for admonitions
+ "deflist", # Definition lists
+ "html_image", # HTML images
+ "linkify", # Auto-link URLs
+ "replacements", # Text replacements
+ "smartquotes", # Smart quotes
+ "tasklist", # Task lists
+]
+
+myst_heading_anchors = 3
+
+# nbsphinx configuration (for Jupyter notebooks)
+nbsphinx_execute = 'never' # Don't execute notebooks during build
+nbsphinx_allow_errors = True
+
+# Autosummary configuration
+autosummary_generate = True
+
+# Copy button configuration
+copybutton_prompt_text = r">>> |\.\.\. |\$ |In \[\d*\]: | {2,5}\.\.\.: | {5,8}: "
+copybutton_prompt_is_regexp = True
+
+# Syntax highlighting
+pygments_style = 'sphinx'
diff --git a/docs/source/getting_started/index.md b/docs/source/getting_started/index.md
new file mode 100644
index 0000000..2bf16d2
--- /dev/null
+++ b/docs/source/getting_started/index.md
@@ -0,0 +1,41 @@
+# Getting Started
+
+Welcome to torchTextClassifiers! This section will help you get up and running quickly.
+
+```{toctree}
+:maxdepth: 2
+
+installation
+quickstart
+```
+
+## What You'll Learn
+
+In this section, you'll learn:
+
+1. **Installation**: How to install torchTextClassifiers and its dependencies
+2. **Quick Start**: Build your first text classifier in minutes
+
+## Prerequisites
+
+Before you begin, make sure you have:
+
+- Python 3.11 or higher
+- Basic familiarity with Python and PyTorch
+- A working Python environment (we recommend using `uv` or `conda`)
+
+## Next Steps
+
+After completing the quick start, you can:
+
+- Explore the {doc}`../architecture/overview` to understand how the framework is designed
+- Follow {doc}`../tutorials/index` for specific use cases
+- Check the {doc}`../api/index` for detailed API documentation
+
+## Need Help?
+
+If you encounter any issues:
+
+- Check our {doc}`../tutorials/index` for common patterns
+- Visit our [GitHub Issues](https://github.com/InseeFrLab/torchTextClassifiers/issues) to report bugs
+- Join the discussion on [GitHub Discussions](https://github.com/InseeFrLab/torchTextClassifiers/discussions)
diff --git a/docs/source/getting_started/installation.md b/docs/source/getting_started/installation.md
new file mode 100644
index 0000000..e1e746e
--- /dev/null
+++ b/docs/source/getting_started/installation.md
@@ -0,0 +1,155 @@
+# Installation
+
+## Requirements
+
+torchTextClassifiers requires:
+
+- **Python**: 3.11 or higher
+- **PyTorch**: Installed automatically as a dependency of pytorch-lightning
+- **Operating System**: Linux, macOS, or Windows
+
+## Installation from Source
+
+Currently, torchTextClassifiers is available only from source. Clone the repository and install using [uv](https://github.com/astral-sh/uv), a fast Python package installer and resolver.
+
+```bash
+# Clone the repository
+git clone https://github.com/InseeFrLab/torchTextClassifiers.git
+cd torchTextClassifiers
+
+# Install with uv
+uv sync
+```
+
+## Optional Dependencies
+
+torchTextClassifiers comes with optional dependency groups for additional features:
+
+### Explainability Support
+
+For model interpretation and explainability features:
+
+```bash
+uv sync --extra explainability
+```
+
+This installs:
+- `captum`: For attribution analysis
+- `nltk`: For text preprocessing
+- `unidecode`: For text normalization
+
+### HuggingFace Integration
+
+To use HuggingFace tokenizers:
+
+```bash
+uv sync --extra huggingface
+```
+
+This installs:
+- `tokenizers`: Fast tokenizers
+- `transformers`: HuggingFace transformers
+- `datasets`: HuggingFace datasets
+
+### Text Preprocessing
+
+For additional text preprocessing utilities:
+
+```bash
+uv sync --extra preprocess
+```
+
+This installs:
+- `nltk`: Natural language toolkit
+- `unidecode`: Text normalization
+
+### All Optional Dependencies
+
+Install all extras at once:
+
+```bash
+uv sync --all-extras
+```
+
+### Development Dependencies
+
+If you want to contribute to the project:
+
+```bash
+uv sync --group dev
+```
+
+## Verification
+
+Verify your installation by running:
+
+```python
+import torchTextClassifiers
+print(torchTextClassifiers.__version__) # Should print: 0.0.0-dev
+```
+
+Or try a simple import:
+
+```python
+from torchTextClassifiers import torchTextClassifiers, ModelConfig, TrainingConfig
+from torchTextClassifiers.tokenizers import WordPieceTokenizer
+
+print("Installation successful!")
+```
+
+## GPU Support
+
+torchTextClassifiers uses PyTorch Lightning, which automatically detects and uses GPUs if available.
+
+To use GPUs, make sure you have:
+1. CUDA-compatible GPU
+2. CUDA toolkit installed
+3. PyTorch installed with CUDA support
+
+Check GPU availability:
+
+```python
+import torch
+print(f"GPU available: {torch.cuda.is_available()}")
+print(f"GPU count: {torch.cuda.device_count()}")
+```
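+
+On Apple silicon, the analogous check for PyTorch's MPS backend is:
+
+```python
+import torch
+print(f"MPS available: {torch.backends.mps.is_available()}")
+```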
+
+## Troubleshooting
+
+### Import Errors
+
+If you encounter import errors, make sure you've installed the package:
+
+```bash
+# Reinstall
+uv sync
+```
+
+### Dependency Conflicts
+
+If you have dependency conflicts, try creating a fresh virtual environment:
+
+```bash
+# Create new virtual environment with uv
+uv venv
+source .venv/bin/activate # On Windows: .venv\Scripts\activate
+uv sync
+```
+
+### PyTorch Installation Issues
+
+PyTorch is resolved automatically through pytorch-lightning. If you need a specific build (for example CPU-only, or a particular CUDA version), point uv at the matching PyTorch wheel index before syncing:
+
+```bash
+# For CPU-only PyTorch
+export UV_EXTRA_INDEX_URL="https://download.pytorch.org/whl/cpu"
+uv sync
+
+# For GPU (CUDA 11.8)
+export UV_EXTRA_INDEX_URL="https://download.pytorch.org/whl/cu118"
+uv sync
+```
+
+## Next Steps
+
+Now that you have torchTextClassifiers installed, head over to the {doc}`quickstart` to build your first classifier!
diff --git a/docs/source/getting_started/quickstart.md b/docs/source/getting_started/quickstart.md
new file mode 100644
index 0000000..c6fe663
--- /dev/null
+++ b/docs/source/getting_started/quickstart.md
@@ -0,0 +1,289 @@
+# Quick Start
+
+This guide will walk you through building your first text classifier with torchTextClassifiers in just a few minutes.
+
+## Overview
+
+In this quick start, you'll:
+
+1. Create sample training data
+2. Train a tokenizer
+3. Configure a model
+4. Train the classifier
+5. Make predictions
+
+## Complete Example
+
+Here's a complete, runnable example for sentiment analysis:
+
+```python
+import os
+os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" # For Mac users
+
+from torchTextClassifiers import torchTextClassifiers, ModelConfig, TrainingConfig
+from torchTextClassifiers.tokenizers import WordPieceTokenizer
+
+# Step 1: Prepare training data
+texts = [
+ "I love this product! It's amazing!",
+ "Terrible experience, would not recommend.",
+ "Pretty good, meets expectations.",
+ "Awful quality, very disappointed.",
+ "Excellent service and great value!",
+ "Not worth the money.",
+ "Fantastic! Exceeded my expectations!",
+ "Poor quality, broke after one use.",
+ "Highly recommend, very satisfied!",
+ "Waste of money, terrible product.",
+]
+labels = [1, 0, 1, 0, 1, 0, 1, 0, 1, 0] # 1 = positive, 0 = negative
+
+# Step 2: Create and train tokenizer
+print("Training tokenizer...")
+tokenizer = WordPieceTokenizer()
+tokenizer.train(texts, vocab_size=500, min_frequency=1)
+print(f"Tokenizer trained with vocabulary size: {len(tokenizer)}")
+
+# Step 3: Configure model
+model_config = ModelConfig(
+ embedding_dim=64, # Size of text embeddings
+ num_classes=2, # Binary classification
+)
+
+# Step 4: Configure training
+training_config = TrainingConfig(
+ num_epochs=10,
+ batch_size=4,
+ lr=1e-3,
+ patience_early_stopping=5,
+ accelerator="cpu", # Use "gpu" if available
+)
+
+# Step 5: Create classifier
+print("\nCreating classifier...")
+classifier = torchTextClassifiers(
+ tokenizer=tokenizer,
+ model_config=model_config,
+)
+
+# Step 6: Train the model
+print("\nTraining model...")
+classifier.train(
+ X_text=texts,
+ y=labels,
+ training_config=training_config,
+)
+
+# Step 7: Make predictions
+print("\nMaking predictions...")
+test_texts = [
+ "This is the best thing I've ever bought!",
+ "Completely useless, don't buy this.",
+ "Pretty decent for the price.",
+]
+
+predictions = classifier.predict(test_texts)
+probabilities = classifier.predict_proba(test_texts)
+
+# Display results
+print("\nPredictions:")
+for text, pred, proba in zip(test_texts, predictions, probabilities):
+ sentiment = "Positive" if pred == 1 else "Negative"
+ confidence = proba[pred]
+ print(f"\nText: {text}")
+ print(f"Sentiment: {sentiment} (confidence: {confidence:.2%})")
+```
+
+## Understanding the Code
+
+Let's break down each step:
+
+### Step 1: Prepare Training Data
+
+```python
+texts = ["I love this product!", "Terrible experience", ...]
+labels = [1, 0, ...] # Binary labels
+```
+
+- `texts`: List of text samples
+- `labels`: Corresponding labels (0 or 1 for binary classification)
+
+### Step 2: Train Tokenizer
+
+```python
+tokenizer = WordPieceTokenizer()
+tokenizer.train(texts, vocab_size=500, min_frequency=1)
+```
+
+The tokenizer learns to split text into subwords:
+- `vocab_size`: Maximum vocabulary size
+- `min_frequency`: Minimum frequency for a token to be included
+
+### Step 3: Configure Model
+
+```python
+model_config = ModelConfig(
+ embedding_dim=64,
+ num_classes=2,
+)
+```
+
+- `embedding_dim`: Dimension of the embedding vectors
+- `num_classes`: Number of output classes (2 for binary classification)
+
+### Step 4: Configure Training
+
+```python
+training_config = TrainingConfig(
+ num_epochs=10,
+ batch_size=4,
+ lr=1e-3,
+ patience_early_stopping=5,
+)
+```
+
+- `num_epochs`: Maximum number of training epochs
+- `batch_size`: Number of samples per batch
+- `lr`: Learning rate
+- `patience_early_stopping`: Stop if validation loss doesn't improve for this many epochs
+
+### Step 5-6: Create and Train
+
+```python
+classifier = torchTextClassifiers(
+ tokenizer=tokenizer,
+ model_config=model_config,
+)
+
+classifier.train(X_text=texts, y=labels, training_config=training_config)
+```
+
+The classifier orchestrates the entire training process using PyTorch Lightning.
+
+### Step 7: Make Predictions
+
+```python
+predictions = classifier.predict(test_texts)
+probabilities = classifier.predict_proba(test_texts)
+```
+
+- `predict()`: Returns class predictions
+- `predict_proba()`: Returns class probabilities
+
+## Expected Output
+
+When you run this example, you should see output similar to:
+
+```
+Training tokenizer...
+Tokenizer trained with vocabulary size: 245
+
+Creating classifier...
+
+Training model...
+Epoch 0: 100%|██████████| 3/3 [00:00<00:00, 15.23it/s, v_num=0]
+Epoch 1: 100%|██████████| 3/3 [00:00<00:00, 18.45it/s, v_num=0]
+...
+
+Making predictions...
+
+Predictions:
+
+Text: This is the best thing I've ever bought!
+Sentiment: Positive (confidence: 92.34%)
+
+Text: Completely useless, don't buy this.
+Sentiment: Negative (confidence: 88.76%)
+
+Text: Pretty decent for the price.
+Sentiment: Positive (confidence: 65.43%)
+```
+
+## Running with Your Own Data
+
+To use your own data, simply replace the `texts` and `labels` with your dataset:
+
+```python
+# Your own data
+texts = [...] # List of strings
+labels = [...] # List of integers (0, 1, 2, ... for multiclass)
+
+# For multiclass classification (e.g., 3 classes)
+model_config = ModelConfig(
+ embedding_dim=64,
+ num_classes=3, # Change this to your number of classes
+)
+```
+
+## Using Validation Data
+
+For better model evaluation, split your data into training and validation sets:
+
+```python
+from sklearn.model_selection import train_test_split
+
+# Split data
+X_train, X_val, y_train, y_val = train_test_split(
+ texts, labels, test_size=0.2, random_state=42
+)
+
+# Train with validation
+classifier.train(
+ X_text=X_train,
+ y=y_train,
+ X_val=X_val,
+ y_val=y_val,
+ training_config=training_config,
+)
+```
+
+## What's Next?
+
+Now that you've built your first classifier, you can:
+
+- **Explore tutorials**: See {doc}`../tutorials/index` for more advanced examples
+- **Understand the architecture**: Read {doc}`../architecture/overview` to learn how it works
+- **Customize your model**: Check the {doc}`../api/index` for all configuration options
+- **Add categorical features**: See {doc}`../tutorials/index` for combining text with other data
+
+## Common Issues
+
+### Small Dataset Warning
+
+If you see warnings about small datasets, that's expected for this quick example. For real applications, use larger datasets (hundreds or thousands of samples).
+
+### Training on GPU
+
+To use GPU acceleration:
+
+```python
+training_config = TrainingConfig(
+ ...
+ accelerator="gpu", # or "mps" for Mac M1/M2
+)
+```
+
+### Reproducibility
+
+For reproducible results, set seeds:
+
+```python
+import random
+import numpy as np
+import torch
+
+random.seed(42)
+np.random.seed(42)
+torch.manual_seed(42)
+```
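+
+PyTorch Lightning also offers a one-call alternative that seeds Python, NumPy, and torch together (the exact import path depends on which Lightning package layout you have installed):
+
+```python
+from lightning.pytorch import seed_everything  # or: from pytorch_lightning import seed_everything
+
+seed_everything(42)
+```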
+
+## Summary
+
+In this quick start, you:
+
+- ✅ Trained a WordPiece tokenizer
+- ✅ Configured a text classification model
+- ✅ Trained the model with PyTorch Lightning
+- ✅ Made predictions on new text
+
+You're now ready to explore more advanced features and build production-ready classifiers!
diff --git a/docs/source/index.md b/docs/source/index.md
new file mode 100644
index 0000000..bba98fc
--- /dev/null
+++ b/docs/source/index.md
@@ -0,0 +1,202 @@
+# torchTextClassifiers
+
+**A unified, extensible framework for text classification with categorical variables built on PyTorch and PyTorch Lightning.**
+
+```{toctree}
+:maxdepth: 2
+:hidden:
+
+getting_started/index
+architecture/index
+tutorials/index
+api/index
+```
+
+## Welcome
+
+torchTextClassifiers is a Python package designed to simplify building, training, and evaluating deep learning text classifiers. Whether you're working on sentiment analysis, document categorization, or any text classification task, this framework provides the tools you need while maintaining flexibility for customization.
+
+## Key Features
+
+::::{grid} 1 1 2 2
+:gutter: 3
+
+:::{grid-item-card} Complex Input Support
+:text-align: center
+
+Handle text data alongside categorical variables seamlessly
+:::
+
+:::{grid-item-card} Highly Customizable
+:text-align: center
+
+Use any tokenizer from HuggingFace or FastText's n-gram tokenizer
+:::
+
+:::{grid-item-card} Multiclass & Multilabel
+:text-align: center
+
+Support for both multiclass and multi-label classification tasks
+:::
+
+:::{grid-item-card} PyTorch Lightning
+:text-align: center
+
+Automated training with callbacks, early stopping, and logging
+:::
+
+:::{grid-item-card} Modular Architecture
+:text-align: center
+
+Mix and match components to create custom architectures
+:::
+
+:::{grid-item-card} Built-in Explainability
+:text-align: center
+
+Understand predictions using Captum integration
+:::
+
+::::
+
+## Quick Example
+
+Here's a minimal example to get you started:
+
+```python
+from torchTextClassifiers import torchTextClassifiers, ModelConfig, TrainingConfig
+from torchTextClassifiers.tokenizers import WordPieceTokenizer
+
+# Sample data
+texts = ["I love this product!", "Terrible experience", "It's okay"]
+labels = [1, 0, 1] # Binary classification
+
+# Create and train tokenizer
+tokenizer = WordPieceTokenizer()
+tokenizer.train(texts, vocab_size=1000)
+
+# Configure model
+model_config = ModelConfig(embedding_dim=64, num_classes=2)
+training_config = TrainingConfig(num_epochs=5, batch_size=16, lr=1e-3)
+
+# Create classifier
+classifier = torchTextClassifiers(
+ tokenizer=tokenizer,
+ model_config=model_config,
+)
+
+# Train
+classifier.train(texts, labels, training_config=training_config)
+
+# Predict
+predictions = classifier.predict(["Best product ever!"])
+```
+
+## Installation
+
+Currently, install from source:
+
+```bash
+# Clone the repository
+git clone https://github.com/InseeFrLab/torchTextClassifiers.git
+cd torchTextClassifiers
+
+# Install with uv (recommended)
+uv sync
+```
+
+### Optional Dependencies
+
+Install additional features as needed:
+
+```bash
+# For explainability features
+uv sync --extra explainability
+
+# For HuggingFace tokenizers
+uv sync --extra huggingface
+
+# For text preprocessing
+uv sync --extra preprocess
+
+# Install all extras
+uv sync --all-extras
+```
+
+## Get Started
+
+::::{grid} 1 1 2 2
+:gutter: 3
+
+:::{grid-item-card} {fas}`rocket` Quick Start
+:link: getting_started/quickstart
+:link-type: doc
+
+Get up and running in 5 minutes with a complete working example
+:::
+
+:::{grid-item-card} {fas}`layer-group` Architecture
+:link: architecture/overview
+:link-type: doc
+
+Understand the component-based pipeline and design philosophy
+:::
+
+:::{grid-item-card} {fas}`graduation-cap` Tutorials
+:link: tutorials/index
+:link-type: doc
+
+Step-by-step guides for different use cases and features
+:::
+
+:::{grid-item-card} {fas}`book` API Reference
+:link: api/index
+:link-type: doc
+
+Complete API documentation for all classes and functions
+:::
+
+::::
+
+## Why torchTextClassifiers?
+
+### Unified API
+
+Work with a consistent, simple API whether you're doing binary, multiclass, or multilabel classification. The `torchTextClassifiers` wrapper class handles all the complexity.
+
+### Flexible Components
+
+All components (`TextEmbedder`, `CategoricalVariableNet`, `ClassificationHead`) are standard `torch.nn.Module` objects. Mix and match them or create your own custom components.
+
+### Production Ready
+
+Built on PyTorch Lightning for robust training with automatic:
+- Early stopping
+- Checkpointing
+- Logging
+- Multi-GPU support
+
+### Explainability First
+
+Understand what your model is learning with built-in Captum integration for word-level and character-level attribution analysis.
+
+## Use Cases
+
+- **Sentiment Analysis**: Binary or multi-class sentiment classification
+- **Document Categorization**: Classify documents into multiple categories
+- **Mixed Feature Classification**: Combine text with categorical variables (e.g., user demographics)
+- **Multilabel Classification**: Assign multiple labels to each text sample
+- **Model Interpretation**: Understand which words contribute to predictions
+
+## License
+
+This project is licensed under the MIT License - see the [LICENSE](https://github.com/InseeFrLab/torchTextClassifiers/blob/main/LICENSE) file for details.
+
+## Contributing
+
+Contributions are welcome! Please feel free to submit a Pull Request on [GitHub](https://github.com/InseeFrLab/torchTextClassifiers).
+
+## Support
+
+- **GitHub Issues**: [Report bugs or request features](https://github.com/InseeFrLab/torchTextClassifiers/issues)
+- **GitHub Discussions**: [Ask questions and share ideas](https://github.com/InseeFrLab/torchTextClassifiers/discussions)
diff --git a/docs/source/tutorials/basic_classification.md b/docs/source/tutorials/basic_classification.md
new file mode 100644
index 0000000..4568588
--- /dev/null
+++ b/docs/source/tutorials/basic_classification.md
@@ -0,0 +1,415 @@
+# Binary Classification Tutorial
+
+Learn how to build a binary sentiment classifier for product reviews.
+
+## Learning Objectives
+
+By the end of this tutorial, you will be able to:
+
+- Create and train a WordPiece tokenizer
+- Configure a binary classification model
+- Train the model with validation data
+- Make predictions and evaluate performance
+- Understand the complete workflow from data to predictions
+
+## Prerequisites
+
+- Basic Python knowledge
+- torchTextClassifiers installed
+- Familiarity with classification concepts
+
+## Overview
+
+In this tutorial, we'll build a **sentiment classifier** that predicts whether a product review is positive or negative. We'll use:
+
+- **Dataset**: Product reviews (30 training, 8 validation, 10 test samples)
+- **Task**: Binary classification (positive vs. negative)
+- **Tokenizer**: WordPiece
+- **Architecture**: Simple text embedder + classification head
+
+## Complete Code
+
+Here's the complete code we'll walk through:
+
+```python
+import os
+import numpy as np
+import torch
+from torchTextClassifiers import ModelConfig, TrainingConfig, torchTextClassifiers
+from torchTextClassifiers.tokenizers import WordPieceTokenizer
+
+# For Mac M1/M2 users
+os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
+
+# Step 1: Prepare Data
+X_train = np.array([
+ "I love this product! It's amazing and works perfectly.",
+ "This is terrible. Worst purchase ever made.",
+ "Great quality and fast shipping. Highly recommend!",
+ "Poor quality, broke after one day. Very disappointed.",
+ "Excellent customer service and great value for money.",
+ "Overpriced and doesn't work as advertised.",
+ # ... (30 total samples)
+])
+y_train = np.array([1, 0, 1, 0, 1, 0, ...]) # 1=positive, 0=negative
+
+X_val = np.array([
+ "Good product, satisfied with purchase.",
+ "Not worth the money, poor quality.",
+ # ... (8 total samples)
+])
+y_val = np.array([1, 0, ...])
+
+X_test = np.array([
+ "This is an amazing product with great features!",
+ "Completely disappointed with this purchase.",
+ # ... (10 total samples)
+])
+y_test = np.array([1, 0, ...])
+
+# Step 2: Create and Train Tokenizer
+tokenizer = WordPieceTokenizer(vocab_size=5000, output_dim=128)
+tokenizer.train(X_train.tolist())
+
+# Step 3: Configure Model
+model_config = ModelConfig(
+ embedding_dim=50,
+ num_classes=2
+)
+
+# Step 4: Create Classifier
+classifier = torchTextClassifiers(
+ tokenizer=tokenizer,
+ model_config=model_config
+)
+
+# Step 5: Train Model
+training_config = TrainingConfig(
+ num_epochs=20,
+ batch_size=4,
+ lr=1e-3,
+ patience_early_stopping=5,
+ num_workers=0,
+)
+
+classifier.train(
+ X_train, y_train,
+ X_val, y_val,
+ training_config=training_config,
+ verbose=True
+)
+
+# Step 6: Make Predictions
+result = classifier.predict(X_test)
+predictions = result["prediction"].squeeze().numpy()
+confidence = result["confidence"].squeeze().numpy()
+
+# Step 7: Evaluate
+accuracy = (predictions == y_test).mean()
+print(f"Test accuracy: {accuracy:.3f}")
+```
+
+## Step-by-Step Walkthrough
+
+### Step 1: Prepare Your Data
+
+First, organize your data into training, validation, and test sets:
+
+```python
+X_train = np.array([
+ "I love this product! It's amazing and works perfectly.",
+ "This is terrible. Worst purchase ever made.",
+ # ... more samples
+])
+y_train = np.array([1, 0, ...]) # Binary labels
+```
+
+**Key Points:**
+
+- **Training set**: Used to train the model (30 samples)
+- **Validation set**: Used for early stopping and hyperparameter tuning (8 samples)
+- **Test set**: Used for final evaluation (10 samples)
+- **Labels**: 0 = negative, 1 = positive
+
+:::{tip}
+For real projects, use at least hundreds of samples per class. This example uses small numbers for demonstration.
+:::
+
+### Step 2: Create and Train Tokenizer
+
+The tokenizer converts text into numerical tokens:
+
+```python
+tokenizer = WordPieceTokenizer(vocab_size=5000, output_dim=128)
+tokenizer.train(X_train.tolist())
+```
+
+**Parameters:**
+
+- `vocab_size`: Maximum vocabulary size (5000 subwords)
+- `output_dim`: Output dimension for tokenized sequences (128 tokens max)
+
+**What happens during training:**
+
+1. Analyzes the training corpus
+2. Learns common subwords and character combinations
+3. Builds a vocabulary of frequent patterns
+
+:::{note}
+The tokenizer only sees the training data, never validation or test data, to avoid data leakage.
+:::
+
+### Step 3: Configure the Model
+
+Define your model architecture:
+
+```python
+model_config = ModelConfig(
+ embedding_dim=50,
+ num_classes=2
+)
+```
+
+**Parameters:**
+
+- `embedding_dim`: Dimension of learned text embeddings (50)
+- `num_classes`: Number of output classes (2 for binary classification)
+
+**Architecture:**
+
+The model will have:
+- Embedding layer: Maps tokens to 50-dimensional vectors
+- Pooling: Averages token embeddings
+- Classification head: Linear layer outputting 2 logits
+
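+Conceptually, the pieces line up like this minimal sketch (illustrative only, not the package's actual classes):
+
+```python
+import torch
+import torch.nn as nn
+
+class SketchClassifier(nn.Module):
+    """Illustrative embed -> mean-pool -> linear pipeline."""
+
+    def __init__(self, vocab_size: int, embedding_dim: int = 50, num_classes: int = 2):
+        super().__init__()
+        self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=0)
+        self.head = nn.Linear(embedding_dim, num_classes)
+
+    def forward(self, input_ids: torch.Tensor) -> torch.Tensor:
+        emb = self.embedding(input_ids)  # (batch, seq_len, embedding_dim)
+        pooled = emb.mean(dim=1)         # average over token embeddings
+        return self.head(pooled)         # (batch, num_classes) logits
+```
+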
+### Step 4: Create the Classifier
+
+Instantiate the classifier with the tokenizer and configuration:
+
+```python
+classifier = torchTextClassifiers(
+ tokenizer=tokenizer,
+ model_config=model_config
+)
+```
+
+This creates the complete pipeline: tokenizer → embedder → classifier.
+
+### Step 5: Configure and Run Training
+
+Set up training hyperparameters:
+
+```python
+training_config = TrainingConfig(
+ num_epochs=20, # Maximum training epochs
+ batch_size=4, # Samples per batch
+ lr=1e-3, # Learning rate
+ patience_early_stopping=5, # Stop if no improvement for 5 epochs
+ num_workers=0, # Data loading workers
+)
+```
+
+**Key Hyperparameters:**
+
+- **num_epochs**: How many times to iterate through the dataset
+- **batch_size**: Smaller = more frequent but noisier updates; larger = faster per epoch with smoother gradients
+- **lr (learning rate)**: How big the optimization steps are
+- **patience_early_stopping**: Prevents overfitting by stopping early
+
+Train the model:
+
+```python
+classifier.train(
+ X_train, y_train, # Training data
+ X_val, y_val, # Validation data
+ training_config=training_config,
+ verbose=True # Show training progress
+)
+```
+
+**Expected Output:**
+
+```
+Epoch 0: 100%|██████████| 8/8 [00:00<00:00, 25.32it/s, v_num=0]
+Epoch 1: 100%|██████████| 8/8 [00:00<00:00, 28.41it/s, v_num=0]
+...
+```
+
+:::{tip}
+Watch the validation metrics during training. If validation loss increases while training loss decreases, you may be overfitting.
+:::
+
+### Step 6: Make Predictions
+
+Use the trained model to predict on new data:
+
+```python
+result = classifier.predict(X_test)
+predictions = result["prediction"].squeeze().numpy()
+confidence = result["confidence"].squeeze().numpy()
+```
+
+**Output:**
+
+- `predictions`: Predicted class labels (0 or 1)
+- `confidence`: Confidence scores (0-1 range)
+
+**Example output:**
+
+```python
+predictions = [1, 0, 1, 0, 1, 0, 1, 0, 1, 0]
+confidence = [0.95, 0.88, 0.92, 0.76, 0.98, 0.85, 0.91, 0.79, 0.94, 0.87]
+```
+
+### Step 7: Evaluate Performance
+
+Calculate accuracy:
+
+```python
+accuracy = (predictions == y_test).mean()
+print(f"Test accuracy: {accuracy:.3f}")
+```
+
+Show detailed results:
+
+```python
+for i, (text, pred, true) in enumerate(zip(X_test, predictions, y_test)):
+ sentiment = "Positive" if pred == 1 else "Negative"
+ correct = "✅" if pred == true else "❌"
+ print(f"{i+1}. {correct} Predicted: {sentiment}")
+ print(f" Text: {text[:50]}...")
+```
+
+**Example output:**
+
+```
+1. ✅ Predicted: Positive
+ Text: This is an amazing product with great features...
+
+2. ✅ Predicted: Negative
+ Text: Completely disappointed with this purchase...
+
+Test accuracy: 0.900
+```
+
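+For per-class precision and recall on top of plain accuracy, scikit-learn's report works directly on these arrays:
+
+```python
+from sklearn.metrics import classification_report
+
+print(classification_report(y_test, predictions, target_names=["Negative", "Positive"]))
+```
+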
+## Understanding the Results
+
+### What Does Good Performance Look Like?
+
+- **Accuracy > 0.80**: Good for simple binary classification
+- **Accuracy > 0.90**: Excellent performance
+- **Confidence scores high**: Model is certain about predictions
+
+### When to Worry
+
+- **Accuracy < 0.60**: Model barely better than random guessing
+- **Validation loss increasing**: Possible overfitting
+- **Low confidence scores**: Model is uncertain
+
+## Customization Options
+
+### Using Different Tokenizers
+
+Try the NGramTokenizer (FastText-style):
+
+```python
+from torchTextClassifiers.tokenizers import NGramTokenizer
+
+tokenizer = NGramTokenizer(
+ vocab_size=5000,
+ min_n=3, # Minimum n-gram size
+ max_n=6, # Maximum n-gram size
+)
+tokenizer.train(X_train.tolist())
+```
+
+### Adjusting Model Size
+
+For better performance with more data:
+
+```python
+model_config = ModelConfig(
+ embedding_dim=128, # Larger embeddings
+ num_classes=2
+)
+```
+
+### Training Longer
+
+```python
+training_config = TrainingConfig(
+ num_epochs=50, # More epochs
+ batch_size=16, # Larger batches
+ lr=5e-4, # Lower learning rate
+ patience_early_stopping=10, # More patience
+)
+```
+
+### Using GPU
+
+If you have a GPU:
+
+```python
+training_config = TrainingConfig(
+ ...
+ accelerator="gpu", # Use GPU
+)
+```
+
+## Common Issues and Solutions
+
+### Issue: Low Accuracy
+
+**Solutions:**
+
+1. Increase `embedding_dim` (e.g., 128 or 256)
+2. Train for more epochs
+3. Collect more training data
+4. Try different learning rates (1e-4, 5e-4, 1e-3); a quick sweep sketch follows below
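+
+A minimal manual sweep over those rates, reusing the tokenizer, data, and `model_config` defined earlier in this tutorial (a sketch, not a tuned recipe):
+
+```python
+best_lr, best_acc = None, 0.0
+for lr in (1e-4, 5e-4, 1e-3):
+    cfg = TrainingConfig(num_epochs=20, batch_size=4, lr=lr, patience_early_stopping=5)
+    clf = torchTextClassifiers(tokenizer=tokenizer, model_config=model_config)
+    clf.train(X_train, y_train, X_val, y_val, training_config=cfg)
+    val_preds = clf.predict(X_val)["prediction"].squeeze().numpy()
+    acc = (val_preds == y_val).mean()
+    if acc > best_acc:
+        best_lr, best_acc = lr, acc
+print(f"Best lr: {best_lr} (val accuracy: {best_acc:.3f})")
+```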
+
+### Issue: Model Overfitting
+
+**Symptoms:** High training accuracy, low validation accuracy
+
+**Solutions:**
+
+1. Reduce `embedding_dim`
+2. Add more training data
+3. Reduce `patience_early_stopping` for earlier stopping
+4. Use data augmentation
+
+### Issue: Training Too Slow
+
+**Solutions:**
+
+1. Increase `batch_size` (if memory allows)
+2. Reduce `num_epochs`
+3. Use `accelerator="gpu"`
+4. Increase `num_workers` (for data loading)
+
+## Next Steps
+
+Now that you've built a binary classifier, you can:
+
+1. **Try multiclass classification**: See {doc}`multiclass_classification`
+2. **Add categorical features**: Learn about mixed features
+3. **Use explainability**: Understand which words drive predictions
+4. **Explore architecture**: Read {doc}`../architecture/overview`
+
+## Complete Working Example
+
+You can find the complete working example in the repository:
+- [examples/basic_classification.py](https://github.com/InseeFrLab/torchTextClassifiers/blob/main/examples/basic_classification.py)
+
+## Summary
+
+In this tutorial, you learned:
+
+- ✅ How to prepare training, validation, and test data
+- ✅ How to create and train a WordPiece tokenizer
+- ✅ How to configure a binary classification model
+- ✅ How to train the model with early stopping
+- ✅ How to make predictions and evaluate performance
+- ✅ How to customize hyperparameters
+
+You're now ready to build your own text classifiers!
diff --git a/docs/source/tutorials/explainability.md b/docs/source/tutorials/explainability.md
new file mode 100644
index 0000000..55c8c06
--- /dev/null
+++ b/docs/source/tutorials/explainability.md
@@ -0,0 +1,525 @@
+# Model Explainability
+
+Understand which words and characters drive your model's predictions using attribution analysis.
+
+## Learning Objectives
+
+By the end of this tutorial, you'll be able to:
+
+- Generate explanations for individual predictions
+- Visualize word-level and character-level contributions
+- Identify the most influential tokens
+- Use interactive explainability for debugging
+- Understand Captum integration for attribution analysis
+
+## Prerequisites
+
+- Completed {doc}`basic_classification` tutorial
+- Familiarity with model predictions
+- (Optional) Understanding of gradient-based attribution methods
+
+## What Is Explainability?
+
+**Model explainability** reveals which parts of the input contribute most to a prediction. For text classification:
+
+- **Word-level**: Which words influence the prediction?
+- **Character-level**: Which characters matter most?
+- **Attribution scores**: How much each token contributes (positive or negative)
+
+### Why Use Explainability?
+
+- ✅ **Debugging**: Identify if model focuses on correct features
+- ✅ **Trust**: Understand and validate model decisions
+- ✅ **Bias detection**: Discover unwanted correlations
+- ✅ **Feature engineering**: Guide feature selection
+
+## Complete Example
+
+```python
+import numpy as np
+from torchTextClassifiers import ModelConfig, TrainingConfig, torchTextClassifiers
+from torchTextClassifiers.tokenizers import WordPieceTokenizer
+
+# Training data
+X_train = np.array([
+ "I love this product",
+ "Great quality and excellent service",
+ "Amazing design and fantastic performance",
+ "This is terrible quality",
+ "Poor design and cheap materials",
+ "Awful experience with this product"
+])
+
+y_train = np.array([1, 1, 1, 0, 0, 0]) # 1 = Positive, 0 = Negative
+
+X_val = np.array([
+ "Good product with decent quality",
+ "Bad quality and poor service"
+])
+y_val = np.array([1, 0])
+
+# Create and train tokenizer
+tokenizer = WordPieceTokenizer(vocab_size=5000)
+tokenizer.train(X_train.tolist())
+
+# Create model
+model_config = ModelConfig(
+ embedding_dim=50,
+ num_classes=2
+)
+
+classifier = torchTextClassifiers(
+ tokenizer=tokenizer,
+ model_config=model_config
+)
+
+# Train
+training_config = TrainingConfig(
+ num_epochs=25,
+ batch_size=8,
+ lr=1e-3
+)
+
+classifier.train(
+ X_train, y_train, X_val, y_val,
+ training_config=training_config
+)
+
+# Test with explainability
+test_text = "This product is amazing!"
+
+result = classifier.predict(
+ np.array([test_text]),
+ explain=True # Enable explainability
+)
+
+# Extract results
+prediction = result["prediction"][0][0].item()
+confidence = result["confidence"][0][0].item()
+attributions = result["attributions"][0][0] # Token-level attributions
+
+print(f"Prediction: {'Positive' if prediction == 1 else 'Negative'}")
+print(f"Confidence: {confidence:.4f}")
+print(f"Attribution shape: {attributions.shape}")
+```
+
+## Step-by-Step Walkthrough
+
+### 1. Enable Explainability
+
+Add `explain=True` to `predict()`:
+
+```python
+result = classifier.predict(
+ X_test,
+ explain=True # Generate attribution scores
+)
+```
+
+### 2. Understanding the Output
+
+The result dictionary contains additional keys:
+
+```python
+{
+ "prediction": tensor, # Class predictions
+ "confidence": tensor, # Confidence scores
+ "attributions": tensor, # Token-level attribution scores
+ "offset_mapping": list, # Character positions of tokens
+ "word_ids": list # Word IDs for each token
+}
+```
+
+**Attributions shape:** `(batch_size, top_k, sequence_length)`
+- Higher values = stronger influence on prediction
+- Positive values = supports predicted class
+- Negative values = opposes predicted class
+
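+For instance, to pull out the most influential token positions for one sample (a sketch assuming the shapes above):
+
+```python
+import torch
+
+attrs = result["attributions"][0][0]        # (sequence_length,)
+top_vals, top_idx = torch.topk(attrs, k=3)  # three highest-attribution positions
+print(top_idx.tolist(), top_vals.tolist())
+```
+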
+### 3. Visualize Word Contributions
+
+Map token attributions to words:
+
+```python
+from torchTextClassifiers.utilities.plot_explainability import map_attributions_to_word
+
+# Get attribution data
+attributions = result["attributions"][0][0] # Shape: (seq_len,)
+word_ids = result["word_ids"][0] # List of word IDs
+
+# Map to words
+words = test_text.split()
+word_attributions = []
+
+for word_idx in range(len(words)):
+ # Find tokens belonging to this word
+ token_mask = [wid == word_idx for wid in word_ids]
+ token_attrs = attributions[token_mask]
+
+ if len(token_attrs) > 0:
+ word_attr = token_attrs.mean().item()
+ word_attributions.append((words[word_idx], word_attr))
+
+# Display results
+print("\nWord-Level Contributions:")
+print("-" * 50)
+for word, score in word_attributions:
+ print(f"{word:>15} | {'█' * int(score * 40)} {score:.4f}")
+```
+
+### 4. Character-Level Visualization
+
+For finer-grained analysis:
+
+```python
+from torchTextClassifiers.utilities.plot_explainability import map_attributions_to_char
+
+# Map token attributions to characters
+char_attributions = map_attributions_to_char(
+ attributions.unsqueeze(0), # Add batch dimension
+ result["offset_mapping"][0],
+ test_text
+)[0]
+
+# Visualize
+print("\nCharacter-Level Contributions:")
+for i, char in enumerate(test_text):
+ if i < len(char_attributions):
+ score = char_attributions[i]
+ bar = "█" * int(score * 20)
+ print(f"{char} | {bar} {score:.4f}")
+```
+
+## Complete Visualization Example
+
+Here's a complete function to visualize word importance:
+
+```python
+def explain_prediction(classifier, text):
+ """Generate and visualize explanations for a prediction."""
+ import numpy as np
+
+ # Get prediction with explainability
+ result = classifier.predict(
+ np.array([text]),
+ top_k=1,
+ explain=True
+ )
+
+ # Extract prediction info
+ prediction = result["prediction"][0][0].item()
+ confidence = result["confidence"][0][0].item()
+ sentiment = "Positive" if prediction == 1 else "Negative"
+
+ print(f"Text: '{text}'")
+ print(f"Prediction: {sentiment} (confidence: {confidence:.4f})")
+ print("\n" + "="*60)
+
+ # Get attributions
+ attributions = result["attributions"][0][0]
+ offset_mapping = result["offset_mapping"][0]
+
+ # Map to characters
+ from torchTextClassifiers.utilities.plot_explainability import map_attributions_to_char
+ char_attrs = map_attributions_to_char(
+ attributions.unsqueeze(0),
+ offset_mapping,
+ text
+ )[0]
+
+ # Group by words
+ words = text.split()
+ char_idx = 0
+ word_scores = []
+
+ for word in words:
+ word_len = len(word)
+ word_attrs = char_attrs[char_idx:char_idx + word_len]
+
+ if len(word_attrs) > 0:
+ avg_attr = sum(word_attrs) / len(word_attrs)
+ word_scores.append((word, avg_attr))
+
+ char_idx += word_len + 1 # +1 for space
+
+ # Visualize
+    max_score = max((abs(s) for _, s in word_scores), default=1.0) or 1.0  # guard against empty or all-zero scores
+
+ print("Word Contributions:")
+ print("-" * 60)
+ for word, score in word_scores:
+ bar_length = int((score / max_score) * 40)
+ bar = "█" * bar_length
+ print(f"{word:>15} | {bar:<40} {score:.4f}")
+
+ # Show top contributor
+ if word_scores:
+ top_word, top_score = max(word_scores, key=lambda x: x[1])
+ print("-" * 60)
+ print(f"Most influential: '{top_word}' (score: {top_score:.4f})")
+
+# Use it
+explain_prediction(classifier, "This product is amazing!")
+explain_prediction(classifier, "Poor quality and terrible service")
+```
+
+## Interactive Explainability
+
+Create an interactive tool for exploring predictions:
+
+```python
+def interactive_explainability(classifier):
+ """Interactive mode for exploring model predictions."""
+ print("\n" + "="*60)
+ print("Interactive Explainability Mode")
+ print("="*60)
+ print("Enter text to see predictions and explanations!")
+ print("Type 'quit' to exit.\n")
+
+ while True:
+ user_text = input("Enter text: ").strip()
+
+ if user_text.lower() in ['quit', 'exit', 'q']:
+ print("Goodbye!")
+ break
+
+ if not user_text:
+ print("Please enter some text.")
+ continue
+
+ try:
+ explain_prediction(classifier, user_text)
+ print("\n" + "-"*60 + "\n")
+ except Exception as e:
+ print(f"Error: {e}")
+
+# Use it
+interactive_explainability(classifier)
+```
+
+## Understanding Attribution Scores
+
+### What Do Scores Mean?
+
+- **High positive scores**: Strong support for predicted class
+- **Low/negative scores**: Opposition to predicted class
+- **Zero scores**: Neutral contribution
+
+### Example Interpretation
+
+For positive sentiment prediction:
+
+```
+Word Contributions:
+------------------------------------------------------------
+ This | █████ 0.1234
+ product | ████████████████ 0.4567
+ is | ██ 0.0543
+ amazing | ██████████████████████████████ 0.8901
+ ! | ███ 0.0876
+------------------------------------------------------------
+Most influential: 'amazing' (score: 0.8901)
+```
+
+**Interpretation:**
+- "amazing" strongly indicates positive sentiment (0.89)
+- "product" moderately supports positive (0.46)
+- "is" is nearly neutral (0.05)
+
+## Debugging with Explainability
+
+### Case 1: Unexpected Predictions
+
+```python
+test_text = "This product is not good"
+explain_prediction(classifier, test_text)
+
+# Output might show:
+# Word Contributions:
+# not | ████ 0.12 <- Low attribution!
+# good | ██████████ 0.45 <- High attribution for "good"
+```
+
+**Problem**: Model ignores "not", focuses on "good"
+
+**Solution**: Add more negation examples to training data
+
+### Case 2: Correct Predictions, Wrong Reasons
+
+```python
+test_text = "Product from China is excellent"
+explain_prediction(classifier, test_text)
+
+# If "China" has high attribution, model may have learned spurious correlation
+```
+
+**Problem**: Model uses irrelevant features
+
+**Solution**: Audit training data for bias, balance dataset
+
+### Case 3: Low Confidence
+
+```python
+test_text = "Product arrived on time"
+result = classifier.predict(np.array([test_text]), explain=True)
+confidence = result["confidence"][0][0].item() # Low confidence
+
+explain_prediction(classifier, test_text)
+# All words have similar low attribution scores
+```
+
+**Interpretation**: Text doesn't contain strong sentiment indicators
+
+**This is correct behavior**: Model appropriately uncertain
+
+## Advanced: Custom Attribution Methods
+
+By default, torchTextClassifiers uses integrated gradients. For custom attribution:
+
+```python
+from torchTextClassifiers.utilities.plot_explainability import generate_attributions
+from captum.attr import LayerIntegratedGradients
+
+# Access the underlying model
+model = classifier.model
+
+# Create custom attribution method
+attribution_method = LayerIntegratedGradients(
+ model,
+ model.text_embedder.embedding
+)
+
+# Generate attributions
+attributions = generate_attributions(
+ classifier,
+ texts=["Your text here"],
+ attribution_method=attribution_method
+)
+```
+
+## Common Issues
+
+### Issue 1: Explainability Fails
+
+**Error:** "explain=True requires captum package"
+
+**Solution:** Install explainability dependencies:
+```bash
+uv sync --extra explainability
+```
+
+### Issue 2: All Attributions Near Zero
+
+**Possible causes:**
+- Model not well-trained
+- Text contains no discriminative features
+- Attribution method sensitivity
+
+**Try:**
+- Train longer or with more data
+- Check prediction confidence
+- Verify model performance on test set
+
+### Issue 3: Inconsistent Attributions
+
+**Problem:** Same word has different attributions in different contexts
+
+**This is expected!** Attribution considers:
+- Surrounding context
+- Position in sentence
+- Interaction with other words
+
+## Best Practices
+
+1. **Always check confidence:** Low confidence = less reliable attributions
+2. **Compare multiple examples:** Look for patterns across predictions
+3. **Validate with domain knowledge:** Do highlighted words make sense?
+4. **Use for debugging, not blind trust:** Attributions are approximations
+5. **Check training data:** High attribution may reveal training biases
+
+## Real-World Use Cases
+
+### Sentiment Analysis
+
+```python
+positive_review = "Excellent product with amazing quality"
+negative_review = "Terrible product with poor quality"
+
+for review in [positive_review, negative_review]:
+ explain_prediction(classifier, review)
+ print("\n" + "="*60 + "\n")
+```
+
+Verify that sentiment words ("excellent", "terrible") have highest attribution.
+
+### Spam Detection
+
+```python
+spam_text = "Click here for free money now!"
+explain_prediction(spam_classifier, spam_text)
+```
+
+Check if "free", "click", "now" are highlighted (common spam indicators).
+
+### Topic Classification
+
+```python
+sports_text = "The team won the championship game"
+explain_prediction(topic_classifier, sports_text)
+```
+
+Verify "team", "championship", "game" drive sports prediction.
+
+## Customization
+
+### Batch Explainability
+
+Explain multiple texts at once:
+
+```python
+test_texts = [
+ "Great product",
+ "Terrible experience",
+ "Average quality"
+]
+
+result = classifier.predict(
+ np.array(test_texts),
+ explain=True
+)
+
+for i, text in enumerate(test_texts):
+ print(f"\nText {i+1}: {text}")
+ attributions = result["attributions"][i][0]
+ print(f"Max attribution: {attributions.max():.4f}")
+```
+
+### Save Explanations
+
+Export attributions for analysis:
+
+```python
+import json
+
+explanations = []
+for text in test_texts:
+ result = classifier.predict(np.array([text]), explain=True)
+
+ explanations.append({
+ "text": text,
+ "prediction": int(result["prediction"][0][0].item()),
+ "confidence": float(result["confidence"][0][0].item()),
+ "attributions": result["attributions"][0][0].tolist()
+ })
+
+# Save to JSON
+with open("explanations.json", "w") as f:
+ json.dump(explanations, f, indent=2)
+```
+
+## Summary
+
+**Key takeaways:**
+- Use `explain=True` to generate attribution scores
+- Visualize word and character contributions
+- High attribution = strong influence on prediction
+- Use explainability for debugging and validation
+- Check if model focuses on correct features
+
+Ready for multilabel classification? Continue to {doc}`multilabel_classification`!
diff --git a/docs/source/tutorials/index.md b/docs/source/tutorials/index.md
new file mode 100644
index 0000000..845c221
--- /dev/null
+++ b/docs/source/tutorials/index.md
@@ -0,0 +1,267 @@
+# Tutorials
+
+Step-by-step guides to learn torchTextClassifiers through practical examples.
+
+```{toctree}
+:maxdepth: 2
+
+basic_classification
+multiclass_classification
+mixed_features
+explainability
+multilabel_classification
+```
+
+## Overview
+
+These tutorials guide you through common text classification tasks, from basic binary classification to advanced multiclass scenarios.
+
+## Available Tutorials
+
+### Getting Started
+
+::::{grid} 1
+:gutter: 3
+
+:::{grid-item-card} {fas}`star` Binary Classification
+:link: basic_classification
+:link-type: doc
+
+**Recommended first tutorial**
+
+Build a sentiment classifier for product reviews. Learn the complete workflow from data preparation to evaluation.
+
+**What you'll learn:**
+- Creating and training tokenizers
+- Configuring models
+- Training with validation data
+- Making predictions
+- Evaluating performance
+
+**Difficulty:** Beginner | **Time:** 15 minutes
+:::
+
+::::
+
+### Intermediate Tutorials
+
+::::{grid} 1 1 2 2
+:gutter: 3
+
+:::{grid-item-card} {fas}`layer-group` Multiclass Classification
+:link: multiclass_classification
+:link-type: doc
+
+Classify text into 3+ categories with proper handling of class imbalance and evaluation metrics.
+
+**What you'll learn:**
+- Multiclass model configuration
+- Class distribution analysis
+- Reproducibility with seeds
+- Confusion matrices
+- Advanced evaluation metrics
+
+**Difficulty:** Intermediate | **Time:** 20 minutes
+:::
+
+:::{grid-item-card} {fas}`puzzle-piece` Mixed Features
+:link: mixed_features
+:link-type: doc
+
+Combine text with categorical variables for improved classification performance.
+
+**What you'll learn:**
+- Adding categorical features alongside text
+- Configuring categorical embeddings
+- Comparing performance improvements
+- Feature combination strategies
+
+**Difficulty:** Intermediate | **Time:** 25 minutes
+:::
+
+::::
+
+### Advanced Tutorials
+
+::::{grid} 1 1 2 2
+:gutter: 3
+
+:::{grid-item-card} {fas}`lightbulb` Explainability
+:link: explainability
+:link-type: doc
+
+Understand which words and characters drive your model's predictions.
+
+**What you'll learn:**
+- Generating attribution scores with Captum
+- Word-level and character-level visualizations
+- Identifying influential tokens
+- Interactive explainability mode
+
+**Difficulty:** Advanced | **Time:** 30 minutes
+:::
+
+:::{grid-item-card} {fas}`tags` Multilabel Classification
+:link: multilabel_classification
+:link-type: doc
+
+Assign multiple labels to each text sample for complex classification scenarios.
+
+**What you'll learn:**
+- Ragged lists vs. one-hot encoding
+- Configuring BCEWithLogitsLoss
+- Multilabel evaluation metrics
+- Handling variable labels per sample
+
+**Difficulty:** Advanced | **Time:** 30 minutes
+:::
+
+::::
+
+## Learning Path
+
+We recommend following this learning path:
+
+```{mermaid}
+graph LR
+ A[Quick Start] --> B[Binary Classification]
+ B --> C[Multiclass Classification]
+ C --> D[Mixed Features]
+ C --> F[Multilabel Classification]
+ D --> E[Explainability]
+ F --> E
+
+ style A fill:#e3f2fd
+ style B fill:#bbdefb
+ style C fill:#90caf9
+ style D fill:#64b5f6
+ style E fill:#1976d2
+ style F fill:#42a5f5
+```
+
+1. **Start with**: {doc}`../getting_started/quickstart` - Get familiar with the basics
+2. **Then**: {doc}`basic_classification` - Understand the complete workflow
+3. **Next**: {doc}`multiclass_classification` - Handle multiple classes
+4. **Branch out**: {doc}`mixed_features` for categorical features OR {doc}`multilabel_classification` for multiple labels
+5. **Master**: {doc}`explainability` - Understand your model's predictions
+
+## Tutorial Format
+
+Each tutorial follows a consistent structure:
+
+**Learning Objectives**
+: What you'll be able to do after completing the tutorial
+
+**Prerequisites**
+: What you need to know before starting
+
+**Complete Code**
+: Full working example you can copy and run
+
+**Step-by-Step Walkthrough**
+: Detailed explanation of each step
+
+**Customization**
+: How to adapt the code to your needs
+
+**Common Issues**
+: Troubleshooting tips and solutions
+
+**Next Steps**
+: Where to go after finishing
+
+## Tips for Learning
+
+### Run the Code
+
+Don't just read - run the examples! Modify them to see what happens:
+
+```python
+# Try different values
+model_config = ModelConfig(
+ embedding_dim=128, # Was 64 - what changes?
+ num_classes=2
+)
+```
+
+### Start Simple
+
+Begin with the Quick Start, then move to Binary Classification. Don't skip ahead!
+
+### Use Your Own Data
+
+Once you understand the examples, try them with your own text data:
+
+```python
+# Your data
+my_texts = ["your", "text", "samples"]
+my_labels = [0, 1, 0]
+
+# Same workflow
+classifier.train(my_texts, my_labels, training_config)
+```
+
+### Experiment
+
+- Try different tokenizers (WordPiece vs NGram)
+- Adjust hyperparameters (learning rate, embedding dim)
+- Compare model sizes
+- Test different batch sizes
+
+### Read the Errors
+
+Error messages are helpful! They often tell you exactly what's wrong:
+
+```python
+# Error: num_classes=2 but got label 3
+# Solution: Check your labels - should be 0, 1 (not 1, 2, 3)
+```
+
+## Getting Help
+
+Stuck on a tutorial? Here's how to get help:
+
+1. **Check Common Issues**: Each tutorial has a troubleshooting section
+2. **Read the API docs**: {doc}`../api/index` for detailed parameter descriptions
+3. **Review architecture**: {doc}`../architecture/overview` for how components work
+4. **Ask questions**: [GitHub Discussions](https://github.com/InseeFrLab/torchTextClassifiers/discussions)
+5. **Report bugs**: [GitHub Issues](https://github.com/InseeFrLab/torchTextClassifiers/issues)
+
+## Additional Resources
+
+### Example Scripts
+
+All tutorials are based on runnable examples in the repository:
+
+- [examples/basic_classification.py](https://github.com/InseeFrLab/torchTextClassifiers/blob/main/examples/basic_classification.py)
+- [examples/multiclass_classification.py](https://github.com/InseeFrLab/torchTextClassifiers/blob/main/examples/multiclass_classification.py)
+- [examples/using_additional_features.py](https://github.com/InseeFrLab/torchTextClassifiers/blob/main/examples/using_additional_features.py)
+- [examples/advanced_training.py](https://github.com/InseeFrLab/torchTextClassifiers/blob/main/examples/advanced_training.py)
+- [examples/simple_explainability_example.py](https://github.com/InseeFrLab/torchTextClassifiers/blob/main/examples/simple_explainability_example.py)
+
+### Jupyter Notebooks
+
+Interactive notebooks for hands-on learning:
+
+- [Basic example notebook](https://github.com/InseeFrLab/torchTextClassifiers/blob/main/notebooks/example.ipynb)
+- [Multilabel classification notebook](https://github.com/InseeFrLab/torchTextClassifiers/blob/main/notebooks/multilabel_classification.ipynb)
+
+## Contributing
+
+Want to contribute a tutorial? We welcome:
+
+- New use cases
+- Alternative approaches
+- Real-world examples
+- Performance tips
+
+See our [contributing guidelines](https://github.com/InseeFrLab/torchTextClassifiers/blob/main/CONTRIBUTING.md) to get started!
+
+## What's Next?
+
+Ready to start? Choose your path:
+
+- **New to text classification?** Start with {doc}`../getting_started/quickstart`
+- **Want to dive deeper?** Begin with {doc}`basic_classification`
+- **Ready for multiclass?** Jump to {doc}`multiclass_classification`
+- **Need API details?** Check {doc}`../api/index`
diff --git a/docs/source/tutorials/mixed_features.md b/docs/source/tutorials/mixed_features.md
new file mode 100644
index 0000000..163f3d7
--- /dev/null
+++ b/docs/source/tutorials/mixed_features.md
@@ -0,0 +1,450 @@
+# Mixed Features Classification
+
+Learn how to combine text with categorical variables for improved classification performance.
+
+## Learning Objectives
+
+By the end of this tutorial, you'll be able to:
+
+- Combine text and categorical features in a single model
+- Configure categorical embeddings
+- Compare performance with and without categorical features
+- Understand when categorical features improve results
+
+## Prerequisites
+
+- Completed {doc}`basic_classification` tutorial
+- Familiarity with categorical data (e.g., user demographics, product categories)
+- Understanding of embeddings
+
+## What Are Categorical Features?
+
+Categorical features are non-numeric variables like:
+- **User attributes**: Age group, location, membership tier
+- **Product metadata**: Category, brand, seller
+- **Document properties**: Source, type, language
+
+These features can significantly improve classification when they contain relevant information.
+
+## When to Use Categorical Features
+
+✅ **Good use cases:**
+- Product descriptions + (category, brand)
+- Reviews + (user location, verified purchase)
+- News articles + (source, publication date)
+
+❌ **Poor use cases:**
+- Text already contains the categorical information
+- Random or high-cardinality features (e.g., user IDs)
+- Categorical features with no relationship to labels
+
+## Complete Example
+
+```python
+import numpy as np
+from sklearn.preprocessing import LabelEncoder
+from sklearn.model_selection import train_test_split
+
+from torchTextClassifiers import ModelConfig, TrainingConfig, torchTextClassifiers
+from torchTextClassifiers.tokenizers import WordPieceTokenizer
+
+# Sample data: Product reviews with category
+texts = [
+ "Great phone with excellent camera",
+ "Battery dies too quickly",
+ "Love this laptop's performance",
+ "Screen quality is poor",
+ "Best headphones I've ever owned",
+ "Sound quality is disappointing",
+ "Fast shipping and great quality",
+ "Product arrived damaged"
+]
+
+# Categorical feature: Product category (0=Electronics, 1=Audio)
+categories = [0, 0, 0, 0, 1, 1, 0, 0]
+
+# Labels: Positive (1) or Negative (0)
+labels = [1, 0, 1, 0, 1, 0, 1, 0]
+
+# Prepare data
+X_text = np.array(texts)
+X_categorical = np.array(categories).reshape(-1, 1) # Shape: (n_samples, 1)
+y = np.array(labels)
+
+# Split data
+X_text_train, X_text_test, X_cat_train, X_cat_test, y_train, y_test = train_test_split(
+ X_text, X_categorical, y, test_size=0.25, random_state=42
+)
+
+# Create tokenizer
+tokenizer = WordPieceTokenizer(vocab_size=1000)
+tokenizer.train(X_text_train.tolist())
+
+# Configure model WITH categorical features
+model_config = ModelConfig(
+ embedding_dim=64,
+ num_classes=2,
+ categorical_vocabulary_sizes=[2], # 2 categories (Electronics, Audio)
+ categorical_embedding_dims=[8], # Embed each category into 8 dimensions
+)
+
+# Create classifier
+classifier = torchTextClassifiers(
+ tokenizer=tokenizer,
+ model_config=model_config
+)
+
+# Training configuration
+training_config = TrainingConfig(
+ num_epochs=20,
+ batch_size=4,
+ lr=1e-3
+)
+
+# Combine text and categorical features
+X_train_mixed = np.column_stack([X_text_train, X_cat_train])
+X_test_mixed = np.column_stack([X_text_test, X_cat_test])
+
+# Train model
+classifier.train(
+ X_train_mixed, y_train,
+ training_config=training_config
+)
+
+# Predict
+result = classifier.predict(X_test_mixed)
+predictions = result["prediction"].squeeze().numpy()
+
+# Evaluate
+accuracy = (predictions == y_test).mean()
+print(f"Test Accuracy: {accuracy:.3f}")
+```
+
+## Step-by-Step Walkthrough
+
+### 1. Prepare Categorical Features
+
+Categorical features must be **encoded as integers** (0, 1, 2, ...):
+
+```python
+from sklearn.preprocessing import LabelEncoder
+
+# Example: Encode product categories
+categories = ["Electronics", "Audio", "Electronics", "Audio"]
+encoder = LabelEncoder()
+categories_encoded = encoder.fit_transform(categories)
+# Result: [1, 0, 1, 0] -- LabelEncoder sorts classes alphabetically, so Audio=0, Electronics=1
+```
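+
+The fitted encoder can also map integer predictions back to the original category names:
+
+```python
+encoder.inverse_transform([1, 0])
+# Result: ['Electronics', 'Audio']
+```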
+
+Shape your categorical data as `(n_samples, n_categorical_features)`:
+
+```python
+# Single categorical feature
+X_categorical = categories_encoded.reshape(-1, 1)
+
+# Multiple categorical features
+X_categorical = np.column_stack([
+ categories_encoded,
+ brands_encoded,
+ regions_encoded
+]) # Shape: (n_samples, 3)
+```
+
+### 2. Configure Categorical Embeddings
+
+Specify vocabulary sizes and embedding dimensions:
+
+```python
+model_config = ModelConfig(
+ embedding_dim=64, # For text
+ num_classes=2,
+ categorical_vocabulary_sizes=[10, 5, 20], # Vocab size for each feature
+ categorical_embedding_dims=[8, 4, 16] # Embedding dim for each feature
+)
+```
+
+**Rule of thumb for embedding dimensions:**
+```python
+embedding_dim = min(50, vocabulary_size // 2)
+```
+
+Examples:
+- 10 categories → embedding_dim = 5
+- 100 categories → embedding_dim = 50
+- 1000 categories → embedding_dim = 50 (capped)
+
+### 3. Combine Features
+
+Stack text and categorical data:
+
+```python
+# For training
+X_train_mixed = np.column_stack([X_text_train, X_cat_train])
+
+# For prediction
+X_test_mixed = np.column_stack([X_text_test, X_cat_test])
+```
+
+The framework automatically separates text (first column) from categorical features (remaining columns).
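+
+A quick sketch of the resulting layout. Note that NumPy coerces the stacked array to a single string dtype; this is expected, since the framework reads the first column back as text and the remaining columns as integer codes:
+
+```python
+import numpy as np
+
+X_text = np.array(["great phone", "poor battery"])
+X_cat = np.array([[0], [1]])
+
+X_mixed = np.column_stack([X_text, X_cat])
+print(X_mixed.shape)  # (2, 2)
+print(X_mixed[0])     # ['great phone' '0'] -- one text column, one categorical column
+```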
+
+### 4. Train and Predict
+
+Training and prediction work the same way:
+
+```python
+# Train
+classifier.train(X_train_mixed, y_train, training_config=training_config)
+
+# Predict
+result = classifier.predict(X_test_mixed)
+```
+
+## Comparison: Text-Only vs. Mixed Features
+
+Let's compare performance:
+
+```python
+# Text-only model
+model_config_text_only = ModelConfig(
+ embedding_dim=64,
+ num_classes=2
+)
+
+classifier_text_only = torchTextClassifiers(
+ tokenizer=tokenizer,
+ model_config=model_config_text_only
+)
+
+classifier_text_only.train(X_text_train, y_train, training_config=training_config)
+result_text_only = classifier_text_only.predict(X_text_test)
+accuracy_text_only = (result_text_only["prediction"].squeeze().numpy() == y_test).mean()
+
+# Mixed features model (from above)
+accuracy_mixed = (predictions == y_test).mean()
+
+print(f"Text-Only Accuracy: {accuracy_text_only:.3f}")
+print(f"Mixed Features Accuracy: {accuracy_mixed:.3f}")
+print(f"Improvement: {(accuracy_mixed - accuracy_text_only):+.3f}")
+```
+
+## Combination Strategies
+
+The framework offers different ways to combine categorical embeddings:
+
+### AVERAGE_AND_CONCAT (Default)
+
+Average all categorical embeddings, then concatenate with text:
+
+```python
+from torchTextClassifiers.model.components import CategoricalForwardType
+
+model_config = ModelConfig(
+ embedding_dim=64,
+ num_classes=2,
+ categorical_vocabulary_sizes=[10, 5],
+ categorical_embedding_dims=[8, 4],
+ categorical_forward_type=CategoricalForwardType.AVERAGE_AND_CONCAT
+)
+```
+
+**Output size:** `text_embedding_dim + avg(categorical_embedding_dims)`
+
+### CONCATENATE_ALL
+
+Concatenate each categorical embedding separately:
+
+```python
+model_config = ModelConfig(
+ # ... same as above ...
+ categorical_forward_type=CategoricalForwardType.CONCATENATE_ALL
+)
+```
+
+**Output size:** `text_embedding_dim + sum(categorical_embedding_dims)`
+
+**When to use:** Each categorical variable has unique importance.
+
+### SUM_TO_TEXT
+
+Sum all categorical embeddings first:
+
+```python
+model_config = ModelConfig(
+ # ... same as above ...
+ categorical_forward_type=CategoricalForwardType.SUM_TO_TEXT
+)
+```
+
+**Output size:** `text_embedding_dim + categorical_embedding_dim`
+
+**When to use:** To minimize model size.
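+
+To make the output-size formulas above concrete, here is a tiny sketch that simply evaluates them (the strategy names mirror `CategoricalForwardType`; where a single categorical dimension is implied, equal dimensions are assumed):
+
+```python
+def fused_dim(text_dim: int, cat_dims: list[int], strategy: str) -> int:
+    """Evaluate the documented output-size formulas for each strategy."""
+    if strategy == "AVERAGE_AND_CONCAT":   # text + average of categorical dims
+        return text_dim + sum(cat_dims) // len(cat_dims)
+    if strategy == "CONCATENATE_ALL":      # text + every categorical embedding
+        return text_dim + sum(cat_dims)
+    if strategy == "SUM_TO_TEXT":          # text + one summed categorical vector
+        return text_dim + cat_dims[0]      # assumes equal categorical dims
+    raise ValueError(f"Unknown strategy: {strategy}")
+
+print(fused_dim(64, [8, 4], "CONCATENATE_ALL"))  # 76
+```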
+
+## Real-World Example: AG News with Source
+
+```python
+import numpy as np
+import pandas as pd
+from sklearn.model_selection import train_test_split
+from sklearn.preprocessing import LabelEncoder
+
+from torchTextClassifiers import ModelConfig, TrainingConfig, torchTextClassifiers
+from torchTextClassifiers.tokenizers import WordPieceTokenizer
+
+# Load AG News dataset
+df = pd.read_parquet("path/to/ag_news.parquet")
+df = df.sample(10000, random_state=42)
+
+# Combine title and description
+df['text'] = df['title'] + ' ' + df['description']
+
+# Encode news source as categorical feature
+source_encoder = LabelEncoder()
+df['source_encoded'] = source_encoder.fit_transform(df['source'])
+
+# Prepare data
+X_text = df['text'].values
+X_categorical = df['source_encoded'].values.reshape(-1, 1)
+y_encoded = LabelEncoder().fit_transform(df['category'])
+
+# Split data
+X_text_train, X_text_test, X_cat_train, X_cat_test, y_train, y_test = train_test_split(
+ X_text, X_categorical, y_encoded, test_size=0.2, random_state=42
+)
+
+# Train model
+tokenizer = WordPieceTokenizer(vocab_size=5000)
+tokenizer.train(X_text_train.tolist())
+
+n_sources = len(source_encoder.classes_)
+n_categories = len(np.unique(y_encoded))
+
+model_config = ModelConfig(
+ embedding_dim=128,
+ num_classes=n_categories,
+ categorical_vocabulary_sizes=[n_sources],
+ categorical_embedding_dims=[min(50, n_sources // 2)]
+)
+
+classifier = torchTextClassifiers(tokenizer=tokenizer, model_config=model_config)
+
+X_train_mixed = np.column_stack([X_text_train, X_cat_train])
+X_test_mixed = np.column_stack([X_text_test, X_cat_test])
+
+training_config = TrainingConfig(
+ num_epochs=50,
+ batch_size=128,
+ lr=1e-3,
+ patience_early_stopping=3
+)
+
+classifier.train(X_train_mixed, y_train, training_config=training_config)
+
+# Evaluate
+result = classifier.predict(X_test_mixed)
+accuracy = (result["prediction"].squeeze().numpy() == y_test).mean()
+print(f"Test Accuracy: {accuracy:.3f}")
+```
+
+## Common Issues
+
+### Issue 1: Shape Mismatch
+
+**Error:** "Expected 2D array, got 1D array"
+
+**Solution:** Reshape single features:
+```python
+X_categorical = categories.reshape(-1, 1) # Add column dimension
+```
+
+### Issue 2: Non-Integer Categories
+
+**Error:** "Expected integer values"
+
+**Solution:** Use `LabelEncoder`:
+```python
+encoder = LabelEncoder()
+categories_encoded = encoder.fit_transform(categories)
+```
+
+### Issue 3: Missing Vocabulary Sizes
+
+**Error:** "Must specify categorical_vocabulary_sizes"
+
+**Solution:** Provide vocab size for each categorical feature:
+```python
+vocab_sizes = [int(np.max(X_cat_train[:, i]) + 1) for i in range(X_cat_train.shape[1])]
+model_config = ModelConfig(
+    embedding_dim=64,
+    num_classes=2,
+    categorical_vocabulary_sizes=vocab_sizes,
+ categorical_embedding_dims=[min(50, v // 2) for v in vocab_sizes]
+)
+```
+
+### Issue 4: No Performance Improvement
+
+**Possible reasons:**
+- Categorical features not predictive of labels
+- Text already contains categorical information
+- Need more training data
+- Categorical embeddings too small
+
+**Try:**
+- Increase embedding dimensions
+- Check feature-label correlation
+- Try different combination strategies
+
+## Customization
+
+### Custom Embedding Dimensions
+
+Different dimensions for different importance:
+
+```python
+model_config = ModelConfig(
+ embedding_dim=128,
+ num_classes=5,
+ categorical_vocabulary_sizes=[100, 10, 50],
+ categorical_embedding_dims=[32, 4, 16] # Vary by importance
+)
+```
+
+### With Attention
+
+Combine categorical features with attention-based text embeddings:
+
+```python
+from torchTextClassifiers.model.components import AttentionConfig
+
+attention_config = AttentionConfig(
+ n_embd=128,
+ n_head=8,
+ n_layer=3
+)
+
+model_config = ModelConfig(
+ embedding_dim=128,
+ num_classes=5,
+ attention_config=attention_config,
+ categorical_vocabulary_sizes=[100],
+ categorical_embedding_dims=[32]
+)
+```
+
+## Best Practices
+
+1. **Start simple:** Begin with text-only model, add categorical features if needed
+2. **Check correlation:** Ensure categorical features correlate with labels
+3. **Size embeddings sensibly:** Use embedding_dim ≈ min(50, vocabulary_size // 2)
+4. **Avoid overfitting:** Don't use too many high-dimensional categorical features
+5. **Compare performance:** Always compare mixed vs. text-only models
+
+## Next Steps
+
+- **Explainability**: Learn which features (text or categorical) drive predictions in {doc}`explainability`
+- **Multilabel**: Apply mixed features to multilabel tasks in {doc}`multilabel_classification`
+- **Advanced Training**: Explore hyperparameter tuning with mixed features
+
+## Summary
+
+**Key takeaways:**
+- Categorical features can improve classification performance
+- Encode categories as integers (0, 1, 2, ...)
+- Configure vocabulary sizes and embedding dimensions
+- Combine text and categorical data using `np.column_stack`
+- Compare performance to validate improvement
+
+Ready to understand your model's predictions? Continue to {doc}`explainability`!
diff --git a/docs/source/tutorials/multiclass_classification.md b/docs/source/tutorials/multiclass_classification.md
new file mode 100644
index 0000000..6db8dc1
--- /dev/null
+++ b/docs/source/tutorials/multiclass_classification.md
@@ -0,0 +1,459 @@
+# Multiclass Classification Tutorial
+
+Learn how to build a multiclass sentiment classifier with 3 classes: negative, neutral, and positive.
+
+## Learning Objectives
+
+By the end of this tutorial, you will be able to:
+
+- Handle multiclass classification problems (3+ classes)
+- Configure models for multiple output classes
+- Ensure reproducible results with proper seeding
+- Evaluate multiclass performance
+- Understand class distribution and balance
+
+## Prerequisites
+
+- Completion of {doc}`basic_classification` tutorial (recommended)
+- Basic understanding of classification
+- torchTextClassifiers installed
+
+## Overview
+
+In this tutorial, we'll build a **3-class sentiment classifier** that categorizes product reviews as:
+
+- **Negative** (class 0): Bad reviews
+- **Neutral** (class 1): Mixed or moderate reviews
+- **Positive** (class 2): Good reviews
+
+**Key Difference from Binary Classification:**
+
+- Binary: 2 classes (positive/negative)
+- Multiclass: 3+ classes (negative/neutral/positive)
+
+## Complete Code
+
+```python
+import os
+import numpy as np
+import torch
+from pytorch_lightning import seed_everything
+from torchTextClassifiers import ModelConfig, TrainingConfig, torchTextClassifiers
+from torchTextClassifiers.tokenizers import WordPieceTokenizer
+
+# Step 1: Set Seeds for Reproducibility
+SEED = 42
+os.environ['PYTHONHASHSEED'] = str(SEED)
+seed_everything(SEED, workers=True)
+torch.backends.cudnn.deterministic = True
+torch.use_deterministic_algorithms(True, warn_only=True)
+
+# Step 2: Prepare Multi-class Data
+X_train = np.array([
+ # Negative (class 0)
+ "This product is terrible and I hate it completely.",
+ "Worst purchase ever. Total waste of money.",
+ "Absolutely awful quality. Very disappointed.",
+ "Poor service and terrible product quality.",
+ "I regret buying this. Complete failure.",
+
+ # Neutral (class 1)
+ "The product is okay, nothing special though.",
+ "It works but could be better designed.",
+ "Average quality for the price point.",
+ "Not bad but not great either.",
+ "It's fine, meets basic expectations.",
+
+ # Positive (class 2)
+ "Excellent product! Highly recommended!",
+ "Amazing quality and great customer service.",
+ "Perfect! Exactly what I was looking for.",
+ "Outstanding value and excellent performance.",
+ "Love it! Will definitely buy again."
+])
+
+y_train = np.array([0, 0, 0, 0, 0, # negative
+ 1, 1, 1, 1, 1, # neutral
+ 2, 2, 2, 2, 2]) # positive
+
+# Validation data
+X_val = np.array([
+ "Bad quality, not recommended.",
+ "It's okay, does the job.",
+ "Great product, very satisfied!"
+])
+y_val = np.array([0, 1, 2])
+
+# Test data
+X_test = np.array([
+ "This is absolutely horrible!",
+ "It's an average product, nothing more.",
+ "Fantastic! Love every aspect of it!",
+ "Really poor design and quality.",
+ "Works well, good value for money.",
+ "Outstanding product with amazing features!"
+])
+y_test = np.array([0, 1, 2, 0, 1, 2])
+
+# Step 3: Create and Train Tokenizer
+tokenizer = WordPieceTokenizer(vocab_size=5000, output_dim=128)
+tokenizer.train(X_train.tolist())
+
+# Step 4: Configure Model for 3 Classes
+model_config = ModelConfig(
+ embedding_dim=64,
+ num_classes=3 # KEY: 3 classes for multiclass
+)
+
+# Step 5: Create Classifier
+classifier = torchTextClassifiers(
+ tokenizer=tokenizer,
+ model_config=model_config
+)
+
+# Step 6: Train Model
+training_config = TrainingConfig(
+ num_epochs=30,
+ batch_size=8,
+ lr=1e-3,
+ patience_early_stopping=7,
+ num_workers=0,
+ trainer_params={'deterministic': True}
+)
+
+classifier.train(
+ X_train, y_train,
+ X_val, y_val,
+ training_config=training_config,
+ verbose=True
+)
+
+# Step 7: Make Predictions
+result = classifier.predict(X_test)
+predictions = result["prediction"].squeeze().numpy()
+
+# Step 8: Evaluate
+accuracy = (predictions == y_test).mean()
+print(f"Test accuracy: {accuracy:.3f}")
+
+# Show results with class names
+class_names = ["Negative", "Neutral", "Positive"]
+for text, pred, true in zip(X_test, predictions, y_test):
+ predicted = class_names[pred]
+ actual = class_names[true]
+ status = "✅" if pred == true else "❌"
+ print(f"{status} Predicted: {predicted}, True: {actual}")
+ print(f" Text: {text}")
+```
+
+## Step-by-Step Walkthrough
+
+### Step 1: Ensuring Reproducibility
+
+For consistent results across runs, set seeds properly:
+
+```python
+SEED = 42
+os.environ['PYTHONHASHSEED'] = str(SEED)
+seed_everything(SEED, workers=True)
+torch.backends.cudnn.deterministic = True
+torch.use_deterministic_algorithms(True, warn_only=True)
+```
+
+**Why this matters:**
+
+- Makes experiments reproducible
+- Enables fair comparison of hyperparameters
+- Helps debug model behavior
+
+:::{tip}
+Always set seeds when reporting results or comparing models!
+:::
+
+### Step 2: Preparing Multiclass Data
+
+Unlike binary classification, you now have **3 classes**:
+
+```python
+y_train = np.array([0, 0, 0, 0, 0, # negative (class 0)
+ 1, 1, 1, 1, 1, # neutral (class 1)
+ 2, 2, 2, 2, 2]) # positive (class 2)
+```
+
+**Important:** Class labels should be:
+- **Integers**: 0, 1, 2, ... (not strings)
+- **Consecutive**: Start from 0, no gaps (0, 1, 2 not 0, 2, 5)
+- **Balanced**: Ideally equal samples per class
+
+**Check class distribution:**
+
+```python
+print(f"Negative: {sum(y_train==0)}")
+print(f"Neutral: {sum(y_train==1)}")
+print(f"Positive: {sum(y_train==2)}")
+```
+
+Output:
+```
+Negative: 5
+Neutral: 5
+Positive: 5
+```
+
+:::{note}
+This example has perfectly balanced classes (5 samples each). Real datasets are often imbalanced.
+:::
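+
+Before training, it is also worth asserting that labels really are consecutive integers starting at zero (a small sanity-check sketch):
+
+```python
+import numpy as np
+
+classes = np.unique(y_train)
+assert classes[0] == 0 and np.all(np.diff(classes) == 1), \
+    f"Labels must be 0..{len(classes) - 1} with no gaps, got {classes}"
+```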
+
+### Step 3-4: Model Configuration
+
+The **only** difference from binary classification:
+
+```python
+model_config = ModelConfig(
+ embedding_dim=64,
+ num_classes=3 # Change from 2 to 3
+)
+```
+
+**Under the hood:**
+
+- Binary: Uses 2 output neurons + CrossEntropyLoss
+- Multiclass: Uses 3 output neurons + CrossEntropyLoss
+- The loss function handles both automatically!
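+
+For intuition, here is what happens to the 3 logits at prediction time (illustrative values):
+
+```python
+import torch
+
+logits = torch.tensor([[-0.3, 1.2, 0.4]])  # raw scores for one sample
+probs = torch.softmax(logits, dim=1)       # probabilities, sum to 1
+pred = probs.argmax(dim=1)                 # most likely class
+print(probs)  # approximately tensor([[0.1334, 0.5979, 0.2687]])
+print(pred)   # tensor([1]) -> Neutral
+```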
+
+### Step 5-6: Training
+
+Training is identical to binary classification:
+
+```python
+classifier.train(
+ X_train, y_train,
+ X_val, y_val,
+ training_config=training_config
+)
+```
+
+**Training process:**
+
+1. Forward pass: Text → Embeddings → Logits (3 values)
+2. Loss calculation: Compare logits to true labels
+3. Backward pass: Compute gradients
+4. Update weights: Optimizer step
+5. Repeat for each batch/epoch
+
+### Step 7: Making Predictions
+
+Predictions now return values in {0, 1, 2}:
+
+```python
+result = classifier.predict(X_test)
+predictions = result["prediction"].squeeze().numpy()
+# Example: [0, 1, 2, 0, 1, 2]
+```
+
+**Probability interpretation:**
+
+You can also get probabilities for each class:
+
+```python
+probabilities = result["confidence"].squeeze().numpy()
+# Shape: (num_samples, 3)
+# Each row sums to 1.0
+```
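+
+From these probabilities you can recover both the predicted class and how confident the model is:
+
+```python
+top_class = probabilities.argmax(axis=1)  # predicted class per sample
+top_conf = probabilities.max(axis=1)      # its probability
+for c, p in zip(top_class, top_conf):
+    print(f"{class_names[c]} ({p:.0%})")
+```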
+
+### Step 8: Evaluation
+
+For multiclass, use class names for clarity:
+
+```python
+class_names = ["Negative", "Neutral", "Positive"]
+
+for pred, true in zip(predictions, y_test):
+ predicted_label = class_names[pred]
+ true_label = class_names[true]
+ print(f"Predicted: {predicted_label}, True: {true_label}")
+```
+
+**Output:**
+
+```
+✅ Predicted: Negative, True: Negative
+ Text: This is absolutely horrible!
+
+✅ Predicted: Neutral, True: Neutral
+ Text: It's an average product, nothing more.
+
+✅ Predicted: Positive, True: Positive
+ Text: Fantastic! Love every aspect of it!
+```
+
+## Advanced: Class Imbalance
+
+Real datasets often have unbalanced classes:
+
+```python
+# Imbalanced example
+y_train = [0]*100 + [1]*20 + [2]*10 # 100:20:10 ratio
+```
+
+**Solutions:**
+
+### 1. Class Weights
+
+Weight the loss function to penalize minority class errors more:
+
+```python
+from torch import nn
+
+# Calculate class weights
+class_counts = np.bincount(y_train)
+class_weights = 1.0 / class_counts
+class_weights = class_weights / class_weights.sum() # Normalize
+
+# Use weighted loss
+training_config = TrainingConfig(
+ ...
+ loss=nn.CrossEntropyLoss(weight=torch.FloatTensor(class_weights))
+)
+```
+
+### 2. Oversampling/Undersampling
+
+Balance the dataset before training:
+
+```python
+import numpy as np
+from sklearn.utils import resample
+
+# Oversample every class up to the size of the largest one (before training)
+y_arr = np.asarray(y_train)
+max_count = np.bincount(y_arr).max()
+parts = [resample(X_train[y_arr == c], replace=True, n_samples=max_count, random_state=42)
+         for c in np.unique(y_arr)]
+X_train_bal = np.concatenate(parts)
+y_train_bal = np.repeat(np.unique(y_arr), max_count)
+```
+
+### 3. Data Augmentation
+
+Generate synthetic samples for minority classes.
+
+## Evaluation Metrics
+
+For multiclass problems, accuracy isn't enough. Use:
+
+### Confusion Matrix
+
+```python
+from sklearn.metrics import confusion_matrix, classification_report
+
+cm = confusion_matrix(y_test, predictions)
+print("Confusion Matrix:")
+print(cm)
+# Pred 0 Pred 1 Pred 2
+# True 0 [[ 2 0 0]
+# True 1 [ 0 2 0]
+# True 2 [ 0 0 2]]
+```
+
+### Classification Report
+
+```python
+report = classification_report(
+ y_test, predictions,
+ target_names=["Negative", "Neutral", "Positive"]
+)
+print(report)
+```
+
+**Output:**
+
+```
+ precision recall f1-score support
+
+ Negative 1.00 1.00 1.00 2
+ Neutral 1.00 1.00 1.00 2
+ Positive 1.00 1.00 1.00 2
+
+ accuracy 1.00 6
+ macro avg 1.00 1.00 1.00 6
+weighted avg 1.00 1.00 1.00 6
+```
+
+**Metrics explained:**
+
+- **Precision**: Of predicted class X, how many were correct?
+- **Recall**: Of true class X, how many did we find?
+- **F1-score**: Harmonic mean of precision and recall
+- **Support**: Number of samples in each class
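+
+You can also read per-class recall straight off the confusion matrix diagonal:
+
+```python
+per_class_recall = cm.diagonal() / cm.sum(axis=1)
+for name, r in zip(["Negative", "Neutral", "Positive"], per_class_recall):
+    print(f"{name}: recall = {r:.2f}")
+```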
+
+## Extending to More Classes
+
+For 5 classes (e.g., star ratings 1-5):
+
+```python
+# Data with 5 classes
+y_train = np.array([0, 1, 2, 3, 4, ...]) # 0=1-star, 4=5-star
+
+# Model configuration
+model_config = ModelConfig(
+ embedding_dim=64,
+ num_classes=5 # Change to 5
+)
+```
+
+The same code works for any number of classes!
+
+## Common Issues
+
+### Issue: Poor Performance on Middle Classes
+
+**Problem:** Neutral class has low accuracy
+
+**Solution:**
+
+1. Collect more neutral examples
+2. Make the distinction clearer in your data
+3. Consider if neutral is necessary (binary might be better)
+
+### Issue: Model Always Predicts One Class
+
+**Symptoms:** All predictions are class 0 or class 2
+
+**Solutions:**
+
+1. Check class balance - might be too imbalanced
+2. Verify labels are correct (0, 1, 2 not 1, 2, 3)
+3. Lower learning rate
+4. Train for more epochs
+
+### Issue: Overfitting
+
+**Symptoms:** High training accuracy, low test accuracy
+
+**Solutions:**
+
+1. Reduce `embedding_dim`
+2. Add more training data
+3. Use stronger early stopping (lower `patience_early_stopping`)
+
+## Next Steps
+
+Now that you understand multiclass classification:
+
+1. **Add categorical features**: Combine text with metadata
+2. **Try multilabel classification**: Multiple labels per sample
+3. **Use explainability**: See which words matter for each class
+4. **Explore advanced architectures**: Add attention mechanisms
+
+## Complete Working Example
+
+Find the full code in the repository:
+- [examples/multiclass_classification.py](https://github.com/InseeFrLab/torchTextClassifiers/blob/main/examples/multiclass_classification.py)
+
+## Summary
+
+In this tutorial, you learned:
+
+- ✅ How to set up multiclass classification (3+ classes)
+- ✅ How to configure `num_classes` correctly
+- ✅ How to ensure reproducible results with proper seeding
+- ✅ How to check and handle class distribution
+- ✅ How to evaluate multiclass models with confusion matrices
+- ✅ How to handle class imbalance
+
+You're now ready to tackle real-world multiclass problems!
diff --git a/docs/source/tutorials/multilabel_classification.md b/docs/source/tutorials/multilabel_classification.md
new file mode 100644
index 0000000..b9a876c
--- /dev/null
+++ b/docs/source/tutorials/multilabel_classification.md
@@ -0,0 +1,637 @@
+# Multilabel Classification
+
+Learn how to assign multiple labels to each text sample, enabling more complex classification scenarios.
+
+## Learning Objectives
+
+By the end of this tutorial, you'll be able to:
+
+- Understand multilabel vs. multiclass classification
+- Use both ragged-list and one-hot encoding approaches
+- Configure appropriate loss functions for multilabel tasks
+- Evaluate multilabel predictions
+- Handle variable numbers of labels per sample
+
+## Prerequisites
+
+- Completed {doc}`multiclass_classification` tutorial
+- Understanding of binary classification
+- Familiarity with numpy arrays
+
+## Multilabel vs. Multiclass
+
+### Multiclass Classification
+
+Each sample has **exactly one label** from multiple classes:
+
+```python
+texts = ["Sports article", "Tech news", "Business report"]
+labels = [0, 1, 2] # Each sample has ONE label
+```
+
+### Multilabel Classification
+
+Each sample can have **zero, one, or multiple labels**:
+
+```python
+texts = [
+ "Article about AI in healthcare", # Both Tech AND Health
+ "Sports news from Europe", # Both Sports AND Europe
+ "Local business report" # Just Business
+]
+
+# Multiple labels per sample
+labels = [
+ [1, 3], # Tech (1) + Health (3)
+ [0, 4], # Sports (0) + Europe (4)
+ [2] # Business (2) only
+]
+```
+
+### Real-World Use Cases
+
+✅ **Document tagging**: Article can have multiple topics
+✅ **Product categorization**: Product can belong to multiple categories
+✅ **Symptom detection**: Patient can have multiple symptoms
+✅ **Content moderation**: Content can violate multiple rules
+✅ **Multi-genre classification**: Movie can have multiple genres
+
+## Two Approaches to Multilabel
+
+### Approach 1: Ragged Lists
+
+Each sample has a **list of label indices**:
+
+```python
+labels = [
+ [0, 1, 5], # Sample has labels 0, 1, and 5
+ [0, 4], # Sample has labels 0 and 4
+ [1, 5], # Sample has labels 1 and 5
+]
+```
+
+**Pros:**
+- Natural representation
+- Easy to construct
+
+**Cons:**
+- Can't directly convert to numpy array
+- Variable-length lists
+- Forward pass is a bit slower (labels are converted to one-hot encodings behind the scenes, on the fly, for each batch)
+
+### Approach 2: One-Hot Encoding (Recommended)
+
+Each sample has a **binary vector** (1 = label present, 0 = absent):
+
+```python
+labels = [
+ [1, 1, 0, 0, 0, 1], # Labels 0, 1, 5 present
+ [1, 0, 0, 0, 1, 0], # Labels 0, 4 present
+ [0, 1, 0, 0, 0, 1], # Labels 1, 5 present
+]
+```
+
+**Pros:**
+- Fixed-size numpy array
+- Can store probabilities (not just 0/1)
+
+**Cons:**
+- May require a bit of extra preprocessing on your end to produce this format
+
+## Complete Example: Ragged Lists
+
+```python
+import numpy as np
+import torch
+from torchTextClassifiers import ModelConfig, TrainingConfig, torchTextClassifiers
+from torchTextClassifiers.tokenizers import WordPieceTokenizer
+
+# Sample data: Each text can have multiple labels
+texts = [
+ "This is a positive example",
+ "This is a negative example",
+ "Another positive case",
+ "Another negative case",
+ "Good example here",
+ "Bad example here"
+]
+
+# Ragged lists: Variable-length label lists
+labels = [
+ [0, 1, 5], # Has 3 labels
+ [0, 4], # Has 2 labels
+ [1, 5], # Has 2 labels
+ [0, 1, 4], # Has 3 labels
+ [1, 5], # Has 2 labels
+ [0] # Has 1 label
+]
+
+# Prepare data
+X = np.array(texts)
+y = np.array(labels, dtype=object) # dtype=object for ragged lists
+
+# Create tokenizer
+tokenizer = WordPieceTokenizer(vocab_size=1000)
+tokenizer.train(X.tolist())
+
+# Calculate number of classes
+num_classes = max(max(label_list) for label_list in labels) + 1
+
+# Configure model
+model_config = ModelConfig(
+ embedding_dim=96,
+ num_classes=num_classes
+)
+
+# IMPORTANT: Use BCEWithLogitsLoss for multilabel
+training_config = TrainingConfig(
+ lr=1e-3,
+ batch_size=4,
+ num_epochs=10,
+ loss=torch.nn.BCEWithLogitsLoss() # Multilabel loss
+)
+
+# Create classifier with ragged_multilabel=True
+classifier = torchTextClassifiers(
+ tokenizer=tokenizer,
+ model_config=model_config,
+ ragged_multilabel=True # Key parameter!
+)
+
+# Train
+classifier.train(
+ X_train=X,
+ y_train=y,
+ training_config=training_config
+)
+
+# Predict
+result = classifier.predict(X)
+predictions = result["prediction"]
+```
+
+## Complete Example: One-Hot Encoding
+
+```python
+import numpy as np
+import torch
+from torchTextClassifiers import ModelConfig, TrainingConfig, torchTextClassifiers
+from torchTextClassifiers.tokenizers import WordPieceTokenizer
+
+# Same texts
+texts = [
+ "This is a positive example",
+ "This is a negative example",
+ "Another positive case",
+ "Another negative case",
+ "Good example here",
+ "Bad example here"
+]
+
+# One-hot encoding: Binary vectors
+# 6 samples, 6 possible labels (0-5)
+labels = [
+ [1., 1., 0., 0., 0., 1.], # Labels 0, 1, 5 present
+ [1., 0., 0., 0., 1., 0.], # Labels 0, 4 present
+ [0., 1., 0., 0., 0., 1.], # Labels 1, 5 present
+ [1., 1., 0., 0., 1., 0.], # Labels 0, 1, 4 present
+ [0., 1., 0., 0., 0., 1.], # Labels 1, 5 present
+ [1., 0., 0., 0., 0., 0.] # Label 0 present
+]
+
+# Prepare data
+X = np.array(texts)
+y = np.array(labels) # Can convert to numpy array now!
+
+# Create tokenizer
+tokenizer = WordPieceTokenizer(vocab_size=1000)
+tokenizer.train(X.tolist())
+
+# Configure model
+num_classes = y.shape[1] # Number of columns
+
+model_config = ModelConfig(
+ embedding_dim=96,
+ num_classes=num_classes
+)
+
+training_config = TrainingConfig(
+ lr=1e-3,
+ batch_size=4,
+ num_epochs=10,
+ loss=torch.nn.BCEWithLogitsLoss()
+)
+
+# Create classifier with ragged_multilabel=False (default)
+classifier = torchTextClassifiers(
+ tokenizer=tokenizer,
+ model_config=model_config,
+ ragged_multilabel=False # or omit (default is False)
+)
+
+# Train
+classifier.train(
+ X_train=X,
+ y_train=y,
+ training_config=training_config
+)
+
+# Predict
+result = classifier.predict(X)
+predictions = result["prediction"]
+```
+
+## Step-by-Step Walkthrough
+
+### 1. Choose Your Approach
+
+**Use Ragged Lists if:**
+- Data is naturally in list format...
+- ... and it would be too costly to one-hot encode it
+
+**Use One-Hot if:**
+- You want more efficiency
+- You want to store probabilities
+
+### 2. Prepare Labels
+
+#### Ragged Lists
+
+```python
+# List of lists (variable length)
+labels = [[0, 1], [1, 2, 3], [0]]
+```
+
+#### One-Hot Encoding
+
+```python
+# Manual creation
+labels = [
+ [1, 0, 0, 0], # Label 0
+ [0, 1, 1, 1], # Labels 1, 2, 3
+ [1, 0, 0, 0] # Label 0
+]
+
+# Or convert from ragged lists
+from sklearn.preprocessing import MultiLabelBinarizer
+
+ragged_labels = [[0, 1], [1, 2, 3], [0]]
+mlb = MultiLabelBinarizer()
+one_hot_labels = mlb.fit_transform(ragged_labels)
+```
+
+### 3. Configure Loss Function
+
+**We recommend using `BCEWithLogitsLoss` for multilabel:**
+
+```python
+import torch
+
+training_config = TrainingConfig(
+ # ... other params ...
+ loss=torch.nn.BCEWithLogitsLoss()
+)
+```
+
+**Why not CrossEntropyLoss?**
+- `CrossEntropyLoss`: Classes compete (only one can win)
+- `BCEWithLogitsLoss`: Each label is independent binary decision
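+
+You can see the difference directly on a set of logits (illustrative values); `BCEWithLogitsLoss` applies the sigmoid internally:
+
+```python
+import torch
+
+logits = torch.tensor([[2.0, -1.0, 0.5]])
+
+# Multiclass view: softmax makes the classes compete (rows sum to 1)
+print(torch.softmax(logits, dim=1))  # tensor([[0.7856, 0.0391, 0.1753]])
+
+# Multilabel view: one sigmoid per label, each an independent yes/no
+print(torch.sigmoid(logits))         # tensor([[0.8808, 0.2689, 0.6225]])
+```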
+
+### 4. Set ragged_multilabel Flag
+
+```python
+# For ragged lists
+classifier = torchTextClassifiers(
+ tokenizer=tokenizer,
+ model_config=model_config,
+ ragged_multilabel=True # Must be True
+)
+
+# For one-hot encoding
+classifier = torchTextClassifiers(
+ tokenizer=tokenizer,
+ model_config=model_config,
+ ragged_multilabel=False # Must be False (default)
+)
+```
+
+**Warning:** Setting the wrong flag leads to incorrect behavior!
+
+### 5. Understanding Predictions
+
+#### Ragged Lists
+
+Predictions are **probability scores** for each label:
+
+```python
+result = classifier.predict(X_test)
+predictions = result["prediction"].numpy()  # Shape: (n_samples, n_classes)
+
+# For each sample, check which labels are active
+threshold = 0.5
+for i, pred in enumerate(predictions):
+ active_labels = [j for j, prob in enumerate(pred) if prob > threshold]
+ print(f"Sample {i}: {active_labels}")
+```
+
+#### One-Hot Encoding
+
+Same format - probabilities for each label:
+
+```python
+predictions = result["prediction"].numpy()  # Shape: (n_samples, n_classes)
+
+# Apply threshold
+predicted_labels = (predictions > 0.5).astype(int)
+```
+
+## Evaluation Metrics
+
+### Exact Match Accuracy
+
+All labels must match exactly:
+
+```python
+def exact_match_accuracy(y_true, y_pred, threshold=0.5):
+ """Calculate exact match accuracy."""
+ y_pred_binary = (y_pred > threshold).astype(int)
+
+ # Check if each sample matches exactly
+ matches = np.all(y_pred_binary == y_true, axis=1)
+ return matches.mean()
+
+accuracy = exact_match_accuracy(y_test, predictions)
+print(f"Exact Match Accuracy: {accuracy:.3f}")
+```
+
+### Hamming Loss
+
+Average per-label error:
+
+```python
+from sklearn.metrics import hamming_loss
+
+# Convert predictions to binary
+y_pred_binary = (predictions > 0.5).astype(int)
+
+loss = hamming_loss(y_test, y_pred_binary)
+print(f"Hamming Loss: {loss:.3f}") # Lower is better
+```
+
+### F1 Score
+
+Harmonic mean of precision and recall:
+
+```python
+from sklearn.metrics import f1_score
+
+# Micro: Calculate globally
+f1_micro = f1_score(y_test, y_pred_binary, average='micro')
+
+# Macro: Average per label
+f1_macro = f1_score(y_test, y_pred_binary, average='macro')
+
+# Weighted: Weighted by support
+f1_weighted = f1_score(y_test, y_pred_binary, average='weighted')
+
+print(f"F1 Micro: {f1_micro:.3f}")
+print(f"F1 Macro: {f1_macro:.3f}")
+print(f"F1 Weighted: {f1_weighted:.3f}")
+```
+
+### Subset Accuracy
+
+Same as exact match accuracy:
+
+```python
+from sklearn.metrics import accuracy_score
+
+subset_acc = accuracy_score(y_test, y_pred_binary)
+print(f"Subset Accuracy: {subset_acc:.3f}")
+```
+
+## Real-World Example: Document Tagging
+
+```python
+import numpy as np
+import torch
+from sklearn.model_selection import train_test_split
+from sklearn.metrics import classification_report
+
+from torchTextClassifiers import ModelConfig, TrainingConfig, torchTextClassifiers
+from torchTextClassifiers.tokenizers import WordPieceTokenizer
+
+# Document tagging dataset
+texts = [
+ "Python tutorial for machine learning",
+ "Introduction to neural networks",
+ "Web development with JavaScript",
+ "Data visualization with Python",
+ "Deep learning research paper",
+ "Building REST APIs in Python"
+]
+
+# Labels: 0=Programming, 1=AI/ML, 2=Web, 3=Data, 4=Research
+labels = [
+ [0, 1], # Programming + AI/ML
+ [1, 4], # AI/ML + Research
+ [0, 2], # Programming + Web
+ [0, 3], # Programming + Data
+ [1, 4], # AI/ML + Research
+ [0, 2] # Programming + Web
+]
+
+# Prepare data
+X = np.array(texts)
+y = np.array(labels, dtype=object)
+
+# Split
+X_train, X_test, y_train, y_test = train_test_split(
+ X, y, test_size=0.33, random_state=42
+)
+
+# Train model
+tokenizer = WordPieceTokenizer(vocab_size=1000)
+tokenizer.train(X_train.tolist())
+
+num_classes = 5
+
+model_config = ModelConfig(
+ embedding_dim=64,
+ num_classes=num_classes
+)
+
+training_config = TrainingConfig(
+ lr=1e-3,
+ batch_size=2,
+ num_epochs=50,
+ loss=torch.nn.BCEWithLogitsLoss()
+)
+
+classifier = torchTextClassifiers(
+ tokenizer=tokenizer,
+ model_config=model_config,
+ ragged_multilabel=True
+)
+
+classifier.train(X_train, y_train, training_config=training_config)
+
+# Predict and evaluate
+result = classifier.predict(X_test)
+predictions = result["prediction"].numpy()
+
+# Convert to binary predictions
+y_pred_binary = (predictions > 0.5).astype(int)
+
+# Convert ragged y_test to one-hot for evaluation
+from sklearn.preprocessing import MultiLabelBinarizer
+mlb = MultiLabelBinarizer(classes=range(num_classes))
+y_test_binary = mlb.fit_transform(y_test)
+
+# Evaluate
+
+label_names = ['Programming', 'AI/ML', 'Web', 'Data', 'Research']
+print(classification_report(
+ y_test_binary,
+ y_pred_binary,
+ target_names=label_names
+))
+```
+
+## Common Issues
+
+### Issue 1: Wrong ragged_multilabel Setting
+
+**Error:** Model trains but predictions are incorrect
+
+**Solution:** Ensure flag matches your data format:
+```python
+# Ragged lists → ragged_multilabel=True
+# One-hot → ragged_multilabel=False
+```
+
+### Issue 2: Using CrossEntropyLoss
+
+**Problem:** Model doesn't learn properly
+
+**Solution:** Always use `BCEWithLogitsLoss`:
+```python
+training_config = TrainingConfig(
+ loss=torch.nn.BCEWithLogitsLoss()
+)
+```
+
+### Issue 3: Shape Mismatch
+
+**Error:** "Expected 2D array for labels"
+
+**Solution:** For ragged lists, use `dtype=object`:
+```python
+y = np.array(labels, dtype=object)
+```
+
+### Issue 4: All Predictions Same
+
+**Possible causes:**
+- Not enough training data
+- Learning rate too high/low
+- Class imbalance
+
+**Try:**
+- Increase training epochs
+- Adjust learning rate
+- Check label distribution
+
+## Customization
+
+### Custom Threshold
+
+Adjust sensitivity vs. precision:
+
+```python
+# Conservative (higher precision)
+threshold = 0.7
+predicted_labels = (predictions > threshold).astype(int)
+
+# Aggressive (higher recall)
+threshold = 0.3
+predicted_labels = (predictions > threshold).astype(int)
+```
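+
+You can also pick the threshold empirically, e.g. the one maximizing micro-F1 on a held-out set (a sketch; `y_val` and `val_predictions` are assumed to be a one-hot validation set and its predicted probabilities):
+
+```python
+import numpy as np
+from sklearn.metrics import f1_score
+
+thresholds = np.linspace(0.1, 0.9, 17)
+scores = [f1_score(y_val, (val_predictions > t).astype(int), average='micro')
+          for t in thresholds]
+best_threshold = thresholds[int(np.argmax(scores))]
+```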
+
+### Class Weights
+
+Handle imbalanced labels:
+
+```python
+# Calculate class weights
+from sklearn.utils.class_weight import compute_class_weight
+
+# For one-hot labels
+class_weights = compute_class_weight(
+ 'balanced',
+ classes=np.arange(num_classes),
+    y=y_train.argmax(axis=1)  # Rough approximation: uses each sample's first active label
+)
+
+# Use in loss (requires custom loss function)
+```
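+
+For multilabel specifically, `BCEWithLogitsLoss` has built-in support via its `pos_weight` argument (one weight per label), which avoids a custom loss. A sketch, assuming one-hot `y_train`:
+
+```python
+import numpy as np
+import torch
+
+pos_counts = y_train.sum(axis=0)        # positives per label
+neg_counts = len(y_train) - pos_counts  # negatives per label
+pos_weight = torch.tensor(neg_counts / np.maximum(pos_counts, 1), dtype=torch.float)
+
+loss = torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight)
+```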
+
+### With Attention
+
+For long documents:
+
+```python
+from torchTextClassifiers.model.components import AttentionConfig
+
+attention_config = AttentionConfig(
+ n_embd=128,
+ n_head=8,
+ n_layer=3
+)
+
+model_config = ModelConfig(
+ embedding_dim=128,
+ num_classes=num_classes,
+ attention_config=attention_config
+)
+```
+
+## Advanced: Probabilistic Labels
+
+One-hot encoding supports probabilities:
+
+```python
+# Soft labels (not just 0 or 1)
+labels = [
+ [0.9, 0.8, 0.1, 0.0, 0.0, 0.7], # Confident in 0,1,5
+ [0.6, 0.0, 0.0, 0.0, 0.5, 0.0], # Less confident in 0,4
+]
+
+y = np.array(labels) # Probabilities between 0 and 1
+
+# Use same setup, BCEWithLogitsLoss handles probabilities
+```
+
+## Best Practices
+
+1. **Choose the right approach:** One-hot encoding for most cases; ragged lists when converting is too costly
+2. **Always use BCEWithLogitsLoss:** Essential for multilabel
+3. **Set ragged_multilabel correctly:** Matches your data format
+4. **Use appropriate metrics:** F1, Hamming loss better than accuracy
+5. **Tune threshold:** Balance precision vs. recall for your use case
+6. **Handle imbalance:** Common in multilabel - consider class weights
+
+## Summary
+
+**Key takeaways:**
+- Multilabel: Each sample can have multiple labels
+- Two approaches: One-hot encoding (recommended) or ragged lists
+- Always use `BCEWithLogitsLoss` for multilabel tasks
+- Set `ragged_multilabel=True` for ragged lists
+- Evaluate with F1, Hamming loss, or exact match accuracy
+
+Ready to combine everything? Try adding categorical features to multilabel classification, or use explainability to understand multilabel predictions!
+
+## Next Steps
+
+- **Mixed features**: Combine multilabel with categorical features
+- **Explainability**: Understand which words trigger which labels
+- **API Reference**: See {doc}`../api/index` for detailed documentation
+
diff --git a/pyproject.toml b/pyproject.toml
index c01919b..603e41e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -35,13 +35,17 @@ dev = [
"ipywidgets>=8.1.8",
]
docs = [
- "sphinx>=5.0.0",
- "sphinx-rtd-theme>=1.2.0",
- "sphinx-autodoc-typehints>=1.19.0",
- "sphinxcontrib-napoleon>=0.7",
+ "sphinx>=8.1.0",
+ "pydata-sphinx-theme>=0.16.0",
+ "sphinx-autodoc-typehints>=2.0.0",
"sphinx-copybutton>=0.5.0",
- "myst-parser>=0.18.0",
- "sphinx-design>=0.3.0"
+ "myst-parser>=4.0.0",
+ "sphinx-design>=0.6.0",
+ "nbsphinx>=0.9.0",
+ "ipython>=8.0.0",
+ "pandoc>=2.0.0",
+ "linkify-it-py>=2.0.0",
+ "sphinxcontrib-images>=1.0.1"
]
[project.optional-dependencies]
diff --git a/uv.lock b/uv.lock
index b552fa4..47568ac 100644
--- a/uv.lock
+++ b/uv.lock
@@ -6,6 +6,18 @@ resolution-markers = [
"python_full_version < '3.12'",
]
+[[package]]
+name = "accessible-pygments"
+version = "0.0.5"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "pygments" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/bc/c1/bbac6a50d02774f91572938964c582fff4270eee73ab822a4aeea4d8b11b/accessible_pygments-0.0.5.tar.gz", hash = "sha256:40918d3e6a2b619ad424cb91e556bd3bd8865443d9f22f1dcdf79e33c8046872", size = 1377899, upload-time = "2024-05-10T11:23:10.216Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/8d/3f/95338030883d8c8b91223b4e21744b04d11b161a3ef117295d8241f50ab4/accessible_pygments-0.0.5-py3-none-any.whl", hash = "sha256:88ae3211e68a1d0b011504b2ffc1691feafce124b845bd072ab6f9f66f34d4b7", size = 1395903, upload-time = "2024-05-10T11:23:08.421Z" },
+]
+
[[package]]
name = "aiohappyeyeballs"
version = "2.6.1"
@@ -145,6 +157,36 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/b7/b8/3fe70c75fe32afc4bb507f75563d39bc5642255d1d94f1f23604725780bf/babel-2.17.0-py3-none-any.whl", hash = "sha256:4d0b53093fdfb4b21c92b5213dba5a1b23885afa8383709427046b21c366e5f2", size = 10182537, upload-time = "2025-02-01T15:17:37.39Z" },
]
+[[package]]
+name = "beautifulsoup4"
+version = "4.14.2"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "soupsieve" },
+ { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/77/e9/df2358efd7659577435e2177bfa69cba6c33216681af51a707193dec162a/beautifulsoup4-4.14.2.tar.gz", hash = "sha256:2a98ab9f944a11acee9cc848508ec28d9228abfd522ef0fad6a02a72e0ded69e", size = 625822, upload-time = "2025-09-29T10:05:42.613Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/94/fe/3aed5d0be4d404d12d36ab97e2f1791424d9ca39c2f754a6285d59a3b01d/beautifulsoup4-4.14.2-py3-none-any.whl", hash = "sha256:5ef6fa3a8cbece8488d66985560f97ed091e22bbc4e9c2338508a9d5de6d4515", size = 106392, upload-time = "2025-09-29T10:05:43.771Z" },
+]
+
+[[package]]
+name = "bleach"
+version = "6.3.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "webencodings" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/07/18/3c8523962314be6bf4c8989c79ad9531c825210dd13a8669f6b84336e8bd/bleach-6.3.0.tar.gz", hash = "sha256:6f3b91b1c0a02bb9a78b5a454c92506aa0fdf197e1d5e114d2e00c6f64306d22", size = 203533, upload-time = "2025-10-27T17:57:39.211Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/cd/3a/577b549de0cc09d95f11087ee63c739bba856cd3952697eec4c4bb91350a/bleach-6.3.0-py3-none-any.whl", hash = "sha256:fe10ec77c93ddf3d13a73b035abaac7a9f5e436513864ccdad516693213c65d6", size = 164437, upload-time = "2025-10-27T17:57:37.538Z" },
+]
+
+[package.optional-dependencies]
+css = [
+ { name = "tinycss2" },
+]
+
[[package]]
name = "captum"
version = "0.7.0"
@@ -169,6 +211,76 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/84/ae/320161bd181fc06471eed047ecce67b693fd7515b16d495d8932db763426/certifi-2025.6.15-py3-none-any.whl", hash = "sha256:2e0c7ce7cb5d8f8634ca55d2ba7e6ec2689a2fd6537d8dec1296a477a4910057", size = 157650, upload-time = "2025-06-15T02:45:49.977Z" },
]
+[[package]]
+name = "cffi"
+version = "2.0.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "pycparser", marker = "implementation_name != 'PyPy'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/eb/56/b1ba7935a17738ae8453301356628e8147c79dbb825bcbc73dc7401f9846/cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529", size = 523588, upload-time = "2025-09-08T23:24:04.541Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/12/4a/3dfd5f7850cbf0d06dc84ba9aa00db766b52ca38d8b86e3a38314d52498c/cffi-2.0.0-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:b4c854ef3adc177950a8dfc81a86f5115d2abd545751a304c5bcf2c2c7283cfe", size = 184344, upload-time = "2025-09-08T23:22:26.456Z" },
+ { url = "https://files.pythonhosted.org/packages/4f/8b/f0e4c441227ba756aafbe78f117485b25bb26b1c059d01f137fa6d14896b/cffi-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2de9a304e27f7596cd03d16f1b7c72219bd944e99cc52b84d0145aefb07cbd3c", size = 180560, upload-time = "2025-09-08T23:22:28.197Z" },
+ { url = "https://files.pythonhosted.org/packages/b1/b7/1200d354378ef52ec227395d95c2576330fd22a869f7a70e88e1447eb234/cffi-2.0.0-cp311-cp311-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:baf5215e0ab74c16e2dd324e8ec067ef59e41125d3eade2b863d294fd5035c92", size = 209613, upload-time = "2025-09-08T23:22:29.475Z" },
+ { url = "https://files.pythonhosted.org/packages/b8/56/6033f5e86e8cc9bb629f0077ba71679508bdf54a9a5e112a3c0b91870332/cffi-2.0.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:730cacb21e1bdff3ce90babf007d0a0917cc3e6492f336c2f0134101e0944f93", size = 216476, upload-time = "2025-09-08T23:22:31.063Z" },
+ { url = "https://files.pythonhosted.org/packages/dc/7f/55fecd70f7ece178db2f26128ec41430d8720f2d12ca97bf8f0a628207d5/cffi-2.0.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:6824f87845e3396029f3820c206e459ccc91760e8fa24422f8b0c3d1731cbec5", size = 203374, upload-time = "2025-09-08T23:22:32.507Z" },
+ { url = "https://files.pythonhosted.org/packages/84/ef/a7b77c8bdc0f77adc3b46888f1ad54be8f3b7821697a7b89126e829e676a/cffi-2.0.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9de40a7b0323d889cf8d23d1ef214f565ab154443c42737dfe52ff82cf857664", size = 202597, upload-time = "2025-09-08T23:22:34.132Z" },
+ { url = "https://files.pythonhosted.org/packages/d7/91/500d892b2bf36529a75b77958edfcd5ad8e2ce4064ce2ecfeab2125d72d1/cffi-2.0.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8941aaadaf67246224cee8c3803777eed332a19d909b47e29c9842ef1e79ac26", size = 215574, upload-time = "2025-09-08T23:22:35.443Z" },
+ { url = "https://files.pythonhosted.org/packages/44/64/58f6255b62b101093d5df22dcb752596066c7e89dd725e0afaed242a61be/cffi-2.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a05d0c237b3349096d3981b727493e22147f934b20f6f125a3eba8f994bec4a9", size = 218971, upload-time = "2025-09-08T23:22:36.805Z" },
+ { url = "https://files.pythonhosted.org/packages/ab/49/fa72cebe2fd8a55fbe14956f9970fe8eb1ac59e5df042f603ef7c8ba0adc/cffi-2.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:94698a9c5f91f9d138526b48fe26a199609544591f859c870d477351dc7b2414", size = 211972, upload-time = "2025-09-08T23:22:38.436Z" },
+ { url = "https://files.pythonhosted.org/packages/0b/28/dd0967a76aab36731b6ebfe64dec4e981aff7e0608f60c2d46b46982607d/cffi-2.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5fed36fccc0612a53f1d4d9a816b50a36702c28a2aa880cb8a122b3466638743", size = 217078, upload-time = "2025-09-08T23:22:39.776Z" },
+ { url = "https://files.pythonhosted.org/packages/2b/c0/015b25184413d7ab0a410775fdb4a50fca20f5589b5dab1dbbfa3baad8ce/cffi-2.0.0-cp311-cp311-win32.whl", hash = "sha256:c649e3a33450ec82378822b3dad03cc228b8f5963c0c12fc3b1e0ab940f768a5", size = 172076, upload-time = "2025-09-08T23:22:40.95Z" },
+ { url = "https://files.pythonhosted.org/packages/ae/8f/dc5531155e7070361eb1b7e4c1a9d896d0cb21c49f807a6c03fd63fc877e/cffi-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:66f011380d0e49ed280c789fbd08ff0d40968ee7b665575489afa95c98196ab5", size = 182820, upload-time = "2025-09-08T23:22:42.463Z" },
+ { url = "https://files.pythonhosted.org/packages/95/5c/1b493356429f9aecfd56bc171285a4c4ac8697f76e9bbbbb105e537853a1/cffi-2.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:c6638687455baf640e37344fe26d37c404db8b80d037c3d29f58fe8d1c3b194d", size = 177635, upload-time = "2025-09-08T23:22:43.623Z" },
+ { url = "https://files.pythonhosted.org/packages/ea/47/4f61023ea636104d4f16ab488e268b93008c3d0bb76893b1b31db1f96802/cffi-2.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6d02d6655b0e54f54c4ef0b94eb6be0607b70853c45ce98bd278dc7de718be5d", size = 185271, upload-time = "2025-09-08T23:22:44.795Z" },
+ { url = "https://files.pythonhosted.org/packages/df/a2/781b623f57358e360d62cdd7a8c681f074a71d445418a776eef0aadb4ab4/cffi-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8eca2a813c1cb7ad4fb74d368c2ffbbb4789d377ee5bb8df98373c2cc0dee76c", size = 181048, upload-time = "2025-09-08T23:22:45.938Z" },
+ { url = "https://files.pythonhosted.org/packages/ff/df/a4f0fbd47331ceeba3d37c2e51e9dfc9722498becbeec2bd8bc856c9538a/cffi-2.0.0-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:21d1152871b019407d8ac3985f6775c079416c282e431a4da6afe7aefd2bccbe", size = 212529, upload-time = "2025-09-08T23:22:47.349Z" },
+ { url = "https://files.pythonhosted.org/packages/d5/72/12b5f8d3865bf0f87cf1404d8c374e7487dcf097a1c91c436e72e6badd83/cffi-2.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b21e08af67b8a103c71a250401c78d5e0893beff75e28c53c98f4de42f774062", size = 220097, upload-time = "2025-09-08T23:22:48.677Z" },
+ { url = "https://files.pythonhosted.org/packages/c2/95/7a135d52a50dfa7c882ab0ac17e8dc11cec9d55d2c18dda414c051c5e69e/cffi-2.0.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:1e3a615586f05fc4065a8b22b8152f0c1b00cdbc60596d187c2a74f9e3036e4e", size = 207983, upload-time = "2025-09-08T23:22:50.06Z" },
+ { url = "https://files.pythonhosted.org/packages/3a/c8/15cb9ada8895957ea171c62dc78ff3e99159ee7adb13c0123c001a2546c1/cffi-2.0.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:81afed14892743bbe14dacb9e36d9e0e504cd204e0b165062c488942b9718037", size = 206519, upload-time = "2025-09-08T23:22:51.364Z" },
+ { url = "https://files.pythonhosted.org/packages/78/2d/7fa73dfa841b5ac06c7b8855cfc18622132e365f5b81d02230333ff26e9e/cffi-2.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3e17ed538242334bf70832644a32a7aae3d83b57567f9fd60a26257e992b79ba", size = 219572, upload-time = "2025-09-08T23:22:52.902Z" },
+ { url = "https://files.pythonhosted.org/packages/07/e0/267e57e387b4ca276b90f0434ff88b2c2241ad72b16d31836adddfd6031b/cffi-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3925dd22fa2b7699ed2617149842d2e6adde22b262fcbfada50e3d195e4b3a94", size = 222963, upload-time = "2025-09-08T23:22:54.518Z" },
+ { url = "https://files.pythonhosted.org/packages/b6/75/1f2747525e06f53efbd878f4d03bac5b859cbc11c633d0fb81432d98a795/cffi-2.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2c8f814d84194c9ea681642fd164267891702542f028a15fc97d4674b6206187", size = 221361, upload-time = "2025-09-08T23:22:55.867Z" },
+ { url = "https://files.pythonhosted.org/packages/7b/2b/2b6435f76bfeb6bbf055596976da087377ede68df465419d192acf00c437/cffi-2.0.0-cp312-cp312-win32.whl", hash = "sha256:da902562c3e9c550df360bfa53c035b2f241fed6d9aef119048073680ace4a18", size = 172932, upload-time = "2025-09-08T23:22:57.188Z" },
+ { url = "https://files.pythonhosted.org/packages/f8/ed/13bd4418627013bec4ed6e54283b1959cf6db888048c7cf4b4c3b5b36002/cffi-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:da68248800ad6320861f129cd9c1bf96ca849a2771a59e0344e88681905916f5", size = 183557, upload-time = "2025-09-08T23:22:58.351Z" },
+ { url = "https://files.pythonhosted.org/packages/95/31/9f7f93ad2f8eff1dbc1c3656d7ca5bfd8fb52c9d786b4dcf19b2d02217fa/cffi-2.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:4671d9dd5ec934cb9a73e7ee9676f9362aba54f7f34910956b84d727b0d73fb6", size = 177762, upload-time = "2025-09-08T23:22:59.668Z" },
+ { url = "https://files.pythonhosted.org/packages/4b/8d/a0a47a0c9e413a658623d014e91e74a50cdd2c423f7ccfd44086ef767f90/cffi-2.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb", size = 185230, upload-time = "2025-09-08T23:23:00.879Z" },
+ { url = "https://files.pythonhosted.org/packages/4a/d2/a6c0296814556c68ee32009d9c2ad4f85f2707cdecfd7727951ec228005d/cffi-2.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca", size = 181043, upload-time = "2025-09-08T23:23:02.231Z" },
+ { url = "https://files.pythonhosted.org/packages/b0/1e/d22cc63332bd59b06481ceaac49d6c507598642e2230f201649058a7e704/cffi-2.0.0-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b", size = 212446, upload-time = "2025-09-08T23:23:03.472Z" },
+ { url = "https://files.pythonhosted.org/packages/a9/f5/a2c23eb03b61a0b8747f211eb716446c826ad66818ddc7810cc2cc19b3f2/cffi-2.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b", size = 220101, upload-time = "2025-09-08T23:23:04.792Z" },
+ { url = "https://files.pythonhosted.org/packages/f2/7f/e6647792fc5850d634695bc0e6ab4111ae88e89981d35ac269956605feba/cffi-2.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2", size = 207948, upload-time = "2025-09-08T23:23:06.127Z" },
+ { url = "https://files.pythonhosted.org/packages/cb/1e/a5a1bd6f1fb30f22573f76533de12a00bf274abcdc55c8edab639078abb6/cffi-2.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3", size = 206422, upload-time = "2025-09-08T23:23:07.753Z" },
+ { url = "https://files.pythonhosted.org/packages/98/df/0a1755e750013a2081e863e7cd37e0cdd02664372c754e5560099eb7aa44/cffi-2.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26", size = 219499, upload-time = "2025-09-08T23:23:09.648Z" },
+ { url = "https://files.pythonhosted.org/packages/50/e1/a969e687fcf9ea58e6e2a928ad5e2dd88cc12f6f0ab477e9971f2309b57c/cffi-2.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c", size = 222928, upload-time = "2025-09-08T23:23:10.928Z" },
+ { url = "https://files.pythonhosted.org/packages/36/54/0362578dd2c9e557a28ac77698ed67323ed5b9775ca9d3fe73fe191bb5d8/cffi-2.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b", size = 221302, upload-time = "2025-09-08T23:23:12.42Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/6d/bf9bda840d5f1dfdbf0feca87fbdb64a918a69bca42cfa0ba7b137c48cb8/cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27", size = 172909, upload-time = "2025-09-08T23:23:14.32Z" },
+ { url = "https://files.pythonhosted.org/packages/37/18/6519e1ee6f5a1e579e04b9ddb6f1676c17368a7aba48299c3759bbc3c8b3/cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75", size = 183402, upload-time = "2025-09-08T23:23:15.535Z" },
+ { url = "https://files.pythonhosted.org/packages/cb/0e/02ceeec9a7d6ee63bb596121c2c8e9b3a9e150936f4fbef6ca1943e6137c/cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91", size = 177780, upload-time = "2025-09-08T23:23:16.761Z" },
+ { url = "https://files.pythonhosted.org/packages/92/c4/3ce07396253a83250ee98564f8d7e9789fab8e58858f35d07a9a2c78de9f/cffi-2.0.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5", size = 185320, upload-time = "2025-09-08T23:23:18.087Z" },
+ { url = "https://files.pythonhosted.org/packages/59/dd/27e9fa567a23931c838c6b02d0764611c62290062a6d4e8ff7863daf9730/cffi-2.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13", size = 181487, upload-time = "2025-09-08T23:23:19.622Z" },
+ { url = "https://files.pythonhosted.org/packages/d6/43/0e822876f87ea8a4ef95442c3d766a06a51fc5298823f884ef87aaad168c/cffi-2.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b", size = 220049, upload-time = "2025-09-08T23:23:20.853Z" },
+ { url = "https://files.pythonhosted.org/packages/b4/89/76799151d9c2d2d1ead63c2429da9ea9d7aac304603de0c6e8764e6e8e70/cffi-2.0.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c", size = 207793, upload-time = "2025-09-08T23:23:22.08Z" },
+ { url = "https://files.pythonhosted.org/packages/bb/dd/3465b14bb9e24ee24cb88c9e3730f6de63111fffe513492bf8c808a3547e/cffi-2.0.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef", size = 206300, upload-time = "2025-09-08T23:23:23.314Z" },
+ { url = "https://files.pythonhosted.org/packages/47/d9/d83e293854571c877a92da46fdec39158f8d7e68da75bf73581225d28e90/cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775", size = 219244, upload-time = "2025-09-08T23:23:24.541Z" },
+ { url = "https://files.pythonhosted.org/packages/2b/0f/1f177e3683aead2bb00f7679a16451d302c436b5cbf2505f0ea8146ef59e/cffi-2.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205", size = 222828, upload-time = "2025-09-08T23:23:26.143Z" },
+ { url = "https://files.pythonhosted.org/packages/c6/0f/cafacebd4b040e3119dcb32fed8bdef8dfe94da653155f9d0b9dc660166e/cffi-2.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1", size = 220926, upload-time = "2025-09-08T23:23:27.873Z" },
+ { url = "https://files.pythonhosted.org/packages/3e/aa/df335faa45b395396fcbc03de2dfcab242cd61a9900e914fe682a59170b1/cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f", size = 175328, upload-time = "2025-09-08T23:23:44.61Z" },
+ { url = "https://files.pythonhosted.org/packages/bb/92/882c2d30831744296ce713f0feb4c1cd30f346ef747b530b5318715cc367/cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25", size = 185650, upload-time = "2025-09-08T23:23:45.848Z" },
+ { url = "https://files.pythonhosted.org/packages/9f/2c/98ece204b9d35a7366b5b2c6539c350313ca13932143e79dc133ba757104/cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad", size = 180687, upload-time = "2025-09-08T23:23:47.105Z" },
+ { url = "https://files.pythonhosted.org/packages/3e/61/c768e4d548bfa607abcda77423448df8c471f25dbe64fb2ef6d555eae006/cffi-2.0.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9", size = 188773, upload-time = "2025-09-08T23:23:29.347Z" },
+ { url = "https://files.pythonhosted.org/packages/2c/ea/5f76bce7cf6fcd0ab1a1058b5af899bfbef198bea4d5686da88471ea0336/cffi-2.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d", size = 185013, upload-time = "2025-09-08T23:23:30.63Z" },
+ { url = "https://files.pythonhosted.org/packages/be/b4/c56878d0d1755cf9caa54ba71e5d049479c52f9e4afc230f06822162ab2f/cffi-2.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c", size = 221593, upload-time = "2025-09-08T23:23:31.91Z" },
+ { url = "https://files.pythonhosted.org/packages/e0/0d/eb704606dfe8033e7128df5e90fee946bbcb64a04fcdaa97321309004000/cffi-2.0.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8", size = 209354, upload-time = "2025-09-08T23:23:33.214Z" },
+ { url = "https://files.pythonhosted.org/packages/d8/19/3c435d727b368ca475fb8742ab97c9cb13a0de600ce86f62eab7fa3eea60/cffi-2.0.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc", size = 208480, upload-time = "2025-09-08T23:23:34.495Z" },
+ { url = "https://files.pythonhosted.org/packages/d0/44/681604464ed9541673e486521497406fadcc15b5217c3e326b061696899a/cffi-2.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592", size = 221584, upload-time = "2025-09-08T23:23:36.096Z" },
+ { url = "https://files.pythonhosted.org/packages/25/8e/342a504ff018a2825d395d44d63a767dd8ebc927ebda557fecdaca3ac33a/cffi-2.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512", size = 224443, upload-time = "2025-09-08T23:23:37.328Z" },
+ { url = "https://files.pythonhosted.org/packages/e1/5e/b666bacbbc60fbf415ba9988324a132c9a7a0448a9a8f125074671c0f2c3/cffi-2.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4", size = 223437, upload-time = "2025-09-08T23:23:38.945Z" },
+ { url = "https://files.pythonhosted.org/packages/a0/1d/ec1a60bd1a10daa292d3cd6bb0b359a81607154fb8165f3ec95fe003b85c/cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e", size = 180487, upload-time = "2025-09-08T23:23:40.423Z" },
+ { url = "https://files.pythonhosted.org/packages/bf/41/4c1168c74fac325c0c8156f04b6749c8b6a8f405bbf91413ba088359f60d/cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6", size = 191726, upload-time = "2025-09-08T23:23:41.742Z" },
+ { url = "https://files.pythonhosted.org/packages/ae/3a/dbeec9d1ee0844c679f6bb5d6ad4e9f198b1224f4e7a32825f47f6192b0c/cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9", size = 184195, upload-time = "2025-09-08T23:23:43.004Z" },
+]
+
[[package]]
name = "cfgv"
version = "3.4.0"
@@ -353,6 +465,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/4e/8c/f3147f5c4b73e7550fe5f9352eaa956ae838d5c51eb58e7a25b9f3e2643b/decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a", size = 9190, upload-time = "2025-02-24T04:41:32.565Z" },
]

+[[package]]
+name = "defusedxml"
+version = "0.7.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/0f/d5/c66da9b79e5bdb124974bfe172b4daf3c984ebd9c2a06e2b8a4dc7331c72/defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69", size = 75520, upload-time = "2021-03-08T10:59:26.269Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/07/6c/aa3f2f849e01cb6a001cd8554a88d4c77c5c1a31c95bdf1cf9301e6d9ef4/defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61", size = 25604, upload-time = "2021-03-08T10:59:24.45Z" },
+]
+
[[package]]
name = "dill"
version = "0.4.0"
@@ -389,6 +510,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/c1/ea/53f2148663b321f21b5a606bd5f191517cf40b7072c0497d3c92c4a13b1e/executing-2.2.1-py2.py3-none-any.whl", hash = "sha256:760643d3452b4d777d295bb167ccc74c64a81df23fb5e08eff250c425a4b2017", size = 28317, upload-time = "2025-09-01T09:48:08.5Z" },
]

+[[package]]
+name = "fastjsonschema"
+version = "2.21.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/20/b5/23b216d9d985a956623b6bd12d4086b60f0059b27799f23016af04a74ea1/fastjsonschema-2.21.2.tar.gz", hash = "sha256:b1eb43748041c880796cd077f1a07c3d94e93ae84bba5ed36800a33554ae05de", size = 374130, upload-time = "2025-08-14T18:49:36.666Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/cb/a8/20d0723294217e47de6d9e2e40fd4a9d2f7c4b6ef974babd482a59743694/fastjsonschema-2.21.2-py3-none-any.whl", hash = "sha256:1c797122d0a86c5cace2e54bf4e819c36223b552017172f32c5c024a6b77e463", size = 24024, upload-time = "2025-08-14T18:49:34.776Z" },
+]
+
[[package]]
name = "filelock"
version = "3.18.0"
@@ -712,6 +842,71 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/7d/4f/1195bbac8e0c2acc5f740661631d8d750dc38d4a32b23ee5df3cde6f4e0d/joblib-1.5.1-py3-none-any.whl", hash = "sha256:4719a31f054c7d766948dcd83e9613686b27114f190f717cec7eaa2084f8a74a", size = 307746, upload-time = "2025-05-23T12:04:35.124Z" },
]

+[[package]]
+name = "jsonschema"
+version = "4.25.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "attrs" },
+ { name = "jsonschema-specifications" },
+ { name = "referencing" },
+ { name = "rpds-py" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/74/69/f7185de793a29082a9f3c7728268ffb31cb5095131a9c139a74078e27336/jsonschema-4.25.1.tar.gz", hash = "sha256:e4a9655ce0da0c0b67a085847e00a3a51449e1157f4f75e9fb5aa545e122eb85", size = 357342, upload-time = "2025-08-18T17:03:50.038Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/bf/9c/8c95d856233c1f82500c2450b8c68576b4cf1c871db3afac5c34ff84e6fd/jsonschema-4.25.1-py3-none-any.whl", hash = "sha256:3fba0169e345c7175110351d456342c364814cfcf3b964ba4587f22915230a63", size = 90040, upload-time = "2025-08-18T17:03:48.373Z" },
+]
+
+[[package]]
+name = "jsonschema-specifications"
+version = "2025.9.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "referencing" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/19/74/a633ee74eb36c44aa6d1095e7cc5569bebf04342ee146178e2d36600708b/jsonschema_specifications-2025.9.1.tar.gz", hash = "sha256:b540987f239e745613c7a9176f3edb72b832a4ac465cf02712288397832b5e8d", size = 32855, upload-time = "2025-09-08T01:34:59.186Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/41/45/1a4ed80516f02155c51f51e8cedb3c1902296743db0bbc66608a0db2814f/jsonschema_specifications-2025.9.1-py3-none-any.whl", hash = "sha256:98802fee3a11ee76ecaca44429fda8a41bff98b00a0f2838151b113f210cc6fe", size = 18437, upload-time = "2025-09-08T01:34:57.871Z" },
+]
+
+[[package]]
+name = "jupyter-client"
+version = "8.6.3"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "jupyter-core" },
+ { name = "python-dateutil" },
+ { name = "pyzmq" },
+ { name = "tornado" },
+ { name = "traitlets" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/71/22/bf9f12fdaeae18019a468b68952a60fe6dbab5d67cd2a103cac7659b41ca/jupyter_client-8.6.3.tar.gz", hash = "sha256:35b3a0947c4a6e9d589eb97d7d4cd5e90f910ee73101611f01283732bd6d9419", size = 342019, upload-time = "2024-09-17T10:44:17.613Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/11/85/b0394e0b6fcccd2c1eeefc230978a6f8cb0c5df1e4cd3e7625735a0d7d1e/jupyter_client-8.6.3-py3-none-any.whl", hash = "sha256:e8a19cc986cc45905ac3362915f410f3af85424b4c0905e94fa5f2cb08e8f23f", size = 106105, upload-time = "2024-09-17T10:44:15.218Z" },
+]
+
+[[package]]
+name = "jupyter-core"
+version = "5.9.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "platformdirs" },
+ { name = "traitlets" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/02/49/9d1284d0dc65e2c757b74c6687b6d319b02f822ad039e5c512df9194d9dd/jupyter_core-5.9.1.tar.gz", hash = "sha256:4d09aaff303b9566c3ce657f580bd089ff5c91f5f89cf7d8846c3cdf465b5508", size = 89814, upload-time = "2025-10-16T19:19:18.444Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e7/e7/80988e32bf6f73919a113473a604f5a8f09094de312b9d52b79c2df7612b/jupyter_core-5.9.1-py3-none-any.whl", hash = "sha256:ebf87fdc6073d142e114c72c9e29a9d7ca03fad818c5d300ce2adc1fb0743407", size = 29032, upload-time = "2025-10-16T19:19:16.783Z" },
+]
+
+[[package]]
+name = "jupyterlab-pygments"
+version = "0.3.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/90/51/9187be60d989df97f5f0aba133fa54e7300f17616e065d1ada7d7646b6d6/jupyterlab_pygments-0.3.0.tar.gz", hash = "sha256:721aca4d9029252b11cfa9d185e5b5af4d54772bb8072f9b7036f4170054d35d", size = 512900, upload-time = "2023-11-23T09:26:37.44Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/b1/dd/ead9d8ea85bf202d90cc513b533f9c363121c7792674f78e0d8a854b63b4/jupyterlab_pygments-0.3.0-py3-none-any.whl", hash = "sha256:841a89020971da1d8693f1a99997aefc5dc424bb1b251fd6322462a1b8842780", size = 15884, upload-time = "2023-11-23T09:26:34.325Z" },
+]
+
[[package]]
name = "jupyterlab-widgets"
version = "3.0.16"
@@ -801,6 +996,18 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/1a/c1/31b3184cba7b257a4a3b5ca5b88b9204ccb7aa02fe3c992280899293ed54/lightning_utilities-0.14.3-py3-none-any.whl", hash = "sha256:4ab9066aa36cd7b93a05713808901909e96cc3f187ea6fd3052b2fd91313b468", size = 28894, upload-time = "2025-04-03T15:59:55.658Z" },
]

+[[package]]
+name = "linkify-it-py"
+version = "2.0.3"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "uc-micro-py" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/2a/ae/bb56c6828e4797ba5a4821eec7c43b8bf40f69cda4d4f5f8c8a2810ec96a/linkify-it-py-2.0.3.tar.gz", hash = "sha256:68cda27e162e9215c17d786649d1da0021a451bdc436ef9e0fa0ba5234b9b048", size = 27946, upload-time = "2024-02-04T14:48:04.179Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/04/1e/b832de447dee8b582cac175871d2f6c3d5077cc56d5575cadba1fd1cccfa/linkify_it_py-2.0.3-py3-none-any.whl", hash = "sha256:6bcbc417b0ac14323382aef5c5192c0075bf8a9d6b41820a2b66371eac6b6d79", size = 19820, upload-time = "2024-02-04T14:48:02.496Z" },
+]
+
[[package]]
name = "markdown-it-py"
version = "3.0.0"
@@ -937,6 +1144,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979, upload-time = "2022-08-14T12:40:09.779Z" },
]

+[[package]]
+name = "mistune"
+version = "3.1.4"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/d7/02/a7fb8b21d4d55ac93cdcde9d3638da5dd0ebdd3a4fed76c7725e10b81cbe/mistune-3.1.4.tar.gz", hash = "sha256:b5a7f801d389f724ec702840c11d8fc48f2b33519102fc7ee739e8177b672164", size = 94588, upload-time = "2025-08-29T07:20:43.594Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/7a/f0/8282d9641415e9e33df173516226b404d367a0fc55e1a60424a152913abc/mistune-3.1.4-py3-none-any.whl", hash = "sha256:93691da911e5d9d2e23bc54472892aff676df27a75274962ff9edc210364266d", size = 53481, upload-time = "2025-08-29T07:20:42.218Z" },
+]
+
[[package]]
name = "mpmath"
version = "1.3.0"
@@ -1056,6 +1272,78 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/5f/df/76d0321c3797b54b60fef9ec3bd6f4cfd124b9e422182156a1dd418722cf/myst_parser-4.0.1-py3-none-any.whl", hash = "sha256:9134e88959ec3b5780aedf8a99680ea242869d012e8821db3126d427edc9c95d", size = 84579, upload-time = "2025-02-12T10:53:02.078Z" },
]

+[[package]]
+name = "nbclient"
+version = "0.10.2"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "jupyter-client" },
+ { name = "jupyter-core" },
+ { name = "nbformat" },
+ { name = "traitlets" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/87/66/7ffd18d58eae90d5721f9f39212327695b749e23ad44b3881744eaf4d9e8/nbclient-0.10.2.tar.gz", hash = "sha256:90b7fc6b810630db87a6d0c2250b1f0ab4cf4d3c27a299b0cde78a4ed3fd9193", size = 62424, upload-time = "2024-12-19T10:32:27.164Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/34/6d/e7fa07f03a4a7b221d94b4d586edb754a9b0dc3c9e2c93353e9fa4e0d117/nbclient-0.10.2-py3-none-any.whl", hash = "sha256:4ffee11e788b4a27fabeb7955547e4318a5298f34342a4bfd01f2e1faaeadc3d", size = 25434, upload-time = "2024-12-19T10:32:24.139Z" },
+]
+
+[[package]]
+name = "nbconvert"
+version = "7.16.6"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "beautifulsoup4" },
+ { name = "bleach", extra = ["css"] },
+ { name = "defusedxml" },
+ { name = "jinja2" },
+ { name = "jupyter-core" },
+ { name = "jupyterlab-pygments" },
+ { name = "markupsafe" },
+ { name = "mistune" },
+ { name = "nbclient" },
+ { name = "nbformat" },
+ { name = "packaging" },
+ { name = "pandocfilters" },
+ { name = "pygments" },
+ { name = "traitlets" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/a3/59/f28e15fc47ffb73af68a8d9b47367a8630d76e97ae85ad18271b9db96fdf/nbconvert-7.16.6.tar.gz", hash = "sha256:576a7e37c6480da7b8465eefa66c17844243816ce1ccc372633c6b71c3c0f582", size = 857715, upload-time = "2025-01-28T09:29:14.724Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/cc/9a/cd673b2f773a12c992f41309ef81b99da1690426bd2f96957a7ade0d3ed7/nbconvert-7.16.6-py3-none-any.whl", hash = "sha256:1375a7b67e0c2883678c48e506dc320febb57685e5ee67faa51b18a90f3a712b", size = 258525, upload-time = "2025-01-28T09:29:12.551Z" },
+]
+
+[[package]]
+name = "nbformat"
+version = "5.10.4"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "fastjsonschema" },
+ { name = "jsonschema" },
+ { name = "jupyter-core" },
+ { name = "traitlets" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/6d/fd/91545e604bc3dad7dca9ed03284086039b294c6b3d75c0d2fa45f9e9caf3/nbformat-5.10.4.tar.gz", hash = "sha256:322168b14f937a5d11362988ecac2a4952d3d8e3a2cbeb2319584631226d5b3a", size = 142749, upload-time = "2024-04-04T11:20:37.371Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/a9/82/0340caa499416c78e5d8f5f05947ae4bc3cba53c9f038ab6e9ed964e22f1/nbformat-5.10.4-py3-none-any.whl", hash = "sha256:3b48d6c8fbca4b299bf3982ea7db1af21580e4fec269ad087b9e81588891200b", size = 78454, upload-time = "2024-04-04T11:20:34.895Z" },
+]
+
+[[package]]
+name = "nbsphinx"
+version = "0.9.7"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "docutils" },
+ { name = "jinja2" },
+ { name = "nbconvert" },
+ { name = "nbformat" },
+ { name = "sphinx" },
+ { name = "traitlets" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/1e/84/b1856b7651ac34e965aa567a158714c7f3bd42a1b1ce76bf423ffb99872c/nbsphinx-0.9.7.tar.gz", hash = "sha256:abd298a686d55fa894ef697c51d44f24e53aa312dadae38e82920f250a5456fe", size = 180479, upload-time = "2025-03-03T19:46:08.069Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/49/2d/8c8e635bcc6757573d311bb3c5445426382f280da32b8cd6d82d501ef4a4/nbsphinx-0.9.7-py3-none-any.whl", hash = "sha256:7292c3767fea29e405c60743eee5393682a83982ab202ff98f5eb2db02629da8", size = 31660, upload-time = "2025-03-03T19:46:06.581Z" },
+]
+
[[package]]
name = "networkx"
version = "3.4.2"
@@ -1320,6 +1608,25 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/ab/5f/b38085618b950b79d2d9164a711c52b10aefc0ae6833b96f626b7021b2ed/pandas-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a", size = 13098436, upload-time = "2024-09-20T13:09:48.112Z" },
]

+[[package]]
+name = "pandoc"
+version = "2.4"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "plumbum" },
+ { name = "ply" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/10/9a/e3186e760c57ee5f1c27ea5cea577a0ff9abfca51eefcb4d9a4cd39aff2e/pandoc-2.4.tar.gz", hash = "sha256:ecd1f8cbb7f4180c6b5db4a17a7c1a74df519995f5f186ef81ce72a9cbd0dd9a", size = 34635, upload-time = "2024-08-07T14:33:58.016Z" }
+
+[[package]]
+name = "pandocfilters"
+version = "1.5.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/70/6f/3dd4940bbe001c06a65f88e36bad298bc7a0de5036115639926b0c5c0458/pandocfilters-1.5.1.tar.gz", hash = "sha256:002b4a555ee4ebc03f8b66307e287fa492e4a77b4ea14d3f934328297bb4939e", size = 8454, upload-time = "2024-01-18T20:08:13.726Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/ef/af/4fbc8cab944db5d21b7e2a5b8e9211a03a79852b1157e2c102fcc61ac440/pandocfilters-1.5.1-py2.py3-none-any.whl", hash = "sha256:93be382804a9cdb0a7267585f157e5d1731bbe5545a85b268d6f5fe6232de2bc", size = 8663, upload-time = "2024-01-18T20:08:11.28Z" },
+]
+
[[package]]
name = "parso"
version = "0.8.5"
@@ -1419,15 +1726,24 @@ wheels = [
]

[[package]]
-name = "pockets"
-version = "0.9.1"
+name = "plumbum"
+version = "1.10.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
- { name = "six" },
+ { name = "pywin32", marker = "platform_python_implementation != 'PyPy' and sys_platform == 'win32'" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/df/8e/0601097cfcce2e8c2297db5080e9719f549c2bd4b94420ddc8d3f848bbca/pockets-0.9.1.tar.gz", hash = "sha256:9320f1a3c6f7a9133fe3b571f283bcf3353cd70249025ae8d618e40e9f7e92b3", size = 24993, upload-time = "2019-11-02T14:46:19.433Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/dc/c8/11a5f792704b70f071a3dbc329105a98e9cc8d25daaf09f733c44eb0ef8e/plumbum-1.10.0.tar.gz", hash = "sha256:f8cbf0ecec0b73ff4e349398b65112a9e3f9300e7dc019001217dcc148d5c97c", size = 320039, upload-time = "2025-10-31T05:02:48.697Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/79/ad/45312df6b63ba64ea35b8d8f5f0c577aac16e6b416eafe8e1cb34e03f9a7/plumbum-1.10.0-py3-none-any.whl", hash = "sha256:9583d737ac901c474d99d030e4d5eec4c4e6d2d7417b1cf49728cf3be34f6dc8", size = 127383, upload-time = "2025-10-31T05:02:47.002Z" },
+]
+
+[[package]]
+name = "ply"
+version = "3.11"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/e5/69/882ee5c9d017149285cab114ebeab373308ef0f874fcdac9beb90e0ac4da/ply-3.11.tar.gz", hash = "sha256:00c7c1aaa88358b9c765b6d3000c6eec0ba42abca5351b095321aef446081da3", size = 159130, upload-time = "2018-02-15T19:01:31.097Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/e9/2f/a4583c70fbd8cd04910e2884bcc2bdd670e884061f7b4d70bc13e632a993/pockets-0.9.1-py2.py3-none-any.whl", hash = "sha256:68597934193c08a08eb2bf6a1d85593f627c22f9b065cc727a4f03f669d96d86", size = 26263, upload-time = "2019-11-02T14:46:17.814Z" },
+ { url = "https://files.pythonhosted.org/packages/a3/58/35da89ee790598a0700ea49b2a66594140f44dec458c07e8e3d4979137fc/ply-3.11-py2.py3-none-any.whl", hash = "sha256:096f9b8350b65ebd2fd1346b12452efe5b9607f7482813ffca50c22722a807ce", size = 49567, upload-time = "2018-02-15T19:01:27.172Z" },
]

[[package]]
@@ -1599,6 +1915,33 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/7b/03/f335d6c52b4a4761bcc83499789a1e2e16d9d201a58c327a9b5cc9a41bd9/pyarrow-22.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:0c34fe18094686194f204a3b1787a27456897d8a2d62caf84b61e8dfbc0252ae", size = 29185594, upload-time = "2025-10-24T10:09:53.111Z" },
]

+[[package]]
+name = "pycparser"
+version = "2.23"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/fe/cf/d2d3b9f5699fb1e4615c8e32ff220203e43b248e1dfcc6736ad9057731ca/pycparser-2.23.tar.gz", hash = "sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2", size = 173734, upload-time = "2025-09-09T13:23:47.91Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/a0/e3/59cd50310fc9b59512193629e1984c1f95e5c8ae6e5d8c69532ccc65a7fe/pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934", size = 118140, upload-time = "2025-09-09T13:23:46.651Z" },
+]
+
+[[package]]
+name = "pydata-sphinx-theme"
+version = "0.16.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "accessible-pygments" },
+ { name = "babel" },
+ { name = "beautifulsoup4" },
+ { name = "docutils" },
+ { name = "pygments" },
+ { name = "sphinx" },
+ { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/00/20/bb50f9de3a6de69e6abd6b087b52fa2418a0418b19597601605f855ad044/pydata_sphinx_theme-0.16.1.tar.gz", hash = "sha256:a08b7f0b7f70387219dc659bff0893a7554d5eb39b59d3b8ef37b8401b7642d7", size = 2412693, upload-time = "2024-12-17T10:53:39.537Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e2/0d/8ba33fa83a7dcde13eb3c1c2a0c1cc29950a048bfed6d9b0d8b6bd710b4c/pydata_sphinx_theme-0.16.1-py3-none-any.whl", hash = "sha256:225331e8ac4b32682c18fcac5a57a6f717c4e632cea5dd0e247b55155faeccde", size = 6723264, upload-time = "2024-12-17T10:53:35.645Z" },
+]
+
[[package]]
name = "pygments"
version = "2.19.2"
@@ -1673,6 +2016,25 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/81/c4/34e93fe5f5429d7570ec1fa436f1986fb1f00c3e0f43a589fe2bbcd22c3f/pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00", size = 509225, upload-time = "2025-03-25T02:24:58.468Z" },
]

+[[package]]
+name = "pywin32"
+version = "311"
+source = { registry = "https://pypi.org/simple" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/7c/af/449a6a91e5d6db51420875c54f6aff7c97a86a3b13a0b4f1a5c13b988de3/pywin32-311-cp311-cp311-win32.whl", hash = "sha256:184eb5e436dea364dcd3d2316d577d625c0351bf237c4e9a5fabbcfa5a58b151", size = 8697031, upload-time = "2025-07-14T20:13:13.266Z" },
+ { url = "https://files.pythonhosted.org/packages/51/8f/9bb81dd5bb77d22243d33c8397f09377056d5c687aa6d4042bea7fbf8364/pywin32-311-cp311-cp311-win_amd64.whl", hash = "sha256:3ce80b34b22b17ccbd937a6e78e7225d80c52f5ab9940fe0506a1a16f3dab503", size = 9508308, upload-time = "2025-07-14T20:13:15.147Z" },
+ { url = "https://files.pythonhosted.org/packages/44/7b/9c2ab54f74a138c491aba1b1cd0795ba61f144c711daea84a88b63dc0f6c/pywin32-311-cp311-cp311-win_arm64.whl", hash = "sha256:a733f1388e1a842abb67ffa8e7aad0e70ac519e09b0f6a784e65a136ec7cefd2", size = 8703930, upload-time = "2025-07-14T20:13:16.945Z" },
+ { url = "https://files.pythonhosted.org/packages/e7/ab/01ea1943d4eba0f850c3c61e78e8dd59757ff815ff3ccd0a84de5f541f42/pywin32-311-cp312-cp312-win32.whl", hash = "sha256:750ec6e621af2b948540032557b10a2d43b0cee2ae9758c54154d711cc852d31", size = 8706543, upload-time = "2025-07-14T20:13:20.765Z" },
+ { url = "https://files.pythonhosted.org/packages/d1/a8/a0e8d07d4d051ec7502cd58b291ec98dcc0c3fff027caad0470b72cfcc2f/pywin32-311-cp312-cp312-win_amd64.whl", hash = "sha256:b8c095edad5c211ff31c05223658e71bf7116daa0ecf3ad85f3201ea3190d067", size = 9495040, upload-time = "2025-07-14T20:13:22.543Z" },
+ { url = "https://files.pythonhosted.org/packages/ba/3a/2ae996277b4b50f17d61f0603efd8253cb2d79cc7ae159468007b586396d/pywin32-311-cp312-cp312-win_arm64.whl", hash = "sha256:e286f46a9a39c4a18b319c28f59b61de793654af2f395c102b4f819e584b5852", size = 8710102, upload-time = "2025-07-14T20:13:24.682Z" },
+ { url = "https://files.pythonhosted.org/packages/a5/be/3fd5de0979fcb3994bfee0d65ed8ca9506a8a1260651b86174f6a86f52b3/pywin32-311-cp313-cp313-win32.whl", hash = "sha256:f95ba5a847cba10dd8c4d8fefa9f2a6cf283b8b88ed6178fa8a6c1ab16054d0d", size = 8705700, upload-time = "2025-07-14T20:13:26.471Z" },
+ { url = "https://files.pythonhosted.org/packages/e3/28/e0a1909523c6890208295a29e05c2adb2126364e289826c0a8bc7297bd5c/pywin32-311-cp313-cp313-win_amd64.whl", hash = "sha256:718a38f7e5b058e76aee1c56ddd06908116d35147e133427e59a3983f703a20d", size = 9494700, upload-time = "2025-07-14T20:13:28.243Z" },
+ { url = "https://files.pythonhosted.org/packages/04/bf/90339ac0f55726dce7d794e6d79a18a91265bdf3aa70b6b9ca52f35e022a/pywin32-311-cp313-cp313-win_arm64.whl", hash = "sha256:7b4075d959648406202d92a2310cb990fea19b535c7f4a78d3f5e10b926eeb8a", size = 8709318, upload-time = "2025-07-14T20:13:30.348Z" },
+ { url = "https://files.pythonhosted.org/packages/c9/31/097f2e132c4f16d99a22bfb777e0fd88bd8e1c634304e102f313af69ace5/pywin32-311-cp314-cp314-win32.whl", hash = "sha256:b7a2c10b93f8986666d0c803ee19b5990885872a7de910fc460f9b0c2fbf92ee", size = 8840714, upload-time = "2025-07-14T20:13:32.449Z" },
+ { url = "https://files.pythonhosted.org/packages/90/4b/07c77d8ba0e01349358082713400435347df8426208171ce297da32c313d/pywin32-311-cp314-cp314-win_amd64.whl", hash = "sha256:3aca44c046bd2ed8c90de9cb8427f581c479e594e99b5c0bb19b29c10fd6cb87", size = 9656800, upload-time = "2025-07-14T20:13:34.312Z" },
+ { url = "https://files.pythonhosted.org/packages/c0/d2/21af5c535501a7233e734b8af901574572da66fcc254cb35d0609c9080dd/pywin32-311-cp314-cp314-win_arm64.whl", hash = "sha256:a508e2d9025764a8270f93111a970e1d0fbfc33f4153b388bb649b7eec4f9b42", size = 8932540, upload-time = "2025-07-14T20:13:36.379Z" },
+]
+
[[package]]
name = "pyyaml"
version = "6.0.2"
@@ -1708,6 +2070,78 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446, upload-time = "2024-08-06T20:33:04.33Z" },
]

+[[package]]
+name = "pyzmq"
+version = "27.1.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "cffi", marker = "implementation_name == 'pypy'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/04/0b/3c9baedbdf613ecaa7aa07027780b8867f57b6293b6ee50de316c9f3222b/pyzmq-27.1.0.tar.gz", hash = "sha256:ac0765e3d44455adb6ddbf4417dcce460fc40a05978c08efdf2948072f6db540", size = 281750, upload-time = "2025-09-08T23:10:18.157Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/06/5d/305323ba86b284e6fcb0d842d6adaa2999035f70f8c38a9b6d21ad28c3d4/pyzmq-27.1.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:226b091818d461a3bef763805e75685e478ac17e9008f49fce2d3e52b3d58b86", size = 1333328, upload-time = "2025-09-08T23:07:45.946Z" },
+ { url = "https://files.pythonhosted.org/packages/bd/a0/fc7e78a23748ad5443ac3275943457e8452da67fda347e05260261108cbc/pyzmq-27.1.0-cp311-cp311-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:0790a0161c281ca9723f804871b4027f2e8b5a528d357c8952d08cd1a9c15581", size = 908803, upload-time = "2025-09-08T23:07:47.551Z" },
+ { url = "https://files.pythonhosted.org/packages/7e/22/37d15eb05f3bdfa4abea6f6d96eb3bb58585fbd3e4e0ded4e743bc650c97/pyzmq-27.1.0-cp311-cp311-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c895a6f35476b0c3a54e3eb6ccf41bf3018de937016e6e18748317f25d4e925f", size = 668836, upload-time = "2025-09-08T23:07:49.436Z" },
+ { url = "https://files.pythonhosted.org/packages/b1/c4/2a6fe5111a01005fc7af3878259ce17684fabb8852815eda6225620f3c59/pyzmq-27.1.0-cp311-cp311-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5bbf8d3630bf96550b3be8e1fc0fea5cbdc8d5466c1192887bd94869da17a63e", size = 857038, upload-time = "2025-09-08T23:07:51.234Z" },
+ { url = "https://files.pythonhosted.org/packages/cb/eb/bfdcb41d0db9cd233d6fb22dc131583774135505ada800ebf14dfb0a7c40/pyzmq-27.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:15c8bd0fe0dabf808e2d7a681398c4e5ded70a551ab47482067a572c054c8e2e", size = 1657531, upload-time = "2025-09-08T23:07:52.795Z" },
+ { url = "https://files.pythonhosted.org/packages/ab/21/e3180ca269ed4a0de5c34417dfe71a8ae80421198be83ee619a8a485b0c7/pyzmq-27.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:bafcb3dd171b4ae9f19ee6380dfc71ce0390fefaf26b504c0e5f628d7c8c54f2", size = 2034786, upload-time = "2025-09-08T23:07:55.047Z" },
+ { url = "https://files.pythonhosted.org/packages/3b/b1/5e21d0b517434b7f33588ff76c177c5a167858cc38ef740608898cd329f2/pyzmq-27.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e829529fcaa09937189178115c49c504e69289abd39967cd8a4c215761373394", size = 1894220, upload-time = "2025-09-08T23:07:57.172Z" },
+ { url = "https://files.pythonhosted.org/packages/03/f2/44913a6ff6941905efc24a1acf3d3cb6146b636c546c7406c38c49c403d4/pyzmq-27.1.0-cp311-cp311-win32.whl", hash = "sha256:6df079c47d5902af6db298ec92151db82ecb557af663098b92f2508c398bb54f", size = 567155, upload-time = "2025-09-08T23:07:59.05Z" },
+ { url = "https://files.pythonhosted.org/packages/23/6d/d8d92a0eb270a925c9b4dd039c0b4dc10abc2fcbc48331788824ef113935/pyzmq-27.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:190cbf120fbc0fc4957b56866830def56628934a9d112aec0e2507aa6a032b97", size = 633428, upload-time = "2025-09-08T23:08:00.663Z" },
+ { url = "https://files.pythonhosted.org/packages/ae/14/01afebc96c5abbbd713ecfc7469cfb1bc801c819a74ed5c9fad9a48801cb/pyzmq-27.1.0-cp311-cp311-win_arm64.whl", hash = "sha256:eca6b47df11a132d1745eb3b5b5e557a7dae2c303277aa0e69c6ba91b8736e07", size = 559497, upload-time = "2025-09-08T23:08:02.15Z" },
+ { url = "https://files.pythonhosted.org/packages/92/e7/038aab64a946d535901103da16b953c8c9cc9c961dadcbf3609ed6428d23/pyzmq-27.1.0-cp312-abi3-macosx_10_15_universal2.whl", hash = "sha256:452631b640340c928fa343801b0d07eb0c3789a5ffa843f6e1a9cee0ba4eb4fc", size = 1306279, upload-time = "2025-09-08T23:08:03.807Z" },
+ { url = "https://files.pythonhosted.org/packages/e8/5e/c3c49fdd0f535ef45eefcc16934648e9e59dace4a37ee88fc53f6cd8e641/pyzmq-27.1.0-cp312-abi3-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:1c179799b118e554b66da67d88ed66cd37a169f1f23b5d9f0a231b4e8d44a113", size = 895645, upload-time = "2025-09-08T23:08:05.301Z" },
+ { url = "https://files.pythonhosted.org/packages/f8/e5/b0b2504cb4e903a74dcf1ebae157f9e20ebb6ea76095f6cfffea28c42ecd/pyzmq-27.1.0-cp312-abi3-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3837439b7f99e60312f0c926a6ad437b067356dc2bc2ec96eb395fd0fe804233", size = 652574, upload-time = "2025-09-08T23:08:06.828Z" },
+ { url = "https://files.pythonhosted.org/packages/f8/9b/c108cdb55560eaf253f0cbdb61b29971e9fb34d9c3499b0e96e4e60ed8a5/pyzmq-27.1.0-cp312-abi3-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:43ad9a73e3da1fab5b0e7e13402f0b2fb934ae1c876c51d0afff0e7c052eca31", size = 840995, upload-time = "2025-09-08T23:08:08.396Z" },
+ { url = "https://files.pythonhosted.org/packages/c2/bb/b79798ca177b9eb0825b4c9998c6af8cd2a7f15a6a1a4272c1d1a21d382f/pyzmq-27.1.0-cp312-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:0de3028d69d4cdc475bfe47a6128eb38d8bc0e8f4d69646adfbcd840facbac28", size = 1642070, upload-time = "2025-09-08T23:08:09.989Z" },
+ { url = "https://files.pythonhosted.org/packages/9c/80/2df2e7977c4ede24c79ae39dcef3899bfc5f34d1ca7a5b24f182c9b7a9ca/pyzmq-27.1.0-cp312-abi3-musllinux_1_2_i686.whl", hash = "sha256:cf44a7763aea9298c0aa7dbf859f87ed7012de8bda0f3977b6fb1d96745df856", size = 2021121, upload-time = "2025-09-08T23:08:11.907Z" },
+ { url = "https://files.pythonhosted.org/packages/46/bd/2d45ad24f5f5ae7e8d01525eb76786fa7557136555cac7d929880519e33a/pyzmq-27.1.0-cp312-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:f30f395a9e6fbca195400ce833c731e7b64c3919aa481af4d88c3759e0cb7496", size = 1878550, upload-time = "2025-09-08T23:08:13.513Z" },
+ { url = "https://files.pythonhosted.org/packages/e6/2f/104c0a3c778d7c2ab8190e9db4f62f0b6957b53c9d87db77c284b69f33ea/pyzmq-27.1.0-cp312-abi3-win32.whl", hash = "sha256:250e5436a4ba13885494412b3da5d518cd0d3a278a1ae640e113c073a5f88edd", size = 559184, upload-time = "2025-09-08T23:08:15.163Z" },
+ { url = "https://files.pythonhosted.org/packages/fc/7f/a21b20d577e4100c6a41795842028235998a643b1ad406a6d4163ea8f53e/pyzmq-27.1.0-cp312-abi3-win_amd64.whl", hash = "sha256:9ce490cf1d2ca2ad84733aa1d69ce6855372cb5ce9223802450c9b2a7cba0ccf", size = 619480, upload-time = "2025-09-08T23:08:17.192Z" },
+ { url = "https://files.pythonhosted.org/packages/78/c2/c012beae5f76b72f007a9e91ee9401cb88c51d0f83c6257a03e785c81cc2/pyzmq-27.1.0-cp312-abi3-win_arm64.whl", hash = "sha256:75a2f36223f0d535a0c919e23615fc85a1e23b71f40c7eb43d7b1dedb4d8f15f", size = 552993, upload-time = "2025-09-08T23:08:18.926Z" },
+ { url = "https://files.pythonhosted.org/packages/60/cb/84a13459c51da6cec1b7b1dc1a47e6db6da50b77ad7fd9c145842750a011/pyzmq-27.1.0-cp313-cp313-android_24_arm64_v8a.whl", hash = "sha256:93ad4b0855a664229559e45c8d23797ceac03183c7b6f5b4428152a6b06684a5", size = 1122436, upload-time = "2025-09-08T23:08:20.801Z" },
+ { url = "https://files.pythonhosted.org/packages/dc/b6/94414759a69a26c3dd674570a81813c46a078767d931a6c70ad29fc585cb/pyzmq-27.1.0-cp313-cp313-android_24_x86_64.whl", hash = "sha256:fbb4f2400bfda24f12f009cba62ad5734148569ff4949b1b6ec3b519444342e6", size = 1156301, upload-time = "2025-09-08T23:08:22.47Z" },
+ { url = "https://files.pythonhosted.org/packages/a5/ad/15906493fd40c316377fd8a8f6b1f93104f97a752667763c9b9c1b71d42d/pyzmq-27.1.0-cp313-cp313t-macosx_10_15_universal2.whl", hash = "sha256:e343d067f7b151cfe4eb3bb796a7752c9d369eed007b91231e817071d2c2fec7", size = 1341197, upload-time = "2025-09-08T23:08:24.286Z" },
+ { url = "https://files.pythonhosted.org/packages/14/1d/d343f3ce13db53a54cb8946594e567410b2125394dafcc0268d8dda027e0/pyzmq-27.1.0-cp313-cp313t-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:08363b2011dec81c354d694bdecaef4770e0ae96b9afea70b3f47b973655cc05", size = 897275, upload-time = "2025-09-08T23:08:26.063Z" },
+ { url = "https://files.pythonhosted.org/packages/69/2d/d83dd6d7ca929a2fc67d2c3005415cdf322af7751d773524809f9e585129/pyzmq-27.1.0-cp313-cp313t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d54530c8c8b5b8ddb3318f481297441af102517602b569146185fa10b63f4fa9", size = 660469, upload-time = "2025-09-08T23:08:27.623Z" },
+ { url = "https://files.pythonhosted.org/packages/3e/cd/9822a7af117f4bc0f1952dbe9ef8358eb50a24928efd5edf54210b850259/pyzmq-27.1.0-cp313-cp313t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6f3afa12c392f0a44a2414056d730eebc33ec0926aae92b5ad5cf26ebb6cc128", size = 847961, upload-time = "2025-09-08T23:08:29.672Z" },
+ { url = "https://files.pythonhosted.org/packages/9a/12/f003e824a19ed73be15542f172fd0ec4ad0b60cf37436652c93b9df7c585/pyzmq-27.1.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c65047adafe573ff023b3187bb93faa583151627bc9c51fc4fb2c561ed689d39", size = 1650282, upload-time = "2025-09-08T23:08:31.349Z" },
+ { url = "https://files.pythonhosted.org/packages/d5/4a/e82d788ed58e9a23995cee70dbc20c9aded3d13a92d30d57ec2291f1e8a3/pyzmq-27.1.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:90e6e9441c946a8b0a667356f7078d96411391a3b8f80980315455574177ec97", size = 2024468, upload-time = "2025-09-08T23:08:33.543Z" },
+ { url = "https://files.pythonhosted.org/packages/d9/94/2da0a60841f757481e402b34bf4c8bf57fa54a5466b965de791b1e6f747d/pyzmq-27.1.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:add071b2d25f84e8189aaf0882d39a285b42fa3853016ebab234a5e78c7a43db", size = 1885394, upload-time = "2025-09-08T23:08:35.51Z" },
+ { url = "https://files.pythonhosted.org/packages/4f/6f/55c10e2e49ad52d080dc24e37adb215e5b0d64990b57598abc2e3f01725b/pyzmq-27.1.0-cp313-cp313t-win32.whl", hash = "sha256:7ccc0700cfdf7bd487bea8d850ec38f204478681ea02a582a8da8171b7f90a1c", size = 574964, upload-time = "2025-09-08T23:08:37.178Z" },
+ { url = "https://files.pythonhosted.org/packages/87/4d/2534970ba63dd7c522d8ca80fb92777f362c0f321900667c615e2067cb29/pyzmq-27.1.0-cp313-cp313t-win_amd64.whl", hash = "sha256:8085a9fba668216b9b4323be338ee5437a235fe275b9d1610e422ccc279733e2", size = 641029, upload-time = "2025-09-08T23:08:40.595Z" },
+ { url = "https://files.pythonhosted.org/packages/f6/fa/f8aea7a28b0641f31d40dea42d7ef003fded31e184ef47db696bc74cd610/pyzmq-27.1.0-cp313-cp313t-win_arm64.whl", hash = "sha256:6bb54ca21bcfe361e445256c15eedf083f153811c37be87e0514934d6913061e", size = 561541, upload-time = "2025-09-08T23:08:42.668Z" },
+ { url = "https://files.pythonhosted.org/packages/87/45/19efbb3000956e82d0331bafca5d9ac19ea2857722fa2caacefb6042f39d/pyzmq-27.1.0-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:ce980af330231615756acd5154f29813d553ea555485ae712c491cd483df6b7a", size = 1341197, upload-time = "2025-09-08T23:08:44.973Z" },
+ { url = "https://files.pythonhosted.org/packages/48/43/d72ccdbf0d73d1343936296665826350cb1e825f92f2db9db3e61c2162a2/pyzmq-27.1.0-cp314-cp314t-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:1779be8c549e54a1c38f805e56d2a2e5c009d26de10921d7d51cfd1c8d4632ea", size = 897175, upload-time = "2025-09-08T23:08:46.601Z" },
+ { url = "https://files.pythonhosted.org/packages/2f/2e/a483f73a10b65a9ef0161e817321d39a770b2acf8bcf3004a28d90d14a94/pyzmq-27.1.0-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7200bb0f03345515df50d99d3db206a0a6bee1955fbb8c453c76f5bf0e08fb96", size = 660427, upload-time = "2025-09-08T23:08:48.187Z" },
+ { url = "https://files.pythonhosted.org/packages/f5/d2/5f36552c2d3e5685abe60dfa56f91169f7a2d99bbaf67c5271022ab40863/pyzmq-27.1.0-cp314-cp314t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:01c0e07d558b06a60773744ea6251f769cd79a41a97d11b8bf4ab8f034b0424d", size = 847929, upload-time = "2025-09-08T23:08:49.76Z" },
+ { url = "https://files.pythonhosted.org/packages/c4/2a/404b331f2b7bf3198e9945f75c4c521f0c6a3a23b51f7a4a401b94a13833/pyzmq-27.1.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:80d834abee71f65253c91540445d37c4c561e293ba6e741b992f20a105d69146", size = 1650193, upload-time = "2025-09-08T23:08:51.7Z" },
+ { url = "https://files.pythonhosted.org/packages/1c/0b/f4107e33f62a5acf60e3ded67ed33d79b4ce18de432625ce2fc5093d6388/pyzmq-27.1.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:544b4e3b7198dde4a62b8ff6685e9802a9a1ebf47e77478a5eb88eca2a82f2fd", size = 2024388, upload-time = "2025-09-08T23:08:53.393Z" },
+ { url = "https://files.pythonhosted.org/packages/0d/01/add31fe76512642fd6e40e3a3bd21f4b47e242c8ba33efb6809e37076d9b/pyzmq-27.1.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:cedc4c68178e59a4046f97eca31b148ddcf51e88677de1ef4e78cf06c5376c9a", size = 1885316, upload-time = "2025-09-08T23:08:55.702Z" },
+ { url = "https://files.pythonhosted.org/packages/c4/59/a5f38970f9bf07cee96128de79590bb354917914a9be11272cfc7ff26af0/pyzmq-27.1.0-cp314-cp314t-win32.whl", hash = "sha256:1f0b2a577fd770aa6f053211a55d1c47901f4d537389a034c690291485e5fe92", size = 587472, upload-time = "2025-09-08T23:08:58.18Z" },
+ { url = "https://files.pythonhosted.org/packages/70/d8/78b1bad170f93fcf5e3536e70e8fadac55030002275c9a29e8f5719185de/pyzmq-27.1.0-cp314-cp314t-win_amd64.whl", hash = "sha256:19c9468ae0437f8074af379e986c5d3d7d7bfe033506af442e8c879732bedbe0", size = 661401, upload-time = "2025-09-08T23:08:59.802Z" },
+ { url = "https://files.pythonhosted.org/packages/81/d6/4bfbb40c9a0b42fc53c7cf442f6385db70b40f74a783130c5d0a5aa62228/pyzmq-27.1.0-cp314-cp314t-win_arm64.whl", hash = "sha256:dc5dbf68a7857b59473f7df42650c621d7e8923fb03fa74a526890f4d33cc4d7", size = 575170, upload-time = "2025-09-08T23:09:01.418Z" },
+ { url = "https://files.pythonhosted.org/packages/4c/c6/c4dcdecdbaa70969ee1fdced6d7b8f60cfabe64d25361f27ac4665a70620/pyzmq-27.1.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:18770c8d3563715387139060d37859c02ce40718d1faf299abddcdcc6a649066", size = 836265, upload-time = "2025-09-08T23:09:49.376Z" },
+ { url = "https://files.pythonhosted.org/packages/3e/79/f38c92eeaeb03a2ccc2ba9866f0439593bb08c5e3b714ac1d553e5c96e25/pyzmq-27.1.0-pp311-pypy311_pp73-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:ac25465d42f92e990f8d8b0546b01c391ad431c3bf447683fdc40565941d0604", size = 800208, upload-time = "2025-09-08T23:09:51.073Z" },
+ { url = "https://files.pythonhosted.org/packages/49/0e/3f0d0d335c6b3abb9b7b723776d0b21fa7f3a6c819a0db6097059aada160/pyzmq-27.1.0-pp311-pypy311_pp73-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:53b40f8ae006f2734ee7608d59ed661419f087521edbfc2149c3932e9c14808c", size = 567747, upload-time = "2025-09-08T23:09:52.698Z" },
+ { url = "https://files.pythonhosted.org/packages/a1/cf/f2b3784d536250ffd4be70e049f3b60981235d70c6e8ce7e3ef21e1adb25/pyzmq-27.1.0-pp311-pypy311_pp73-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f605d884e7c8be8fe1aa94e0a783bf3f591b84c24e4bc4f3e7564c82ac25e271", size = 747371, upload-time = "2025-09-08T23:09:54.563Z" },
+ { url = "https://files.pythonhosted.org/packages/01/1b/5dbe84eefc86f48473947e2f41711aded97eecef1231f4558f1f02713c12/pyzmq-27.1.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:c9f7f6e13dff2e44a6afeaf2cf54cee5929ad64afaf4d40b50f93c58fc687355", size = 544862, upload-time = "2025-09-08T23:09:56.509Z" },
+]
+
+[[package]]
+name = "referencing"
+version = "0.37.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "attrs" },
+ { name = "rpds-py" },
+ { name = "typing-extensions", marker = "python_full_version < '3.13'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/22/f5/df4e9027acead3ecc63e50fe1e36aca1523e1719559c499951bb4b53188f/referencing-0.37.0.tar.gz", hash = "sha256:44aefc3142c5b842538163acb373e24cce6632bd54bdb01b21ad5863489f50d8", size = 78036, upload-time = "2025-10-13T15:30:48.871Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/2c/58/ca301544e1fa93ed4f80d724bf5b194f6e4b945841c5bfd555878eea9fcb/referencing-0.37.0-py3-none-any.whl", hash = "sha256:381329a9f99628c9069361716891d34ad94af76e461dcb0335825aecc7692231", size = 26766, upload-time = "2025-10-13T15:30:47.625Z" },
+]
+
[[package]]
name = "regex"
version = "2024.11.6"
@@ -1777,12 +2211,111 @@ wheels = [
]

[[package]]
-name = "roman-numerals-py"
-version = "3.1.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/30/76/48fd56d17c5bdbdf65609abbc67288728a98ed4c02919428d4f52d23b24b/roman_numerals_py-3.1.0.tar.gz", hash = "sha256:be4bf804f083a4ce001b5eb7e3c0862479d10f94c936f6c4e5f250aa5ff5bd2d", size = 9017, upload-time = "2025-02-22T07:34:54.333Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/53/97/d2cbbaa10c9b826af0e10fdf836e1bf344d9f0abb873ebc34d1f49642d3f/roman_numerals_py-3.1.0-py3-none-any.whl", hash = "sha256:9da2ad2fb670bcf24e81070ceb3be72f6c11c440d73bd579fbeca1e9f330954c", size = 7742, upload-time = "2025-02-22T07:34:52.422Z" },
+name = "rpds-py"
+version = "0.29.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/98/33/23b3b3419b6a3e0f559c7c0d2ca8fc1b9448382b25245033788785921332/rpds_py-0.29.0.tar.gz", hash = "sha256:fe55fe686908f50154d1dc599232016e50c243b438c3b7432f24e2895b0e5359", size = 69359, upload-time = "2025-11-16T14:50:39.532Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/36/ab/7fb95163a53ab122c74a7c42d2d2f012819af2cf3deb43fb0d5acf45cc1a/rpds_py-0.29.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:9b9c764a11fd637e0322a488560533112837f5334ffeb48b1be20f6d98a7b437", size = 372344, upload-time = "2025-11-16T14:47:57.279Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/45/f3c30084c03b0d0f918cb4c5ae2c20b0a148b51ba2b3f6456765b629bedd/rpds_py-0.29.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3fd2164d73812026ce970d44c3ebd51e019d2a26a4425a5dcbdfa93a34abc383", size = 363041, upload-time = "2025-11-16T14:47:58.908Z" },
+ { url = "https://files.pythonhosted.org/packages/e3/e9/4d044a1662608c47a87cbb37b999d4d5af54c6d6ebdda93a4d8bbf8b2a10/rpds_py-0.29.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a097b7f7f7274164566ae90a221fd725363c0e9d243e2e9ed43d195ccc5495c", size = 391775, upload-time = "2025-11-16T14:48:00.197Z" },
+ { url = "https://files.pythonhosted.org/packages/50/c9/7616d3ace4e6731aeb6e3cd85123e03aec58e439044e214b9c5c60fd8eb1/rpds_py-0.29.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7cdc0490374e31cedefefaa1520d5fe38e82fde8748cbc926e7284574c714d6b", size = 405624, upload-time = "2025-11-16T14:48:01.496Z" },
+ { url = "https://files.pythonhosted.org/packages/c2/e2/6d7d6941ca0843609fd2d72c966a438d6f22617baf22d46c3d2156c31350/rpds_py-0.29.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:89ca2e673ddd5bde9b386da9a0aac0cab0e76f40c8f0aaf0d6311b6bbf2aa311", size = 527894, upload-time = "2025-11-16T14:48:03.167Z" },
+ { url = "https://files.pythonhosted.org/packages/8d/f7/aee14dc2db61bb2ae1e3068f134ca9da5f28c586120889a70ff504bb026f/rpds_py-0.29.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a5d9da3ff5af1ca1249b1adb8ef0573b94c76e6ae880ba1852f033bf429d4588", size = 412720, upload-time = "2025-11-16T14:48:04.413Z" },
+ { url = "https://files.pythonhosted.org/packages/2f/e2/2293f236e887c0360c2723d90c00d48dee296406994d6271faf1712e94ec/rpds_py-0.29.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8238d1d310283e87376c12f658b61e1ee23a14c0e54c7c0ce953efdbdc72deed", size = 392945, upload-time = "2025-11-16T14:48:06.252Z" },
+ { url = "https://files.pythonhosted.org/packages/14/cd/ceea6147acd3bd1fd028d1975228f08ff19d62098078d5ec3eed49703797/rpds_py-0.29.0-cp311-cp311-manylinux_2_31_riscv64.whl", hash = "sha256:2d6fb2ad1c36f91c4646989811e84b1ea5e0c3cf9690b826b6e32b7965853a63", size = 406385, upload-time = "2025-11-16T14:48:07.575Z" },
+ { url = "https://files.pythonhosted.org/packages/52/36/fe4dead19e45eb77a0524acfdbf51e6cda597b26fc5b6dddbff55fbbb1a5/rpds_py-0.29.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:534dc9df211387547267ccdb42253aa30527482acb38dd9b21c5c115d66a96d2", size = 423943, upload-time = "2025-11-16T14:48:10.175Z" },
+ { url = "https://files.pythonhosted.org/packages/a1/7b/4551510803b582fa4abbc8645441a2d15aa0c962c3b21ebb380b7e74f6a1/rpds_py-0.29.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d456e64724a075441e4ed648d7f154dc62e9aabff29bcdf723d0c00e9e1d352f", size = 574204, upload-time = "2025-11-16T14:48:11.499Z" },
+ { url = "https://files.pythonhosted.org/packages/64/ba/071ccdd7b171e727a6ae079f02c26f75790b41555f12ca8f1151336d2124/rpds_py-0.29.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:a738f2da2f565989401bd6fd0b15990a4d1523c6d7fe83f300b7e7d17212feca", size = 600587, upload-time = "2025-11-16T14:48:12.822Z" },
+ { url = "https://files.pythonhosted.org/packages/03/09/96983d48c8cf5a1e03c7d9cc1f4b48266adfb858ae48c7c2ce978dbba349/rpds_py-0.29.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a110e14508fd26fd2e472bb541f37c209409876ba601cf57e739e87d8a53cf95", size = 562287, upload-time = "2025-11-16T14:48:14.108Z" },
+ { url = "https://files.pythonhosted.org/packages/40/f0/8c01aaedc0fa92156f0391f39ea93b5952bc0ec56b897763858f95da8168/rpds_py-0.29.0-cp311-cp311-win32.whl", hash = "sha256:923248a56dd8d158389a28934f6f69ebf89f218ef96a6b216a9be6861804d3f4", size = 221394, upload-time = "2025-11-16T14:48:15.374Z" },
+ { url = "https://files.pythonhosted.org/packages/7e/a5/a8b21c54c7d234efdc83dc034a4d7cd9668e3613b6316876a29b49dece71/rpds_py-0.29.0-cp311-cp311-win_amd64.whl", hash = "sha256:539eb77eb043afcc45314d1be09ea6d6cafb3addc73e0547c171c6d636957f60", size = 235713, upload-time = "2025-11-16T14:48:16.636Z" },
+ { url = "https://files.pythonhosted.org/packages/a7/1f/df3c56219523947b1be402fa12e6323fe6d61d883cf35d6cb5d5bb6db9d9/rpds_py-0.29.0-cp311-cp311-win_arm64.whl", hash = "sha256:bdb67151ea81fcf02d8f494703fb728d4d34d24556cbff5f417d74f6f5792e7c", size = 229157, upload-time = "2025-11-16T14:48:17.891Z" },
+ { url = "https://files.pythonhosted.org/packages/3c/50/bc0e6e736d94e420df79be4deb5c9476b63165c87bb8f19ef75d100d21b3/rpds_py-0.29.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a0891cfd8db43e085c0ab93ab7e9b0c8fee84780d436d3b266b113e51e79f954", size = 376000, upload-time = "2025-11-16T14:48:19.141Z" },
+ { url = "https://files.pythonhosted.org/packages/3e/3a/46676277160f014ae95f24de53bed0e3b7ea66c235e7de0b9df7bd5d68ba/rpds_py-0.29.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3897924d3f9a0361472d884051f9a2460358f9a45b1d85a39a158d2f8f1ad71c", size = 360575, upload-time = "2025-11-16T14:48:20.443Z" },
+ { url = "https://files.pythonhosted.org/packages/75/ba/411d414ed99ea1afdd185bbabeeaac00624bd1e4b22840b5e9967ade6337/rpds_py-0.29.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a21deb8e0d1571508c6491ce5ea5e25669b1dd4adf1c9d64b6314842f708b5d", size = 392159, upload-time = "2025-11-16T14:48:22.12Z" },
+ { url = "https://files.pythonhosted.org/packages/8f/b1/e18aa3a331f705467a48d0296778dc1fea9d7f6cf675bd261f9a846c7e90/rpds_py-0.29.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9efe71687d6427737a0a2de9ca1c0a216510e6cd08925c44162be23ed7bed2d5", size = 410602, upload-time = "2025-11-16T14:48:23.563Z" },
+ { url = "https://files.pythonhosted.org/packages/2f/6c/04f27f0c9f2299274c76612ac9d2c36c5048bb2c6c2e52c38c60bf3868d9/rpds_py-0.29.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:40f65470919dc189c833e86b2c4bd21bd355f98436a2cef9e0a9a92aebc8e57e", size = 515808, upload-time = "2025-11-16T14:48:24.949Z" },
+ { url = "https://files.pythonhosted.org/packages/83/56/a8412aa464fb151f8bc0d91fb0bb888adc9039bd41c1c6ba8d94990d8cf8/rpds_py-0.29.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:def48ff59f181130f1a2cb7c517d16328efac3ec03951cca40c1dc2049747e83", size = 416015, upload-time = "2025-11-16T14:48:26.782Z" },
+ { url = "https://files.pythonhosted.org/packages/04/4c/f9b8a05faca3d9e0a6397c90d13acb9307c9792b2bff621430c58b1d6e76/rpds_py-0.29.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad7bd570be92695d89285a4b373006930715b78d96449f686af422debb4d3949", size = 395325, upload-time = "2025-11-16T14:48:28.055Z" },
+ { url = "https://files.pythonhosted.org/packages/34/60/869f3bfbf8ed7b54f1ad9a5543e0fdffdd40b5a8f587fe300ee7b4f19340/rpds_py-0.29.0-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:5a572911cd053137bbff8e3a52d31c5d2dba51d3a67ad902629c70185f3f2181", size = 410160, upload-time = "2025-11-16T14:48:29.338Z" },
+ { url = "https://files.pythonhosted.org/packages/91/aa/e5b496334e3aba4fe4c8a80187b89f3c1294c5c36f2a926da74338fa5a73/rpds_py-0.29.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d583d4403bcbf10cffc3ab5cee23d7643fcc960dff85973fd3c2d6c86e8dbb0c", size = 425309, upload-time = "2025-11-16T14:48:30.691Z" },
+ { url = "https://files.pythonhosted.org/packages/85/68/4e24a34189751ceb6d66b28f18159922828dd84155876551f7ca5b25f14f/rpds_py-0.29.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:070befbb868f257d24c3bb350dbd6e2f645e83731f31264b19d7231dd5c396c7", size = 574644, upload-time = "2025-11-16T14:48:31.964Z" },
+ { url = "https://files.pythonhosted.org/packages/8c/cf/474a005ea4ea9c3b4f17b6108b6b13cebfc98ebaff11d6e1b193204b3a93/rpds_py-0.29.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:fc935f6b20b0c9f919a8ff024739174522abd331978f750a74bb68abd117bd19", size = 601605, upload-time = "2025-11-16T14:48:33.252Z" },
+ { url = "https://files.pythonhosted.org/packages/f4/b1/c56f6a9ab8c5f6bb5c65c4b5f8229167a3a525245b0773f2c0896686b64e/rpds_py-0.29.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8c5a8ecaa44ce2d8d9d20a68a2483a74c07f05d72e94a4dff88906c8807e77b0", size = 564593, upload-time = "2025-11-16T14:48:34.643Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/13/0494cecce4848f68501e0a229432620b4b57022388b071eeff95f3e1e75b/rpds_py-0.29.0-cp312-cp312-win32.whl", hash = "sha256:ba5e1aeaf8dd6d8f6caba1f5539cddda87d511331714b7b5fc908b6cfc3636b7", size = 223853, upload-time = "2025-11-16T14:48:36.419Z" },
+ { url = "https://files.pythonhosted.org/packages/1f/6a/51e9aeb444a00cdc520b032a28b07e5f8dc7bc328b57760c53e7f96997b4/rpds_py-0.29.0-cp312-cp312-win_amd64.whl", hash = "sha256:b5f6134faf54b3cb83375db0f113506f8b7770785be1f95a631e7e2892101977", size = 239895, upload-time = "2025-11-16T14:48:37.956Z" },
+ { url = "https://files.pythonhosted.org/packages/d1/d4/8bce56cdad1ab873e3f27cb31c6a51d8f384d66b022b820525b879f8bed1/rpds_py-0.29.0-cp312-cp312-win_arm64.whl", hash = "sha256:b016eddf00dca7944721bf0cd85b6af7f6c4efaf83ee0b37c4133bd39757a8c7", size = 230321, upload-time = "2025-11-16T14:48:39.71Z" },
+ { url = "https://files.pythonhosted.org/packages/fd/d9/c5de60d9d371bbb186c3e9bf75f4fc5665e11117a25a06a6b2e0afb7380e/rpds_py-0.29.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1585648d0760b88292eecab5181f5651111a69d90eff35d6b78aa32998886a61", size = 375710, upload-time = "2025-11-16T14:48:41.063Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/b3/0860cdd012291dc21272895ce107f1e98e335509ba986dd83d72658b82b9/rpds_py-0.29.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:521807963971a23996ddaf764c682b3e46459b3c58ccd79fefbe16718db43154", size = 360582, upload-time = "2025-11-16T14:48:42.423Z" },
+ { url = "https://files.pythonhosted.org/packages/92/8a/a18c2f4a61b3407e56175f6aab6deacdf9d360191a3d6f38566e1eaf7266/rpds_py-0.29.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a8896986efaa243ab713c69e6491a4138410f0fe36f2f4c71e18bd5501e8014", size = 391172, upload-time = "2025-11-16T14:48:43.75Z" },
+ { url = "https://files.pythonhosted.org/packages/fd/49/e93354258508c50abc15cdcd5fcf7ac4117f67bb6233ad7859f75e7372a0/rpds_py-0.29.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1d24564a700ef41480a984c5ebed62b74e6ce5860429b98b1fede76049e953e6", size = 409586, upload-time = "2025-11-16T14:48:45.498Z" },
+ { url = "https://files.pythonhosted.org/packages/5a/8d/a27860dae1c19a6bdc901f90c81f0d581df1943355802961a57cdb5b6cd1/rpds_py-0.29.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e6596b93c010d386ae46c9fba9bfc9fc5965fa8228edeac51576299182c2e31c", size = 516339, upload-time = "2025-11-16T14:48:47.308Z" },
+ { url = "https://files.pythonhosted.org/packages/fc/ad/a75e603161e79b7110c647163d130872b271c6b28712c803c65d492100f7/rpds_py-0.29.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5cc58aac218826d054c7da7f95821eba94125d88be673ff44267bb89d12a5866", size = 416201, upload-time = "2025-11-16T14:48:48.615Z" },
+ { url = "https://files.pythonhosted.org/packages/b9/42/555b4ee17508beafac135c8b450816ace5a96194ce97fefc49d58e5652ea/rpds_py-0.29.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de73e40ebc04dd5d9556f50180395322193a78ec247e637e741c1b954810f295", size = 395095, upload-time = "2025-11-16T14:48:50.027Z" },
+ { url = "https://files.pythonhosted.org/packages/cd/f0/c90b671b9031e800ec45112be42ea9f027f94f9ac25faaac8770596a16a1/rpds_py-0.29.0-cp313-cp313-manylinux_2_31_riscv64.whl", hash = "sha256:295ce5ac7f0cf69a651ea75c8f76d02a31f98e5698e82a50a5f4d4982fbbae3b", size = 410077, upload-time = "2025-11-16T14:48:51.515Z" },
+ { url = "https://files.pythonhosted.org/packages/3d/80/9af8b640b81fe21e6f718e9dec36c0b5f670332747243130a5490f292245/rpds_py-0.29.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1ea59b23ea931d494459c8338056fe7d93458c0bf3ecc061cd03916505369d55", size = 424548, upload-time = "2025-11-16T14:48:53.237Z" },
+ { url = "https://files.pythonhosted.org/packages/e4/0b/b5647446e991736e6a495ef510e6710df91e880575a586e763baeb0aa770/rpds_py-0.29.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f49d41559cebd608042fdcf54ba597a4a7555b49ad5c1c0c03e0af82692661cd", size = 573661, upload-time = "2025-11-16T14:48:54.769Z" },
+ { url = "https://files.pythonhosted.org/packages/f7/b3/1b1c9576839ff583d1428efbf59f9ee70498d8ce6c0b328ac02f1e470879/rpds_py-0.29.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:05a2bd42768ea988294ca328206efbcc66e220d2d9b7836ee5712c07ad6340ea", size = 600937, upload-time = "2025-11-16T14:48:56.247Z" },
+ { url = "https://files.pythonhosted.org/packages/6c/7b/b6cfca2f9fee4c4494ce54f7fb1b9f578867495a9aa9fc0d44f5f735c8e0/rpds_py-0.29.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:33ca7bdfedd83339ca55da3a5e1527ee5870d4b8369456b5777b197756f3ca22", size = 564496, upload-time = "2025-11-16T14:48:57.691Z" },
+ { url = "https://files.pythonhosted.org/packages/b9/fb/ba29ec7f0f06eb801bac5a23057a9ff7670623b5e8013bd59bec4aa09de8/rpds_py-0.29.0-cp313-cp313-win32.whl", hash = "sha256:20c51ae86a0bb9accc9ad4e6cdeec58d5ebb7f1b09dd4466331fc65e1766aae7", size = 223126, upload-time = "2025-11-16T14:48:59.058Z" },
+ { url = "https://files.pythonhosted.org/packages/3c/6b/0229d3bed4ddaa409e6d90b0ae967ed4380e4bdd0dad6e59b92c17d42457/rpds_py-0.29.0-cp313-cp313-win_amd64.whl", hash = "sha256:6410e66f02803600edb0b1889541f4b5cc298a5ccda0ad789cc50ef23b54813e", size = 239771, upload-time = "2025-11-16T14:49:00.872Z" },
+ { url = "https://files.pythonhosted.org/packages/e4/38/d2868f058b164f8efd89754d85d7b1c08b454f5c07ac2e6cc2e9bd4bd05b/rpds_py-0.29.0-cp313-cp313-win_arm64.whl", hash = "sha256:56838e1cd9174dc23c5691ee29f1d1be9eab357f27efef6bded1328b23e1ced2", size = 229994, upload-time = "2025-11-16T14:49:02.673Z" },
+ { url = "https://files.pythonhosted.org/packages/52/91/5de91c5ec7d41759beec9b251630824dbb8e32d20c3756da1a9a9d309709/rpds_py-0.29.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:37d94eadf764d16b9a04307f2ab1d7af6dc28774bbe0535c9323101e14877b4c", size = 365886, upload-time = "2025-11-16T14:49:04.133Z" },
+ { url = "https://files.pythonhosted.org/packages/85/7c/415d8c1b016d5f47ecec5145d9d6d21002d39dce8761b30f6c88810b455a/rpds_py-0.29.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:d472cf73efe5726a067dce63eebe8215b14beabea7c12606fd9994267b3cfe2b", size = 355262, upload-time = "2025-11-16T14:49:05.543Z" },
+ { url = "https://files.pythonhosted.org/packages/3d/14/bf83e2daa4f980e4dc848aed9299792a8b84af95e12541d9e7562f84a6ef/rpds_py-0.29.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:72fdfd5ff8992e4636621826371e3ac5f3e3b8323e9d0e48378e9c13c3dac9d0", size = 384826, upload-time = "2025-11-16T14:49:07.301Z" },
+ { url = "https://files.pythonhosted.org/packages/33/b8/53330c50a810ae22b4fbba5e6cf961b68b9d72d9bd6780a7c0a79b070857/rpds_py-0.29.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2549d833abdf8275c901313b9e8ff8fba57e50f6a495035a2a4e30621a2f7cc4", size = 394234, upload-time = "2025-11-16T14:49:08.782Z" },
+ { url = "https://files.pythonhosted.org/packages/cc/32/01e2e9645cef0e584f518cfde4567563e57db2257244632b603f61b40e50/rpds_py-0.29.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4448dad428f28a6a767c3e3b80cde3446a22a0efbddaa2360f4bb4dc836d0688", size = 520008, upload-time = "2025-11-16T14:49:10.253Z" },
+ { url = "https://files.pythonhosted.org/packages/98/c3/0d1b95a81affae2b10f950782e33a1fd2edd6ce2a479966cac98c9a66f57/rpds_py-0.29.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:115f48170fd4296a33938d8c11f697f5f26e0472e43d28f35624764173a60e4d", size = 409569, upload-time = "2025-11-16T14:49:12.478Z" },
+ { url = "https://files.pythonhosted.org/packages/fa/60/aa3b8678f3f009f675b99174fa2754302a7fbfe749162e8043d111de2d88/rpds_py-0.29.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e5bb73ffc029820f4348e9b66b3027493ae00bca6629129cd433fd7a76308ee", size = 385188, upload-time = "2025-11-16T14:49:13.88Z" },
+ { url = "https://files.pythonhosted.org/packages/92/02/5546c1c8aa89c18d40c1fcffdcc957ba730dee53fb7c3ca3a46f114761d2/rpds_py-0.29.0-cp313-cp313t-manylinux_2_31_riscv64.whl", hash = "sha256:b1581fcde18fcdf42ea2403a16a6b646f8eb1e58d7f90a0ce693da441f76942e", size = 398587, upload-time = "2025-11-16T14:49:15.339Z" },
+ { url = "https://files.pythonhosted.org/packages/6c/e0/ad6eeaf47e236eba052fa34c4073078b9e092bd44da6bbb35aaae9580669/rpds_py-0.29.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:16e9da2bda9eb17ea318b4c335ec9ac1818e88922cbe03a5743ea0da9ecf74fb", size = 416641, upload-time = "2025-11-16T14:49:16.832Z" },
+ { url = "https://files.pythonhosted.org/packages/1a/93/0acedfd50ad9cdd3879c615a6dc8c5f1ce78d2fdf8b87727468bb5bb4077/rpds_py-0.29.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:28fd300326dd21198f311534bdb6d7e989dd09b3418b3a91d54a0f384c700967", size = 566683, upload-time = "2025-11-16T14:49:18.342Z" },
+ { url = "https://files.pythonhosted.org/packages/62/53/8c64e0f340a9e801459fc6456821abc15b3582cb5dc3932d48705a9d9ac7/rpds_py-0.29.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:2aba991e041d031c7939e1358f583ae405a7bf04804ca806b97a5c0e0af1ea5e", size = 592730, upload-time = "2025-11-16T14:49:19.767Z" },
+ { url = "https://files.pythonhosted.org/packages/85/ef/3109b6584f8c4b0d2490747c916df833c127ecfa82be04d9a40a376f2090/rpds_py-0.29.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:7f437026dbbc3f08c99cc41a5b2570c6e1a1ddbe48ab19a9b814254128d4ea7a", size = 557361, upload-time = "2025-11-16T14:49:21.574Z" },
+ { url = "https://files.pythonhosted.org/packages/ff/3b/61586475e82d57f01da2c16edb9115a618afe00ce86fe1b58936880b15af/rpds_py-0.29.0-cp313-cp313t-win32.whl", hash = "sha256:6e97846e9800a5d0fe7be4d008f0c93d0feeb2700da7b1f7528dabafb31dfadb", size = 211227, upload-time = "2025-11-16T14:49:23.03Z" },
+ { url = "https://files.pythonhosted.org/packages/3b/3a/12dc43f13594a54ea0c9d7e9d43002116557330e3ad45bc56097ddf266e2/rpds_py-0.29.0-cp313-cp313t-win_amd64.whl", hash = "sha256:f49196aec7c4b406495f60e6f947ad71f317a765f956d74bbd83996b9edc0352", size = 225248, upload-time = "2025-11-16T14:49:24.841Z" },
+ { url = "https://files.pythonhosted.org/packages/89/b1/0b1474e7899371d9540d3bbb2a499a3427ae1fc39c998563fe9035a1073b/rpds_py-0.29.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:394d27e4453d3b4d82bb85665dc1fcf4b0badc30fc84282defed71643b50e1a1", size = 363731, upload-time = "2025-11-16T14:49:26.683Z" },
+ { url = "https://files.pythonhosted.org/packages/28/12/3b7cf2068d0a334ed1d7b385a9c3c8509f4c2bcba3d4648ea71369de0881/rpds_py-0.29.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:55d827b2ae95425d3be9bc9a5838b6c29d664924f98146557f7715e331d06df8", size = 354343, upload-time = "2025-11-16T14:49:28.24Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/73/5afcf8924bc02a749416eda64e17ac9c9b28f825f4737385295a0e99b0c1/rpds_py-0.29.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc31a07ed352e5462d3ee1b22e89285f4ce97d5266f6d1169da1142e78045626", size = 385406, upload-time = "2025-11-16T14:49:29.943Z" },
+ { url = "https://files.pythonhosted.org/packages/c8/37/5db736730662508535221737a21563591b6f43c77f2e388951c42f143242/rpds_py-0.29.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c4695dd224212f6105db7ea62197144230b808d6b2bba52238906a2762f1d1e7", size = 396162, upload-time = "2025-11-16T14:49:31.833Z" },
+ { url = "https://files.pythonhosted.org/packages/70/0d/491c1017d14f62ce7bac07c32768d209a50ec567d76d9f383b4cfad19b80/rpds_py-0.29.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcae1770b401167f8b9e1e3f566562e6966ffa9ce63639916248a9e25fa8a244", size = 517719, upload-time = "2025-11-16T14:49:33.804Z" },
+ { url = "https://files.pythonhosted.org/packages/d7/25/b11132afcb17cd5d82db173f0c8dab270ffdfaba43e5ce7a591837ae9649/rpds_py-0.29.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:90f30d15f45048448b8da21c41703b31c61119c06c216a1bf8c245812a0f0c17", size = 409498, upload-time = "2025-11-16T14:49:35.222Z" },
+ { url = "https://files.pythonhosted.org/packages/0f/7d/e6543cedfb2e6403a1845710a5ab0e0ccf8fc288e0b5af9a70bfe2c12053/rpds_py-0.29.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44a91e0ab77bdc0004b43261a4b8cd6d6b451e8d443754cfda830002b5745b32", size = 382743, upload-time = "2025-11-16T14:49:36.704Z" },
+ { url = "https://files.pythonhosted.org/packages/75/11/a4ebc9f654293ae9fefb83b2b6be7f3253e85ea42a5db2f77d50ad19aaeb/rpds_py-0.29.0-cp314-cp314-manylinux_2_31_riscv64.whl", hash = "sha256:4aa195e5804d32c682e453b34474f411ca108e4291c6a0f824ebdc30a91c973c", size = 400317, upload-time = "2025-11-16T14:49:39.132Z" },
+ { url = "https://files.pythonhosted.org/packages/52/18/97677a60a81c7f0e5f64e51fb3f8271c5c8fcabf3a2df18e97af53d7c2bf/rpds_py-0.29.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7971bdb7bf4ee0f7e6f67fa4c7fbc6019d9850cc977d126904392d363f6f8318", size = 416979, upload-time = "2025-11-16T14:49:40.575Z" },
+ { url = "https://files.pythonhosted.org/packages/f0/69/28ab391a9968f6c746b2a2db181eaa4d16afaa859fedc9c2f682d19f7e18/rpds_py-0.29.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:8ae33ad9ce580c7a47452c3b3f7d8a9095ef6208e0a0c7e4e2384f9fc5bf8212", size = 567288, upload-time = "2025-11-16T14:49:42.24Z" },
+ { url = "https://files.pythonhosted.org/packages/3b/d3/0c7afdcdb830eee94f5611b64e71354ffe6ac8df82d00c2faf2bfffd1d4e/rpds_py-0.29.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:c661132ab2fb4eeede2ef69670fd60da5235209874d001a98f1542f31f2a8a94", size = 593157, upload-time = "2025-11-16T14:49:43.782Z" },
+ { url = "https://files.pythonhosted.org/packages/e2/ac/a0fcbc2feed4241cf26d32268c195eb88ddd4bd862adfc9d4b25edfba535/rpds_py-0.29.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:bb78b3a0d31ac1bde132c67015a809948db751cb4e92cdb3f0b242e430b6ed0d", size = 554741, upload-time = "2025-11-16T14:49:45.557Z" },
+ { url = "https://files.pythonhosted.org/packages/0f/f1/fcc24137c470df8588674a677f33719d5800ec053aaacd1de8a5d5d84d9e/rpds_py-0.29.0-cp314-cp314-win32.whl", hash = "sha256:f475f103488312e9bd4000bc890a95955a07b2d0b6e8884aef4be56132adbbf1", size = 215508, upload-time = "2025-11-16T14:49:47.562Z" },
+ { url = "https://files.pythonhosted.org/packages/7b/c7/1d169b2045512eac019918fc1021ea07c30e84a4343f9f344e3e0aa8c788/rpds_py-0.29.0-cp314-cp314-win_amd64.whl", hash = "sha256:b9cf2359a4fca87cfb6801fae83a76aedf66ee1254a7a151f1341632acf67f1b", size = 228125, upload-time = "2025-11-16T14:49:49.064Z" },
+ { url = "https://files.pythonhosted.org/packages/be/36/0cec88aaba70ec4a6e381c444b0d916738497d27f0c30406e3d9fcbd3bc2/rpds_py-0.29.0-cp314-cp314-win_arm64.whl", hash = "sha256:9ba8028597e824854f0f1733d8b964e914ae3003b22a10c2c664cb6927e0feb9", size = 221992, upload-time = "2025-11-16T14:49:50.777Z" },
+ { url = "https://files.pythonhosted.org/packages/b1/fa/a2e524631717c9c0eb5d90d30f648cfba6b731047821c994acacb618406c/rpds_py-0.29.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:e71136fd0612556b35c575dc2726ae04a1669e6a6c378f2240312cf5d1a2ab10", size = 366425, upload-time = "2025-11-16T14:49:52.691Z" },
+ { url = "https://files.pythonhosted.org/packages/a2/a4/6d43ebe0746ff694a30233f63f454aed1677bd50ab7a59ff6b2bb5ac61f2/rpds_py-0.29.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:76fe96632d53f3bf0ea31ede2f53bbe3540cc2736d4aec3b3801b0458499ef3a", size = 355282, upload-time = "2025-11-16T14:49:54.292Z" },
+ { url = "https://files.pythonhosted.org/packages/fa/a7/52fd8270e0320b09eaf295766ae81dd175f65394687906709b3e75c71d06/rpds_py-0.29.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9459a33f077130dbb2c7c3cea72ee9932271fb3126404ba2a2661e4fe9eb7b79", size = 384968, upload-time = "2025-11-16T14:49:55.857Z" },
+ { url = "https://files.pythonhosted.org/packages/f4/7d/e6bc526b7a14e1ef80579a52c1d4ad39260a058a51d66c6039035d14db9d/rpds_py-0.29.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5c9546cfdd5d45e562cc0444b6dddc191e625c62e866bf567a2c69487c7ad28a", size = 394714, upload-time = "2025-11-16T14:49:57.343Z" },
+ { url = "https://files.pythonhosted.org/packages/c0/3f/f0ade3954e7db95c791e7eaf978aa7e08a756d2046e8bdd04d08146ed188/rpds_py-0.29.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12597d11d97b8f7e376c88929a6e17acb980e234547c92992f9f7c058f1a7310", size = 520136, upload-time = "2025-11-16T14:49:59.162Z" },
+ { url = "https://files.pythonhosted.org/packages/87/b3/07122ead1b97009715ab9d4082be6d9bd9546099b2b03fae37c3116f72be/rpds_py-0.29.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28de03cf48b8a9e6ec10318f2197b83946ed91e2891f651a109611be4106ac4b", size = 409250, upload-time = "2025-11-16T14:50:00.698Z" },
+ { url = "https://files.pythonhosted.org/packages/c9/c6/dcbee61fd1dc892aedcb1b489ba661313101aa82ec84b1a015d4c63ebfda/rpds_py-0.29.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd7951c964069039acc9d67a8ff1f0a7f34845ae180ca542b17dc1456b1f1808", size = 384940, upload-time = "2025-11-16T14:50:02.312Z" },
+ { url = "https://files.pythonhosted.org/packages/47/11/914ecb6f3574cf9bf8b38aced4063e0f787d6e1eb30b181a7efbc6c1da9a/rpds_py-0.29.0-cp314-cp314t-manylinux_2_31_riscv64.whl", hash = "sha256:c07d107b7316088f1ac0177a7661ca0c6670d443f6fe72e836069025e6266761", size = 399392, upload-time = "2025-11-16T14:50:03.829Z" },
+ { url = "https://files.pythonhosted.org/packages/f5/fd/2f4bd9433f58f816434bb934313584caa47dbc6f03ce5484df8ac8980561/rpds_py-0.29.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1de2345af363d25696969befc0c1688a6cb5e8b1d32b515ef84fc245c6cddba3", size = 416796, upload-time = "2025-11-16T14:50:05.558Z" },
+ { url = "https://files.pythonhosted.org/packages/79/a5/449f0281af33efa29d5c71014399d74842342ae908d8cd38260320167692/rpds_py-0.29.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:00e56b12d2199ca96068057e1ae7f9998ab6e99cda82431afafd32f3ec98cca9", size = 566843, upload-time = "2025-11-16T14:50:07.243Z" },
+ { url = "https://files.pythonhosted.org/packages/ab/32/0a6a1ccee2e37fcb1b7ba9afde762b77182dbb57937352a729c6cd3cf2bb/rpds_py-0.29.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:3919a3bbecee589300ed25000b6944174e07cd20db70552159207b3f4bbb45b8", size = 593956, upload-time = "2025-11-16T14:50:09.029Z" },
+ { url = "https://files.pythonhosted.org/packages/4a/3d/eb820f95dce4306f07a495ede02fb61bef36ea201d9137d4fcd5ab94ec1e/rpds_py-0.29.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:e7fa2ccc312bbd91e43aa5e0869e46bc03278a3dddb8d58833150a18b0f0283a", size = 557288, upload-time = "2025-11-16T14:50:10.73Z" },
+ { url = "https://files.pythonhosted.org/packages/e9/f8/b8ff786f40470462a252918e0836e0db903c28e88e3eec66bc4a7856ee5d/rpds_py-0.29.0-cp314-cp314t-win32.whl", hash = "sha256:97c817863ffc397f1e6a6e9d2d89fe5408c0a9922dac0329672fb0f35c867ea5", size = 211382, upload-time = "2025-11-16T14:50:12.827Z" },
+ { url = "https://files.pythonhosted.org/packages/c9/7f/1a65ae870bc9d0576aebb0c501ea5dccf1ae2178fe2821042150ebd2e707/rpds_py-0.29.0-cp314-cp314t-win_amd64.whl", hash = "sha256:2023473f444752f0f82a58dfcbee040d0a1b3d1b3c2ec40e884bd25db6d117d2", size = 225919, upload-time = "2025-11-16T14:50:14.734Z" },
+ { url = "https://files.pythonhosted.org/packages/f2/ac/b97e80bf107159e5b9ba9c91df1ab95f69e5e41b435f27bdd737f0d583ac/rpds_py-0.29.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:acd82a9e39082dc5f4492d15a6b6c8599aa21db5c35aaf7d6889aea16502c07d", size = 373963, upload-time = "2025-11-16T14:50:16.205Z" },
+ { url = "https://files.pythonhosted.org/packages/40/5a/55e72962d5d29bd912f40c594e68880d3c7a52774b0f75542775f9250712/rpds_py-0.29.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:715b67eac317bf1c7657508170a3e011a1ea6ccb1c9d5f296e20ba14196be6b3", size = 364644, upload-time = "2025-11-16T14:50:18.22Z" },
+ { url = "https://files.pythonhosted.org/packages/99/2a/6b6524d0191b7fc1351c3c0840baac42250515afb48ae40c7ed15499a6a2/rpds_py-0.29.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3b1b87a237cb2dba4db18bcfaaa44ba4cd5936b91121b62292ff21df577fc43", size = 393847, upload-time = "2025-11-16T14:50:20.012Z" },
+ { url = "https://files.pythonhosted.org/packages/1c/b8/c5692a7df577b3c0c7faed7ac01ee3c608b81750fc5d89f84529229b6873/rpds_py-0.29.0-pp311-pypy311_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1c3c3e8101bb06e337c88eb0c0ede3187131f19d97d43ea0e1c5407ea74c0cbf", size = 407281, upload-time = "2025-11-16T14:50:21.64Z" },
+ { url = "https://files.pythonhosted.org/packages/f0/57/0546c6f84031b7ea08b76646a8e33e45607cc6bd879ff1917dc077bb881e/rpds_py-0.29.0-pp311-pypy311_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b8e54d6e61f3ecd3abe032065ce83ea63417a24f437e4a3d73d2f85ce7b7cfe", size = 529213, upload-time = "2025-11-16T14:50:23.219Z" },
+ { url = "https://files.pythonhosted.org/packages/fa/c1/01dd5f444233605555bc11fe5fed6a5c18f379f02013870c176c8e630a23/rpds_py-0.29.0-pp311-pypy311_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3fbd4e9aebf110473a420dea85a238b254cf8a15acb04b22a5a6b5ce8925b760", size = 413808, upload-time = "2025-11-16T14:50:25.262Z" },
+ { url = "https://files.pythonhosted.org/packages/aa/0a/60f98b06156ea2a7af849fb148e00fbcfdb540909a5174a5ed10c93745c7/rpds_py-0.29.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80fdf53d36e6c72819993e35d1ebeeb8e8fc688d0c6c2b391b55e335b3afba5a", size = 394600, upload-time = "2025-11-16T14:50:26.956Z" },
+ { url = "https://files.pythonhosted.org/packages/37/f1/dc9312fc9bec040ece08396429f2bd9e0977924ba7a11c5ad7056428465e/rpds_py-0.29.0-pp311-pypy311_pp73-manylinux_2_31_riscv64.whl", hash = "sha256:ea7173df5d86f625f8dde6d5929629ad811ed8decda3b60ae603903839ac9ac0", size = 408634, upload-time = "2025-11-16T14:50:28.989Z" },
+ { url = "https://files.pythonhosted.org/packages/ed/41/65024c9fd40c89bb7d604cf73beda4cbdbcebe92d8765345dd65855b6449/rpds_py-0.29.0-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:76054d540061eda273274f3d13a21a4abdde90e13eaefdc205db37c05230efce", size = 426064, upload-time = "2025-11-16T14:50:30.674Z" },
+ { url = "https://files.pythonhosted.org/packages/a2/e0/cf95478881fc88ca2fdbf56381d7df36567cccc39a05394beac72182cd62/rpds_py-0.29.0-pp311-pypy311_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:9f84c549746a5be3bc7415830747a3a0312573afc9f95785eb35228bb17742ec", size = 575871, upload-time = "2025-11-16T14:50:33.428Z" },
+ { url = "https://files.pythonhosted.org/packages/ea/c0/df88097e64339a0218b57bd5f9ca49898e4c394db756c67fccc64add850a/rpds_py-0.29.0-pp311-pypy311_pp73-musllinux_1_2_i686.whl", hash = "sha256:0ea962671af5cb9a260489e311fa22b2e97103e3f9f0caaea6f81390af96a9ed", size = 601702, upload-time = "2025-11-16T14:50:36.051Z" },
+ { url = "https://files.pythonhosted.org/packages/87/f4/09ffb3ebd0cbb9e2c7c9b84d252557ecf434cd71584ee1e32f66013824df/rpds_py-0.29.0-pp311-pypy311_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:f7728653900035fb7b8d06e1e5900545d8088efc9d5d4545782da7df03ec803f", size = 564054, upload-time = "2025-11-16T14:50:37.733Z" },
]

[[package]]
@@ -1949,9 +2482,18 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/c8/78/3565d011c61f5a43488987ee32b6f3f656e7f107ac2782dd57bdd7d91d9a/snowballstemmer-3.0.1-py3-none-any.whl", hash = "sha256:6cd7b3897da8d6c9ffb968a6781fa6532dce9c3618a4b127d920dab764a19064", size = 103274, upload-time = "2025-05-09T16:34:50.371Z" },
]

+[[package]]
+name = "soupsieve"
+version = "2.8"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/6d/e6/21ccce3262dd4889aa3332e5a119a3491a95e8f60939870a3a035aabac0d/soupsieve-2.8.tar.gz", hash = "sha256:e2dd4a40a628cb5f28f6d4b0db8800b8f581b65bb380b97de22ba5ca8d72572f", size = 103472, upload-time = "2025-08-27T15:39:51.78Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/14/a0/bb38d3b76b8cae341dad93a2dd83ab7462e6dbcdd84d43f54ee60a8dc167/soupsieve-2.8-py3-none-any.whl", hash = "sha256:0cc76456a30e20f5d7f2e14a98a4ae2ee4e5abdc7c5ea0aafe795f344bc7984c", size = 36679, upload-time = "2025-08-27T15:39:50.179Z" },
+]
+
[[package]]
name = "sphinx"
-version = "8.2.3"
+version = "8.1.3"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "alabaster" },
@@ -1963,7 +2505,6 @@ dependencies = [
{ name = "packaging" },
{ name = "pygments" },
{ name = "requests" },
- { name = "roman-numerals-py" },
{ name = "snowballstemmer" },
{ name = "sphinxcontrib-applehelp" },
{ name = "sphinxcontrib-devhelp" },
@@ -1972,21 +2513,21 @@ dependencies = [
{ name = "sphinxcontrib-qthelp" },
{ name = "sphinxcontrib-serializinghtml" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/38/ad/4360e50ed56cb483667b8e6dadf2d3fda62359593faabbe749a27c4eaca6/sphinx-8.2.3.tar.gz", hash = "sha256:398ad29dee7f63a75888314e9424d40f52ce5a6a87ae88e7071e80af296ec348", size = 8321876, upload-time = "2025-03-02T22:31:59.658Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/be0b61178fe2cdcb67e2a92fc9ebb488e3c51c4f74a36a7824c0adf23425/sphinx-8.1.3.tar.gz", hash = "sha256:43c1911eecb0d3e161ad78611bc905d1ad0e523e4ddc202a58a821773dc4c927", size = 8184611, upload-time = "2024-10-13T20:27:13.93Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/31/53/136e9eca6e0b9dc0e1962e2c908fbea2e5ac000c2a2fbd9a35797958c48b/sphinx-8.2.3-py3-none-any.whl", hash = "sha256:4405915165f13521d875a8c29c8970800a0141c14cc5416a38feca4ea5d9b9c3", size = 3589741, upload-time = "2025-03-02T22:31:56.836Z" },
+ { url = "https://files.pythonhosted.org/packages/26/60/1ddff83a56d33aaf6f10ec8ce84b4c007d9368b21008876fceda7e7381ef/sphinx-8.1.3-py3-none-any.whl", hash = "sha256:09719015511837b76bf6e03e42eb7595ac8c2e41eeb9c29c5b755c6b677992a2", size = 3487125, upload-time = "2024-10-13T20:27:10.448Z" },
]

[[package]]
name = "sphinx-autodoc-typehints"
-version = "3.2.0"
+version = "3.0.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "sphinx" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/93/68/a388a9b8f066cd865d9daa65af589d097efbfab9a8c302d2cb2daa43b52e/sphinx_autodoc_typehints-3.2.0.tar.gz", hash = "sha256:107ac98bc8b4837202c88c0736d59d6da44076e65a0d7d7d543a78631f662a9b", size = 36724, upload-time = "2025-04-25T16:53:25.872Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/26/f0/43c6a5ff3e7b08a8c3b32f81b859f1b518ccc31e45f22e2b41ced38be7b9/sphinx_autodoc_typehints-3.0.1.tar.gz", hash = "sha256:b9b40dd15dee54f6f810c924f863f9cf1c54f9f3265c495140ea01be7f44fa55", size = 36282, upload-time = "2025-01-16T18:25:30.958Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/f7/c7/8aab362e86cbf887e58be749a78d20ad743e1eb2c73c2b13d4761f39a104/sphinx_autodoc_typehints-3.2.0-py3-none-any.whl", hash = "sha256:884b39be23b1d884dcc825d4680c9c6357a476936e3b381a67ae80091984eb49", size = 20563, upload-time = "2025-04-25T16:53:24.492Z" },
+ { url = "https://files.pythonhosted.org/packages/3c/dc/dc46c5c7c566b7ec5e8f860f9c89533bf03c0e6aadc96fb9b337867e4460/sphinx_autodoc_typehints-3.0.1-py3-none-any.whl", hash = "sha256:4b64b676a14b5b79cefb6628a6dc8070e320d4963e8ff640a2f3e9390ae9045a", size = 20245, upload-time = "2025-01-16T18:25:27.394Z" },
]

[[package]]
@@ -2013,20 +2554,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/c6/43/65c0acbd8cc6f50195a3a1fc195c404988b15c67090e73c7a41a9f57d6bd/sphinx_design-0.6.1-py3-none-any.whl", hash = "sha256:b11f37db1a802a183d61b159d9a202314d4d2fe29c163437001324fe2f19549c", size = 2215338, upload-time = "2024-08-02T13:48:42.106Z" },
]

-[[package]]
-name = "sphinx-rtd-theme"
-version = "3.0.2"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "docutils" },
- { name = "sphinx" },
- { name = "sphinxcontrib-jquery" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/91/44/c97faec644d29a5ceddd3020ae2edffa69e7d00054a8c7a6021e82f20335/sphinx_rtd_theme-3.0.2.tar.gz", hash = "sha256:b7457bc25dda723b20b086a670b9953c859eab60a2a03ee8eb2bb23e176e5f85", size = 7620463, upload-time = "2024-11-13T11:06:04.545Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/85/77/46e3bac77b82b4df5bb5b61f2de98637724f246b4966cfc34bc5895d852a/sphinx_rtd_theme-3.0.2-py2.py3-none-any.whl", hash = "sha256:422ccc750c3a3a311de4ae327e82affdaf59eb695ba4936538552f3b00f4ee13", size = 7655561, upload-time = "2024-11-13T11:06:02.094Z" },
-]
-
[[package]]
name = "sphinxcontrib-applehelp"
version = "2.0.0"
@@ -2055,15 +2582,15 @@ wheels = [
]

[[package]]
-name = "sphinxcontrib-jquery"
-version = "4.1"
+name = "sphinxcontrib-images"
+version = "1.0.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
+ { name = "requests" },
{ name = "sphinx" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/de/f3/aa67467e051df70a6330fe7770894b3e4f09436dea6881ae0b4f3d87cad8/sphinxcontrib-jquery-4.1.tar.gz", hash = "sha256:1620739f04e36a2c779f1a131a2dfd49b2fd07351bf1968ced074365933abc7a", size = 122331, upload-time = "2023-03-14T15:01:01.944Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/76/85/749bd22d1a68db7291c89e2ebca53f4306c3f205853cf31e9de279034c3c/sphinxcontrib_jquery-4.1-py2.py3-none-any.whl", hash = "sha256:f936030d7d0147dd026a4f2b5a57343d233f1fc7b363f68b3d4f1cb0993878ae", size = 121104, upload-time = "2023-03-14T15:01:00.356Z" },
+ { url = "https://files.pythonhosted.org/packages/08/c5/402cce1cc18caff306effa09e77fdb3872289edc956a5c1bf53c03799b3b/sphinxcontrib_images-1.0.1-py3-none-any.whl", hash = "sha256:3cc9738dc15bacb3ab153b411a1b50dbfaa2535b49853ef3eae4d22adbbffa26", size = 119672, upload-time = "2025-06-07T22:42:48.954Z" },
]

[[package]]
@@ -2075,19 +2602,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/c2/42/4c8646762ee83602e3fb3fbe774c2fac12f317deb0b5dbeeedd2d3ba4b77/sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178", size = 5071, upload-time = "2019-01-21T16:10:14.333Z" },
]

-[[package]]
-name = "sphinxcontrib-napoleon"
-version = "0.7"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "pockets" },
- { name = "six" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/fa/eb/ad89500f4cee83187596e07f43ad561f293e8e6e96996005c3319653b89f/sphinxcontrib-napoleon-0.7.tar.gz", hash = "sha256:407382beed396e9f2d7f3043fad6afda95719204a1e1a231ac865f40abcbfcf8", size = 21232, upload-time = "2018-09-23T14:16:47.272Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/75/f2/6b7627dfe7b4e418e295e254bb15c3a6455f11f8c0ad0d43113f678049c3/sphinxcontrib_napoleon-0.7-py2.py3-none-any.whl", hash = "sha256:711e41a3974bdf110a484aec4c1a556799eb0b3f3b897521a018ad7e2db13fef", size = 17151, upload-time = "2018-09-23T14:16:45.548Z" },
-]
-
[[package]]
name = "sphinxcontrib-qthelp"
version = "2.0.0"
@@ -2141,6 +2655,18 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/32/d5/f9a850d79b0851d1d4ef6456097579a9005b31fea68726a4ae5f2d82ddd9/threadpoolctl-3.6.0-py3-none-any.whl", hash = "sha256:43a0b8fd5a2928500110039e43a5eed8480b918967083ea48dc3ab9f13c4a7fb", size = 18638, upload-time = "2025-03-13T13:49:21.846Z" },
]

+[[package]]
+name = "tinycss2"
+version = "1.4.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "webencodings" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/7a/fd/7a5ee21fd08ff70d3d33a5781c255cbe779659bd03278feb98b19ee550f4/tinycss2-1.4.0.tar.gz", hash = "sha256:10c0972f6fc0fbee87c3edb76549357415e94548c1ae10ebccdea16fb404a9b7", size = 87085, upload-time = "2024-10-24T14:58:29.895Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e6/34/ebdc18bae6aa14fbee1a08b63c015c72b64868ff7dae68808ab500c492e2/tinycss2-1.4.0-py3-none-any.whl", hash = "sha256:3a49cf47b7675da0b15d0c6e1df8df4ebd96e9394bb905a5775adb0d884c5289", size = 26610, upload-time = "2024-10-24T14:58:28.029Z" },
+]
+
[[package]]
name = "tokenizers"
version = "0.22.1"
@@ -2267,13 +2793,17 @@ dev = [
{ name = "unidecode" },
]
docs = [
+ { name = "ipython" },
+ { name = "linkify-it-py" },
{ name = "myst-parser" },
+ { name = "nbsphinx" },
+ { name = "pandoc" },
+ { name = "pydata-sphinx-theme" },
{ name = "sphinx" },
{ name = "sphinx-autodoc-typehints" },
{ name = "sphinx-copybutton" },
{ name = "sphinx-design" },
- { name = "sphinx-rtd-theme" },
- { name = "sphinxcontrib-napoleon" },
+ { name = "sphinxcontrib-images" },
]

[package.metadata]
@@ -2305,13 +2835,36 @@ dev = [
{ name = "unidecode" },
]
docs = [
- { name = "myst-parser", specifier = ">=0.18.0" },
- { name = "sphinx", specifier = ">=5.0.0" },
- { name = "sphinx-autodoc-typehints", specifier = ">=1.19.0" },
+ { name = "ipython", specifier = ">=8.0.0" },
+ { name = "linkify-it-py", specifier = ">=2.0.0" },
+ { name = "myst-parser", specifier = ">=4.0.0" },
+ { name = "nbsphinx", specifier = ">=0.9.0" },
+ { name = "pandoc", specifier = ">=2.0.0" },
+ { name = "pydata-sphinx-theme", specifier = ">=0.16.0" },
+ { name = "sphinx", specifier = ">=8.1.0" },
+ { name = "sphinx-autodoc-typehints", specifier = ">=2.0.0" },
{ name = "sphinx-copybutton", specifier = ">=0.5.0" },
- { name = "sphinx-design", specifier = ">=0.3.0" },
- { name = "sphinx-rtd-theme", specifier = ">=1.2.0" },
- { name = "sphinxcontrib-napoleon", specifier = ">=0.7" },
+ { name = "sphinx-design", specifier = ">=0.6.0" },
+ { name = "sphinxcontrib-images", specifier = ">=1.0.1" },
+]
+
+[[package]]
+name = "tornado"
+version = "6.5.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/09/ce/1eb500eae19f4648281bb2186927bb062d2438c2e5093d1360391afd2f90/tornado-6.5.2.tar.gz", hash = "sha256:ab53c8f9a0fa351e2c0741284e06c7a45da86afb544133201c5cc8578eb076a0", size = 510821, upload-time = "2025-08-08T18:27:00.78Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/f6/48/6a7529df2c9cc12efd2e8f5dd219516184d703b34c06786809670df5b3bd/tornado-6.5.2-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:2436822940d37cde62771cff8774f4f00b3c8024fe482e16ca8387b8a2724db6", size = 442563, upload-time = "2025-08-08T18:26:42.945Z" },
+ { url = "https://files.pythonhosted.org/packages/f2/b5/9b575a0ed3e50b00c40b08cbce82eb618229091d09f6d14bce80fc01cb0b/tornado-6.5.2-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:583a52c7aa94ee046854ba81d9ebb6c81ec0fd30386d96f7640c96dad45a03ef", size = 440729, upload-time = "2025-08-08T18:26:44.473Z" },
+ { url = "https://files.pythonhosted.org/packages/1b/4e/619174f52b120efcf23633c817fd3fed867c30bff785e2cd5a53a70e483c/tornado-6.5.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b0fe179f28d597deab2842b86ed4060deec7388f1fd9c1b4a41adf8af058907e", size = 444295, upload-time = "2025-08-08T18:26:46.021Z" },
+ { url = "https://files.pythonhosted.org/packages/95/fa/87b41709552bbd393c85dd18e4e3499dcd8983f66e7972926db8d96aa065/tornado-6.5.2-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b186e85d1e3536d69583d2298423744740986018e393d0321df7340e71898882", size = 443644, upload-time = "2025-08-08T18:26:47.625Z" },
+ { url = "https://files.pythonhosted.org/packages/f9/41/fb15f06e33d7430ca89420283a8762a4e6b8025b800ea51796ab5e6d9559/tornado-6.5.2-cp39-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e792706668c87709709c18b353da1f7662317b563ff69f00bab83595940c7108", size = 443878, upload-time = "2025-08-08T18:26:50.599Z" },
+ { url = "https://files.pythonhosted.org/packages/11/92/fe6d57da897776ad2e01e279170ea8ae726755b045fe5ac73b75357a5a3f/tornado-6.5.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:06ceb1300fd70cb20e43b1ad8aaee0266e69e7ced38fa910ad2e03285009ce7c", size = 444549, upload-time = "2025-08-08T18:26:51.864Z" },
+ { url = "https://files.pythonhosted.org/packages/9b/02/c8f4f6c9204526daf3d760f4aa555a7a33ad0e60843eac025ccfd6ff4a93/tornado-6.5.2-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:74db443e0f5251be86cbf37929f84d8c20c27a355dd452a5cfa2aada0d001ec4", size = 443973, upload-time = "2025-08-08T18:26:53.625Z" },
+ { url = "https://files.pythonhosted.org/packages/ae/2d/f5f5707b655ce2317190183868cd0f6822a1121b4baeae509ceb9590d0bd/tornado-6.5.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b5e735ab2889d7ed33b32a459cac490eda71a1ba6857b0118de476ab6c366c04", size = 443954, upload-time = "2025-08-08T18:26:55.072Z" },
+ { url = "https://files.pythonhosted.org/packages/e8/59/593bd0f40f7355806bf6573b47b8c22f8e1374c9b6fd03114bd6b7a3dcfd/tornado-6.5.2-cp39-abi3-win32.whl", hash = "sha256:c6f29e94d9b37a95013bb669616352ddb82e3bfe8326fccee50583caebc8a5f0", size = 445023, upload-time = "2025-08-08T18:26:56.677Z" },
+ { url = "https://files.pythonhosted.org/packages/c7/2a/f609b420c2f564a748a2d80ebfb2ee02a73ca80223af712fca591386cafb/tornado-6.5.2-cp39-abi3-win_amd64.whl", hash = "sha256:e56a5af51cc30dd2cae649429af65ca2f6571da29504a07995175df14c18f35f", size = 445427, upload-time = "2025-08-08T18:26:57.91Z" },
+ { url = "https://files.pythonhosted.org/packages/5e/4f/e1f65e8f8c76d73658b33d33b81eed4322fb5085350e4328d5c956f0c8f9/tornado-6.5.2-cp39-abi3-win_arm64.whl", hash = "sha256:d6c33dc3672e3a1f3618eb63b7ef4683a7688e7b9e6e8f0d9aa5726360a004af", size = 444456, upload-time = "2025-08-08T18:26:59.207Z" },
]

[[package]]
@@ -2388,6 +2941,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/5c/23/c7abc0ca0a1526a0774eca151daeb8de62ec457e77262b66b359c3c7679e/tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8", size = 347839, upload-time = "2025-03-23T13:54:41.845Z" },
]

+[[package]]
+name = "uc-micro-py"
+version = "1.0.3"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/91/7a/146a99696aee0609e3712f2b44c6274566bc368dfe8375191278045186b8/uc-micro-py-1.0.3.tar.gz", hash = "sha256:d321b92cff673ec58027c04015fcaa8bb1e005478643ff4a500882eaab88c48a", size = 6043, upload-time = "2024-02-09T16:52:01.654Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/37/87/1f677586e8ac487e29672e4b17455758fce261de06a0d086167bb760361a/uc_micro_py-1.0.3-py3-none-any.whl", hash = "sha256:db1dffff340817673d7b466ec86114a9dc0e9d4d9b5ba229d9d60e5c12600cd5", size = 6229, upload-time = "2024-02-09T16:52:00.371Z" },
+]
+
[[package]]
name = "unidecode"
version = "1.4.0"
@@ -2429,6 +2991,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/af/b5/123f13c975e9f27ab9c0770f514345bd406d0e8d3b7a0723af9d43f710af/wcwidth-0.2.14-py2.py3-none-any.whl", hash = "sha256:a7bb560c8aee30f9957e5f9895805edd20602f2d7f720186dfd906e82b4982e1", size = 37286, upload-time = "2025-09-22T16:29:51.641Z" },
]

+[[package]]
+name = "webencodings"
+version = "0.5.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/0b/02/ae6ceac1baeda530866a85075641cec12989bd8d31af6d5ab4a3e8c92f47/webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923", size = 9721, upload-time = "2017-04-05T20:21:34.189Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/f4/24/2a3e3df732393fed8b3ebf2ec078f05546de641fe1b667ee316ec1dcf3b7/webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78", size = 11774, upload-time = "2017-04-05T20:21:32.581Z" },
+]
+
[[package]]
name = "widgetsnbextension"
version = "4.0.15"