Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
47 changes: 47 additions & 0 deletions .github/actions/validate-sdk/action.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
name: 'Validate SDK'
description: 'Run build, typecheck, and tests for the SDK'

# Composite action shared by PR validation and the Speakeasy run workflow.
# NOTE(review): assumes the caller has already run actions/checkout — this
# action does not check out the repository itself.
runs:
  using: 'composite'
  steps:
    - name: Setup Node.js
      uses: actions/setup-node@v4
      with:
        node-version: '22'
        cache: 'npm'  # requires a package-lock.json at the repo root

    # Composite-action `run` steps must declare `shell` explicitly.
    - name: Install dependencies
      shell: bash
      run: npm install

    - name: Build SDK
      shell: bash
      run: npm run build

    - name: Typecheck tests directory
      shell: bash
      run: npx tsc --noEmit --skipLibCheck --esModuleInterop --moduleResolution node --module esnext --target es2020 'tests/**/*.ts'

    - name: Install examples dependencies
      shell: bash
      working-directory: examples
      run: npm install

    - name: Typecheck examples root
      shell: bash
      working-directory: examples
      run: npx tsc --noEmit --skipLibCheck --esModuleInterop --moduleResolution node --module esnext --target es2020 '*.ts'

    - name: Install nextjs-example dependencies
      shell: bash
      working-directory: examples/nextjs-example
      run: npm install

    - name: Typecheck nextjs-example
      shell: bash
      working-directory: examples/nextjs-example
      run: npx tsc --noEmit

    # NOTE(review): `npm test` runs the full vitest suite, which includes e2e
    # tests that require OPENROUTER_API_KEY — confirm CI provides that secret.
    - name: Run tests
      shell: bash
      run: npm test
10 changes: 10 additions & 0 deletions .github/workflows/pr-validation.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
name: PR Validation

# Run the shared validation checks on every pull request, except PRs that only
# touch the generated OpenAPI input spec.
on:
  pull_request:
    paths-ignore:
      - .speakeasy/in.openapi.yaml

jobs:
  validate:
    # Delegates to the reusable workflow so PR validation and other callers
    # stay in sync.
    uses: ./.github/workflows/validation-checks.yaml
32 changes: 2 additions & 30 deletions .github/workflows/speakeasy_run_on_pr.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -30,36 +30,8 @@ jobs:
- name: Run Speakeasy
run: speakeasy run

- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: '22'
cache: 'npm'

- name: Install dependencies
run: npm install

- name: Build SDK
run: npm run build

- name: Typecheck tests directory
run: npx tsc --noEmit --skipLibCheck --esModuleInterop --moduleResolution node --module esnext --target es2020 'tests/**/*.ts'

- name: Install examples dependencies
working-directory: examples
run: npm install

- name: Typecheck examples root
working-directory: examples
run: npx tsc --noEmit --skipLibCheck --esModuleInterop --moduleResolution node --module esnext --target es2020 '*.ts'

- name: Install nextjs-example dependencies
working-directory: examples/nextjs-example
run: npm install

- name: Typecheck nextjs-example
working-directory: examples/nextjs-example
run: npx tsc --noEmit
- name: Validate SDK
uses: ./.github/actions/validate-sdk

- name: Commit changes
run: |
Expand Down
14 changes: 14 additions & 0 deletions .github/workflows/validation-checks.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
name: Validation Checks

# Reusable workflow: invoked via `uses:` from other workflows (e.g. PR
# validation). It performs the checkout that the composite action expects.
on:
  workflow_call:

jobs:
  validate:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Validate SDK
        uses: ./.github/actions/validate-sdk
5 changes: 4 additions & 1 deletion package.json
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,10 @@
"scripts": {
"lint": "eslint --cache --max-warnings=0 src",
"build": "tsc",
"prepublishOnly": "npm run build"
"prepublishOnly": "npm run build",
"test": "vitest run",
"test:e2e": "vitest run tests/e2e",
"test:watch": "vitest"
},
"peerDependencies": {
"@tanstack/react-query": "^5",
Expand Down
69 changes: 69 additions & 0 deletions tests/e2e/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,69 @@
# E2E Tests

This directory contains end-to-end tests for the OpenRouter SDK.

## Prerequisites

1. Install dependencies:
```bash
npm install
```

2. Set up your OpenRouter API key:
```bash
export OPENROUTER_API_KEY=your_api_key_here
```

Or create a `.env` file in the project root:
```
OPENROUTER_API_KEY=your_api_key_here
```

## Running Tests

Run all tests:
```bash
npm test
```

Run only e2e tests:
```bash
npm run test:e2e
```

Run tests in watch mode:
```bash
npm run test:watch
```

## Test Coverage

The e2e test suite includes:

### Models Tests (`models.test.ts`)
- Fetching the list of available models
- Validating model properties
- Filtering models by category
- Getting the total count of models
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

should we add one test where we intentionally garble the openrouter key or ask for an invalid model so that we can check that the error parsing works correctly?


### Chat Tests (`chat.test.ts`)
- **Non-streaming mode:**
- Sending chat requests and receiving responses
- Multi-turn conversations
- Token limit enforcement

- **Streaming mode:**
- Streaming chat responses
- Progressive content delivery
- Finish reason detection

### Beta Responses Tests (`responses.test.ts`)
- Testing the beta responses endpoint
- Note: This endpoint is in alpha/beta and may require updates

## Notes

- Tests make real API calls to OpenRouter, so you need a valid API key
- Tests may consume API credits
- Tests use the `meta-llama/llama-3.2-1b-instruct` model by default (small, cheap, and fast)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Nit: should we replace gpt-3.5-turbo with openai/gpt-4.1-nano? gpt-3.5-turbo will be deprecated in a year. Also it's more expensive than nano.

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

actually, would meta-llama/llama-3.2-1b-instruct be better/cheaper/faster to run?

- The beta responses endpoint has limited test coverage as it's still in development
186 changes: 186 additions & 0 deletions tests/e2e/chat.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,186 @@
import { beforeAll, describe, expect, it } from "vitest";
import { OpenRouter } from "../../src/sdk/sdk.js";

describe("Chat E2E Tests", () => {
let client: OpenRouter;

beforeAll(() => {
const apiKey = process.env.OPENROUTER_API_KEY;
if (!apiKey) {
throw new Error(
"OPENROUTER_API_KEY environment variable is required for e2e tests"
);
}

client = new OpenRouter({
apiKey,
});
});

describe("chat.send() - Non-streaming", () => {
it("should successfully send a chat request and get a response", async () => {
const response = await client.chat.send({
model: "meta-llama/llama-3.2-1b-instruct",
messages: [
{
role: "user",
content: "Say 'Hello, World!' and nothing else.",
},
],
stream: false,
});

expect(response).toBeDefined();


expect(Array.isArray(response.choices)).toBe(true);
expect(response.choices.length).toBeGreaterThan(0);

const firstChoice = response.choices[0];
expect(firstChoice).toBeDefined();
expect(firstChoice?.message).toBeDefined();
expect(firstChoice?.message?.content).toBeDefined();
expect(typeof firstChoice?.message?.content).toBe("string");

// Verify it has usage information
expect(response.usage).toBeDefined();
expect(response.usage?.totalTokens).toBeGreaterThan(0);

});

it("should handle multi-turn conversations", async () => {
const response = await client.chat.send({
model: "meta-llama/llama-3.2-1b-instruct",
messages: [
{
role: "user",
content: "My name is Alice.",
},
{
role: "assistant",
content: "Hello Alice! How can I help you today?",
},
{
role: "user",
content: "What is my name?",
},
],
stream: false,
});

expect(response).toBeDefined();

const content = typeof response.choices[0]?.message?.content === "string" ? response.choices[0]?.message?.content?.toLowerCase() : response.choices[0]?.message?.content?.map((item) => item.type === "text" ? item.text : "").join("").toLowerCase();
expect(content).toBeDefined();
expect(content).toContain("alice");

});

it("should respect max_tokens parameter", async () => {
const response = await client.chat.send({
model: "meta-llama/llama-3.2-1b-instruct",
messages: [
{
role: "user",
content: "Write a long story about a cat.",
},
],
maxTokens: 10,
stream: false,
});

expect(response).toBeDefined();

expect(response.usage?.completionTokens).toBeLessThanOrEqual(10);

});
});

describe("chat.send() - Streaming", () => {
it("should successfully stream chat responses", async () => {
const response = await client.chat.send({
model: "meta-llama/llama-3.2-1b-instruct",
messages: [
{
role: "user",
content: "Count from 1 to 5.",
},
],
stream: true,
});

expect(response).toBeDefined();

const chunks: any[] = [];

for await (const chunk of response) {
expect(chunk).toBeDefined();
chunks.push(chunk);
}

expect(chunks.length).toBeGreaterThan(0);

// Verify chunks have expected structure
const firstChunk = chunks[0];
expect(firstChunk?.choices).toBeDefined();
expect(Array.isArray(firstChunk?.choices)).toBe(true);

});

it("should stream complete content progressively", async () => {
const response = await client.chat.send({
model: "meta-llama/llama-3.2-1b-instruct",
messages: [
{
role: "user",
content: "Say 'test'.",
},
],
stream: true,
});

expect(response).toBeDefined();

let fullContent = "";
let chunkCount = 0;

for await (const chunk of response) {
chunkCount++;
const delta = chunk.choices?.[0]?.delta;
if (delta?.content) {
fullContent += delta.content;
}
}

expect(chunkCount).toBeGreaterThan(0);
expect(fullContent.length).toBeGreaterThan(0);
}

it("should include finish_reason in final chunk", async () => {
const response = await client.chat.send({
model: "meta-llama/llama-3.2-1b-instruct",
messages: [
{
role: "user",
content: "Say 'done'.",
},
],
stream: true,
});

expect(response).toBeDefined();

let foundFinishReason = false;

for await (const chunk of response) {
const finishReason = chunk.choices?.[0]?.finishReason;
if (finishReason) {
foundFinishReason = true;
expect(typeof finishReason).toBe("string");
}
}

expect(foundFinishReason).toBe(true);
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Let's also add a usage check to this test.

}
});
});
Loading
Loading