3 changes: 3 additions & 0 deletions .github/workflows/speakeasy_run_on_pr.yaml
@@ -61,6 +61,9 @@ jobs:
working-directory: examples/nextjs-example
run: npx tsc --noEmit

- name: Run tests
run: npm test

- name: Commit changes
run: |
git config --global user.name 'github-actions[bot]'
5 changes: 4 additions & 1 deletion package.json
@@ -55,7 +55,10 @@
"scripts": {
"lint": "eslint --cache --max-warnings=0 src",
"build": "tsc",
"prepublishOnly": "npm run build"
"prepublishOnly": "npm run build",
"test": "vitest run",
"test:e2e": "vitest run tests/e2e",
"test:watch": "vitest"
},
"peerDependencies": {
"@tanstack/react-query": "^5",
69 changes: 69 additions & 0 deletions tests/e2e/README.md
@@ -0,0 +1,69 @@
# E2E Tests

This directory contains end-to-end tests for the OpenRouter SDK.

## Prerequisites

1. Install dependencies:
```bash
npm install
```

2. Set up your OpenRouter API key:
```bash
export OPENROUTER_API_KEY=your_api_key_here
```

Or create a `.env` file in the project root:
```
OPENROUTER_API_KEY=your_api_key_here
```
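
If the `.env` file isn't picked up automatically, one minimal way to load it for these tests (this assumes the `dotenv` package is added as a dev dependency, which is not part of this PR) is via `setupFiles` in `vitest.config.ts`:

```ts
// vitest.config.ts (sketch, not part of this PR): importing "dotenv/config"
// as a setup file loads .env into process.env before each test file runs.
import { defineConfig } from "vitest/config";

export default defineConfig({
  test: {
    globals: true,
    environment: "node",
    setupFiles: ["dotenv/config"],
  },
});
```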

## Running Tests

Run all tests:
```bash
npm test
```

Run only e2e tests:
```bash
npm run test:e2e
```

Run tests in watch mode:
```bash
npm run test:watch
```

## Test Coverage

The e2e test suite includes:

### Models Tests (`models.test.ts`)
- Fetching the list of available models
- Validating model properties
- Filtering models by category
- Getting the total count of models
Contributor: should we add one test where we intentionally garble the OpenRouter key or ask for an invalid model so that we can check that the error parsing works correctly?
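
A minimal sketch of what that negative-path test could look like, reusing only SDK calls already exercised in this PR. It assumes the SDK rejects on an authentication failure, which this PR does not verify; if the client instead resolves with an error payload, the assertion would need to change.

```ts
// Sketch only, not part of this PR: verify that an invalid API key surfaces
// a parseable error rather than a malformed response.
import { describe, expect, it } from "vitest";
import { OpenRouter } from "../../src/sdk/sdk.js";

describe("Error handling E2E", () => {
  it("should surface a useful error for an invalid API key", async () => {
    const badClient = new OpenRouter({ apiKey: "sk-or-invalid-key" });

    await expect(
      badClient.chat.send({
        model: "meta-llama/llama-3.2-1b-instruct",
        messages: [{ role: "user", content: "hello" }],
        stream: false,
      }),
    ).rejects.toThrow();
  });
});
```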


### Chat Tests (`chat.test.ts`)
- **Non-streaming mode:**
- Sending chat requests and receiving responses
- Multi-turn conversations
- Token limit enforcement

- **Streaming mode:**
- Streaming chat responses
- Progressive content delivery
- Finish reason detection

### Beta Responses Tests (`responses.test.ts`)
- Testing the beta responses endpoint
- Note: This endpoint is in alpha/beta and may require updates

## Notes

- Tests make real API calls to OpenRouter, so you need a valid API key
- Tests may consume API credits
- Some tests use the `openai/gpt-3.5-turbo` model by default
Contributor: Nit: should we replace gpt-3.5-turbo with openai/gpt-4.1-nano? gpt-3.5-turbo will be deprecated in a year. Also it's more expensive than nano.

Collaborator (author): actually, would meta-llama/llama-3.2-1b-instruct be better/cheaper/faster to run?

- The beta responses endpoint has limited test coverage as it's still in development
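
One optional refinement, not part of this PR: rather than throwing from `beforeAll` when the key is missing, each suite could skip itself with Vitest's `skipIf`, so `npm test` stays green locally without credentials.

```ts
// Sketch, not part of this PR: skip the suite when no key is configured
// instead of throwing in beforeAll. describe.skipIf is standard Vitest.
import { describe } from "vitest";

const hasKey = Boolean(process.env.OPENROUTER_API_KEY);

describe.skipIf(!hasKey)("Chat E2E Tests", () => {
  // ...existing tests unchanged...
});
```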
186 changes: 186 additions & 0 deletions tests/e2e/chat.test.ts
@@ -0,0 +1,186 @@
import { beforeAll, describe, expect, it } from "vitest";
import { OpenRouter } from "../../src/sdk/sdk.js";

describe("Chat E2E Tests", () => {
let client: OpenRouter;

beforeAll(() => {
const apiKey = process.env.OPENROUTER_API_KEY;
if (!apiKey) {
throw new Error(
"OPENROUTER_API_KEY environment variable is required for e2e tests"
);
}

client = new OpenRouter({
apiKey,
});
});

describe("chat.send() - Non-streaming", () => {
it("should successfully send a chat request and get a response", async () => {
const response = await client.chat.send({
model: "meta-llama/llama-3.2-1b-instruct",
messages: [
{
role: "user",
content: "Say 'Hello, World!' and nothing else.",
},
],
stream: false,
});

expect(response).toBeDefined();


expect(Array.isArray(response.choices)).toBe(true);
expect(response.choices.length).toBeGreaterThan(0);

const firstChoice = response.choices[0];
expect(firstChoice).toBeDefined();
expect(firstChoice?.message).toBeDefined();
expect(firstChoice?.message?.content).toBeDefined();
expect(typeof firstChoice?.message?.content).toBe("string");

// Verify it has usage information
expect(response.usage).toBeDefined();
expect(response.usage?.totalTokens).toBeGreaterThan(0);

});

it("should handle multi-turn conversations", async () => {
const response = await client.chat.send({
model: "meta-llama/llama-3.2-1b-instruct",
messages: [
{
role: "user",
content: "My name is Alice.",
},
{
role: "assistant",
content: "Hello Alice! How can I help you today?",
},
{
role: "user",
content: "What is my name?",
},
],
stream: false,
});

expect(response).toBeDefined();

const message = response.choices[0]?.message;
const content = typeof message?.content === "string"
  ? message.content.toLowerCase()
  : message?.content?.map((item) => (item.type === "text" ? item.text : "")).join("").toLowerCase();
expect(content).toBeDefined();
expect(content).toContain("alice");

});

it("should respect max_tokens parameter", async () => {
const response = await client.chat.send({
model: "meta-llama/llama-3.2-1b-instruct",
messages: [
{
role: "user",
content: "Write a long story about a cat.",
},
],
maxTokens: 10,
stream: false,
});

expect(response).toBeDefined();

expect(response.usage?.completionTokens).toBeLessThanOrEqual(10);

});
});

describe("chat.send() - Streaming", () => {
it("should successfully stream chat responses", async () => {
const response = await client.chat.send({
model: "meta-llama/llama-3.2-1b-instruct",
messages: [
{
role: "user",
content: "Count from 1 to 5.",
},
],
stream: true,
});

expect(response).toBeDefined();

const chunks: any[] = [];

for await (const chunk of response) {
expect(chunk).toBeDefined();
chunks.push(chunk);
}

expect(chunks.length).toBeGreaterThan(0);

// Verify chunks have expected structure
const firstChunk = chunks[0];
expect(firstChunk?.choices).toBeDefined();
expect(Array.isArray(firstChunk?.choices)).toBe(true);

});

it("should stream complete content progressively", async () => {
const response = await client.chat.send({
model: "meta-llama/llama-3.2-1b-instruct",
messages: [
{
role: "user",
content: "Say 'test'.",
},
],
stream: true,
});

expect(response).toBeDefined();

let fullContent = "";
let chunkCount = 0;

for await (const chunk of response) {
chunkCount++;
const delta = chunk.choices?.[0]?.delta;
if (delta?.content) {
fullContent += delta.content;
}
}

expect(chunkCount).toBeGreaterThan(0);
expect(fullContent.length).toBeGreaterThan(0);
});

it("should include finish_reason in final chunk", async () => {
const response = await client.chat.send({
model: "meta-llama/llama-3.2-1b-instruct",
messages: [
{
role: "user",
content: "Say 'done'.",
},
],
stream: true,
});

expect(response).toBeDefined();

let foundFinishReason = false;

for await (const chunk of response) {
const finishReason = chunk.choices?.[0]?.finishReason;
if (finishReason) {
foundFinishReason = true;
expect(typeof finishReason).toBe("string");
}
}

expect(foundFinishReason).toBe(true);
Contributor: Let's also add a usage check to this test.
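
A minimal sketch of that usage check, under the assumption (not verified elsewhere in this file) that the final streamed chunk carries the same `usage` object as non-streaming responses:

```ts
// Sketch, not part of this PR: the existing loop extended with a usage check.
// Assumes the final streamed chunk includes `usage`, mirroring the
// non-streaming response shape used in the earlier tests.
let usage: { totalTokens?: number } | undefined;

for await (const chunk of response) {
  const finishReason = chunk.choices?.[0]?.finishReason;
  if (finishReason) {
    foundFinishReason = true;
    expect(typeof finishReason).toBe("string");
  }
  if (chunk.usage) {
    usage = chunk.usage;
  }
}

expect(foundFinishReason).toBe(true);
expect(usage?.totalTokens).toBeGreaterThan(0);
```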

});
});
});
59 changes: 59 additions & 0 deletions tests/e2e/models.test.ts
@@ -0,0 +1,59 @@
import { beforeAll, describe, expect, it } from "vitest";
import { OpenRouter } from "../../src/sdk/sdk.js";

Contributor: nice!

describe("Models E2E Tests", () => {
let client: OpenRouter;

beforeAll(() => {
const apiKey = process.env.OPENROUTER_API_KEY;
if (!apiKey) {
throw new Error(
"OPENROUTER_API_KEY environment variable is required for e2e tests"
);
}

client = new OpenRouter({
apiKey,
});
});

describe("models.list()", () => {
it("should successfully fetch models list", async () => {
const response = await client.models.list();

expect(response).toBeDefined();
expect(Array.isArray(response)).toBe(true);
expect(response.length).toBeGreaterThan(0);
});

it("should return models with expected properties", async () => {
const response = await client.models.list();

Reviewer: we will eventually need to paginate this API, and may be able to do it without versioning the API. Should we have the SDK prep for it?

const firstModel = response?.[0];
expect(firstModel).toBeDefined();
expect(firstModel?.id).toBeDefined();
expect(typeof firstModel?.id).toBe("string");
expect(firstModel?.name).toBeDefined();
});

it("should support filtering by category", async () => {
const response = await client.models.list({
category: "text",
});

expect(response).toBeDefined();
expect(Array.isArray(response)).toBe(true);
});
});

describe("models.count()", () => {
it("should successfully fetch models count", async () => {
const response = await client.models.count();

expect(response).toBeDefined();
expect(response.count).toBeDefined();
expect(typeof response.count).toBe("number");
expect(response.count).toBeGreaterThan(0);
});
});
});
11 changes: 11 additions & 0 deletions vitest.config.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
import { defineConfig } from "vitest/config";

export default defineConfig({
test: {
globals: true,
environment: "node",
include: ["**/*.test.ts"],
hookTimeout: 30000,
testTimeout: 30000,
},
});