Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
33 changes: 12 additions & 21 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,12 @@
# dependencies
/node_modules
/.pnp
.pnp.js
.yarn/install-state.gz
.pnp.*
.yarn/*
!.yarn/patches
!.yarn/plugins
!.yarn/releases
!.yarn/versions

# testing
/coverage
Expand All @@ -24,9 +28,10 @@
npm-debug.log*
yarn-debug.log*
yarn-error.log*
.pnpm-debug.log*

# local env files
.env*.local
# env files (can opt-in for committing if needed)
.env*

# vercel
.vercel
Expand All @@ -35,20 +40,6 @@ yarn-error.log*
*.tsbuildinfo
next-env.d.ts

# OpenNext
/.open-next

# wrangler files
.wrangler
.dev.vars*
!.dev.vars.example
.env*
!.env.example

.vscode
.vscode/settings.json

public/_headers

*lock.yaml
*lock.json
pnpm-lock.yaml
package-lock.json
yarn.lock
10 changes: 10 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -34,3 +34,13 @@ You can check out [the Next.js GitHub repository](https://github.com/vercel/next
The easiest way to deploy your Next.js app is to use the [Vercel Platform](https://vercel.com/new?utm_medium=default-template&filter=next.js&utm_source=create-next-app&utm_campaign=create-next-app-readme) from the creators of Next.js.

Check out our [Next.js deployment documentation](https://nextjs.org/docs/app/building-your-application/deploying) for more details.

## IMPORTANT: When deploying to Vercel, you MUST disable Vercel Authentication to allow Layercode webhooks to be received

By default, Vercel blocks external requests to your application's /api routes. This means that Layercode webhooks will not be received by your application, and your voice agent will not work.

Disable Vercel Authentication by going to your project settings in the Vercel dashboard, then go to "Deployment Protection" in the left sidebar menu, then turn off "Vercel Authentication" and click Save. You do not need to redeploy.

![disable-vercel-auth](./disable-vercel-auth.png)

Remember to check your Webhook Logs in the Layercode dashboard to ensure that webhooks are being received successfully. If you receive a 405 error response to webhooks, this indicates that Vercel Authentication is still enabled.
9 changes: 0 additions & 9 deletions app/agent/page.tsx

This file was deleted.

158 changes: 76 additions & 82 deletions app/api/agent/route.ts
Original file line number Diff line number Diff line change
@@ -1,122 +1,116 @@
export const dynamic = 'force-dynamic';
export const dynamic = "force-dynamic";

import { createOpenAI } from "@ai-sdk/openai";
import {
streamText,
UIMessage,
convertToModelMessages,
AssistantModelMessage,
} from "ai";
import { streamResponse, verifySignature } from "@layercode/node-server-sdk";
import config from "@/layercode.config.json";

type LayercodeMetadata = {
conversation_id: string;
};

type LayercodePart = {
content: string;
};

import { createOpenAI } from '@ai-sdk/openai';
import { streamText, ModelMessage, tool, stepCountIs } from 'ai';
import z from 'zod';
import { streamResponse, verifySignature } from '@layercode/node-server-sdk';
import { prettyPrintMsgs } from '@/app/utils/msgs';
import config from '@/layercode.config.json';
type LayercodeUIMessage = UIMessage<LayercodeMetadata, LayercodePart>;

export type MessageWithTurnId = ModelMessage & { turn_id: string };
type WebhookRequest = {
conversation_id: string;
text: string;
turn_id: string;
interruption_context?: {
previous_turn_interrupted: boolean;
words_heard: number;
text_heard: string;
assistant_turn_id?: string;
};
type: 'message' | 'session.start' | 'session.update' | 'session.end';
type: "message" | "session.start" | "session.end" | "session.update";
};

const SYSTEM_PROMPT = config.prompt;
const WELCOME_MESSAGE = config.welcome_message;

const openai = createOpenAI({ apiKey: process.env.OPENAI_API_KEY! });

// In production we recommend fast datastore like Redis or Cloudflare D1 for storing conversation history
// Here we use a simple in-memory object for demo purposes
const conversations = {} as Record<string, MessageWithTurnId[]>;
const conversations = {} as Record<string, LayercodeUIMessage[]>;

export const POST = async (request: Request) => {
const requestBody = (await request.json()) as WebhookRequest;
console.log('Webhook received from Layercode', requestBody);
console.log("Webhook received from Layercode", requestBody);

// Verify this webhook request is from Layercode
const signature = request.headers.get('layercode-signature') || '';
const secret = process.env.LAYERCODE_WEBHOOK_SECRET || '';
// Verify webhook signature
const signature = request.headers.get("layercode-signature") || "";
const secret = process.env.LAYERCODE_WEBHOOK_SECRET || "";
const isValid = verifySignature({
payload: JSON.stringify(requestBody),
signature,
secret
secret,
});
if (!isValid) return new Response('Invalid layercode-signature', { status: 401 });
if (!isValid)
return new Response("Invalid layercode-signature", { status: 401 });

const { conversation_id, text: userText, turn_id, type, interruption_context } = requestBody;
const { conversation_id, text: userText, turn_id, type } = requestBody;

// If this is a new conversation, create a new array to hold messages
if (!conversations[conversation_id]) {
conversations[conversation_id] = [];
}
if (!conversations[conversation_id]) conversations[conversation_id] = [];

// Immediately store the user message received
conversations[conversation_id].push({ role: 'user', turn_id, content: userText });
const userMessage: LayercodeUIMessage = {
id: turn_id,
role: "user",
metadata: { conversation_id },
parts: [{ type: "text", text: userText }],
};
conversations[conversation_id].push(userMessage);

switch (type) {
case 'session.start':
// A new session/call has started. If you want to send a welcome message (have the agent speak first), return that here.
case "session.start":
const message: LayercodeUIMessage = {
id: turn_id,
role: "assistant",
metadata: { conversation_id },
parts: [{ type: "text", text: WELCOME_MESSAGE }],
};

return streamResponse(requestBody, async ({ stream }) => {
// Save the welcome message to the conversation history
conversations[conversation_id].push({ role: 'assistant', turn_id, content: WELCOME_MESSAGE });
// Send the welcome message to be spoken
conversations[conversation_id].push(message);
stream.tts(WELCOME_MESSAGE);
stream.end();
});
case 'message':
// The user has spoken and the transcript has been received. Call our LLM and generate a response.

// Before generating a response, we store a placeholder assistant msg in the history. This is so that if the agent response is interrupted (which is common for voice agents), before we have the chance to save our agent's response, our conversation history will still follow the correct user-assistant turn order.
const assistantResposneIdx = conversations[conversation_id].push({ role: 'assistant', turn_id, content: '' });
case "message":
return streamResponse(requestBody, async ({ stream }) => {
const weather = tool({
description: 'Get the weather in a location',
inputSchema: z.object({
location: z.string().describe('The location to get the weather for')
}),
execute: async ({ location }) => {
stream.data({ isThinking: true });
// do something to get the weather
stream.data({ isThinking: false });

return {
location,
temperature: 72 + Math.floor(Math.random() * 21) - 10
};
}
});
const { textStream } = streamText({
model: openai('gpt-4o-mini'),
model: openai("gpt-4o-mini"),
system: SYSTEM_PROMPT,
messages: conversations[conversation_id], // The user message has already been added to the conversation array earlier, so the LLM will be responding to that.
tools: { weather },
toolChoice: 'auto',
stopWhen: stepCountIs(10),
messages: convertToModelMessages(conversations[conversation_id]),
onFinish: async ({ response }) => {
// The assistant has finished generating the full response text. Now we update our conversation history with the additional messages generated. For a simple LLM generated single agent response, there will be one additional message. If you add some tools, and allow multi-step agent mode, there could be multiple additional messages which all need to be added to the conversation history.

// First, we remove the placeholder assistant message we added earlier, as we will be replacing it with the actual generated messages.
conversations[conversation_id].splice(assistantResposneIdx - 1, 1);

// Push the new messages returned from the LLM into the conversation history, adding the Layercode turn_id to each message.
conversations[conversation_id].push(...response.messages.map((m) => ({ ...m, turn_id })));

console.log('--- final message history ---');
prettyPrintMsgs(conversations[conversation_id]);

stream.end(); // Tell Layercode we are done responding
}
const generatedMessages: LayercodeUIMessage[] = response.messages
.filter(
(message): message is AssistantModelMessage =>
message.role === "assistant"
)
.map((message) => ({
id: crypto.randomUUID(),
role: "assistant", // now the type matches your UI message union
metadata: { conversation_id },
parts: Array.isArray(message.content)
? message.content
.filter(
(part): part is { type: "text"; text: string } =>
part.type === "text"
)
.map((part) => ({ type: "text", text: part.text }))
: [{ type: "text", text: message.content }],
}));

conversations[conversation_id].push(...generatedMessages);
stream.end();
},
});

// Stream the text response as it is generated, and have it spoken in real-time
await stream.ttsTextStream(textStream);
});
case 'session.end':
// The session/call has ended. Here you could store or analyze the conversation transcript (stored in your conversations history)
return new Response('OK', { status: 200 });
case 'session.update':
// The session/call state has been updated. This happens after the session has ended, and when the recording audio file has been processed and is available for download.
return new Response('OK', { status: 200 });

case "session.end":
case "session.update":
return new Response("OK", { status: 200 });
}
};
47 changes: 27 additions & 20 deletions app/api/authorize/route.ts
Original file line number Diff line number Diff line change
@@ -1,26 +1,33 @@
export const dynamic = 'force-dynamic';
import { NextResponse } from 'next/server';
export const dynamic = "force-dynamic";
import { NextResponse } from "next/server";

export const POST = async (request: Request) => {
const baseApiUrl = (process.env.LAYERCODE_API_URL || 'https://api.layercode.com').replace(/\/+$/, '');
const endpoint = `${baseApiUrl}/v1/agents/web/authorize_session`;
// Here you could do any user authorization checks you need for your app
const endpoint = "https://api.layercode.com/v1/agents/web/authorize_session";
const apiKey = process.env.LAYERCODE_API_KEY;
if (!apiKey) throw new Error('LAYERCODE_API_KEY is not set.');

if (!apiKey) {
throw new Error("LAYERCODE_API_KEY is not set.");
}
const requestBody = await request.json();

const response = await fetch(endpoint, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
Authorization: `Bearer ${apiKey}`
},
body: JSON.stringify(requestBody)
});

if (!response.ok) {
const text = await response.text();
return NextResponse.json({ error: text || response.statusText }, { status: response.status });
if (!requestBody || !requestBody.agent_id) {
throw new Error("Missing agent_id in request body.");
}
try {
const response = await fetch(endpoint, {
method: "POST",
headers: {
"Content-Type": "application/json",
Authorization: `Bearer ${apiKey}`,
},
body: JSON.stringify(requestBody),
});
if (!response.ok) {
const text = await response.text();
throw new Error(text || response.statusText);
}
return NextResponse.json(await response.json());
} catch (error: any) {
console.log("Layercode authorize session response error:", error.message);
return NextResponse.json({ error: error.message }, { status: 500 });
}
return NextResponse.json(await response.json());
};
24 changes: 10 additions & 14 deletions app/globals.css
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
@import "tailwindcss";

:root {
--foreground: #E6E6E6;
--background: #0F0F0F ;
--background: #ffffff;
--foreground: #171717;
}

@theme inline {
Expand All @@ -12,19 +12,15 @@
--font-mono: var(--font-geist-mono);
}

@media (prefers-color-scheme: dark) {
:root {
--background: #0a0a0a;
--foreground: #ededed;
}
}

body {
background: var(--background);
color: var(--foreground);
font-family: 'Inter', system-ui, -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, 'Helvetica Neue', Arial, 'Noto Sans', sans-serif
}

/* Prevent horizontal layout shift when vertical scrollbar appears/disappears */
html {
scrollbar-gutter: stable;
}

@supports not (scrollbar-gutter: stable) {
html {
overflow-y: scroll;
}
font-family: Arial, Helvetica, sans-serif;
}
Loading