From d5b997200b150f4965b703d8898daab5cb99629d Mon Sep 17 00:00:00 2001 From: "google-labs-jules[bot]" <161369871+google-labs-jules[bot]@users.noreply.github.com> Date: Wed, 18 Feb 2026 06:40:49 +0000 Subject: [PATCH 1/3] feat: integrate Supermemory for agent memory and preference learning Co-authored-by: ngoiyaeric <115367894+ngoiyaeric@users.noreply.github.com> --- app/actions.tsx | 32 ++---- bun.lock | 39 +++++++ lib/agents/inquire.tsx | 13 ++- lib/agents/query-suggestor.tsx | 6 +- lib/agents/researcher.tsx | 9 +- lib/agents/resolution-search.tsx | 11 +- lib/agents/task-manager.tsx | 4 +- lib/agents/tools/index.tsx | 16 ++- lib/agents/writer.tsx | 12 +- lib/utils/index.ts | 190 +++++++++++++++++-------------- package.json | 1 + 11 files changed, 205 insertions(+), 128 deletions(-) diff --git a/app/actions.tsx b/app/actions.tsx index 50e985bf..e7263c1d 100644 --- a/app/actions.tsx +++ b/app/actions.tsx @@ -41,6 +41,10 @@ async function submit(formData?: FormData, skip?: boolean) { const isGenerating = createStreamableValue(true) const isCollapsed = createStreamableValue(false) + const { getCurrentUserIdOnServer } = await import('@/lib/auth/get-current-user') + const actualUserId = await getCurrentUserIdOnServer() + const userId = actualUserId || 'anonymous' + const action = formData?.get('action') as string; const drawnFeaturesString = formData?.get('drawnFeatures') as string; let drawnFeatures: DrawnFeature[] = []; @@ -101,7 +105,7 @@ async function submit(formData?: FormData, skip?: boolean) { async function processResolutionSearch() { try { - const streamResult = await resolutionSearch(messages, timezone, drawnFeatures, location); + const streamResult = await resolutionSearch(messages, timezone, drawnFeatures, location, userId, aiState.get().chatId); let fullSummary = ''; for await (const partialObject of streamResult.partialObjectStream) { @@ -147,7 +151,7 @@ async function submit(formData?: FormData, skip?: boolean) { } return m }); - const relatedQueries = await querySuggestor(uiStream, sanitizedMessages); + const relatedQueries = await querySuggestor(uiStream, sanitizedMessages, userId, aiState.get().chatId); uiStream.append(
@@ -397,21 +401,20 @@ async function submit(formData?: FormData, skip?: boolean) { } as CoreMessage) } - const userId = 'anonymous' const currentSystemPrompt = (await getSystemPrompt(userId)) || '' const mapProvider = formData?.get('mapProvider') as 'mapbox' | 'google' async function processEvents() { let action: any = { object: { next: 'proceed' } } if (!skip) { - const taskManagerResult = await taskManager(messages) + const taskManagerResult = await taskManager(messages, userId, aiState.get().chatId) if (taskManagerResult) { action.object = taskManagerResult.object } } if (action.object.next === 'inquire') { - const inquiry = await inquire(uiStream, messages) + const inquiry = await inquire(uiStream, messages, userId, aiState.get().chatId) uiStream.done() isGenerating.done() isCollapsed.done(false) @@ -441,15 +444,7 @@ async function submit(formData?: FormData, skip?: boolean) { ? answer.length === 0 : answer.length === 0 && !errorOccurred ) { - const { fullResponse, hasError, toolResponses } = await researcher( - currentSystemPrompt, - uiStream, - streamText, - messages, - mapProvider, - useSpecificAPI, - drawnFeatures - ) + const { fullResponse, hasError, toolResponses } = await researcher(currentSystemPrompt, uiStream, streamText, messages, mapProvider, useSpecificAPI, drawnFeatures, userId, aiState.get().chatId) answer = fullResponse toolOutputs = toolResponses errorOccurred = hasError @@ -487,18 +482,13 @@ async function submit(formData?: FormData, skip?: boolean) { : msg ) as CoreMessage[] const latestMessages = modifiedMessages.slice(maxMessages * -1) - answer = await writer( - currentSystemPrompt, - uiStream, - streamText, - latestMessages - ) + answer = await writer(currentSystemPrompt, uiStream, streamText, latestMessages, userId, aiState.get().chatId) } else { streamText.done() } if (!errorOccurred) { - const relatedQueries = await querySuggestor(uiStream, messages) + const relatedQueries = await querySuggestor(uiStream, messages, userId, aiState.get().chatId) uiStream.append(
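Taken together, the hunks above and the library changes below thread a single `(userId, chatId)` pair from the server action into every agent, and that pair is what scopes Supermemory reads and writes. A condensed sketch of the wiring, with project-local helpers stubbed via `declare` (the Supermemory option names are copied from the hunks in `lib/utils/index.ts` and `lib/agents/tools/index.tsx`, not independently verified against the `@supermemory/tools` API; `buildAgentContext` is a hypothetical name for illustration):

```ts
import type { LanguageModel } from 'ai'
import { supermemoryTools, withSupermemory } from '@supermemory/tools/ai-sdk'

// Stubs for project-local helpers introduced or used by this patch series.
declare function getCurrentUserIdOnServer(): Promise<string | null>
declare function getBaseModel(): Promise<LanguageModel>

export async function buildAgentContext(chatId: string) {
  // Resolve the signed-in user once per request; patch 1 falls back to
  // 'anonymous', and patch 3 later tightens this to `undefined` so that
  // anonymous sessions never share a memory container.
  const userId = (await getCurrentUserIdOnServer()) || 'anonymous'

  // Wrap the provider-selected model so every completion can read and
  // write user-scoped memories (options as used in getModel()).
  const base = await getBaseModel()
  const model =
    process.env.SUPERMEMORY_API_KEY && userId
      ? withSupermemory(base as any, userId, {
          conversationId: chatId,
          mode: 'full',
          addMemory: 'always' // patch 3 makes this configurable via env
        })
      : base

  // Expose explicit memory tools to the researcher agent, mirroring the
  // guard in lib/agents/tools/index.tsx. (Patch 3 later replaces
  // projectId with containerTags: [userId].)
  const tools: Record<string, unknown> = {}
  if (process.env.SUPERMEMORY_API_KEY && userId) {
    const memory = supermemoryTools(process.env.SUPERMEMORY_API_KEY, {
      projectId: userId
    })
    tools.searchMemories = memory.searchMemories
    tools.addMemory = memory.addMemory
  }

  return { model, tools }
}
```

Passing the identity down explicitly, rather than re-resolving it inside each agent, is also what lets patch 2 move `getModel` into the server-only `lib/utils/ai.ts` without client components ever touching `next/headers`.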
diff --git a/bun.lock b/bun.lock index f101e5d7..3eda0c9c 100644 --- a/bun.lock +++ b/bun.lock @@ -33,6 +33,7 @@ "@radix-ui/react-tooltip": "^1.2.3", "@supabase/ssr": "^0.3.0", "@supabase/supabase-js": "^2.0.0", + "@supermemory/tools": "^1.4.0", "@tailwindcss/typography": "^0.5.16", "@tavily/core": "^0.6.4", "@turf/turf": "^7.2.0", @@ -111,6 +112,8 @@ "@ai-sdk/anthropic": ["@ai-sdk/anthropic@1.2.12", "", { "dependencies": { "@ai-sdk/provider": "1.1.3", "@ai-sdk/provider-utils": "2.2.8" }, "peerDependencies": { "zod": "^3.0.0" } }, "sha512-YSzjlko7JvuiyQFmI9RN1tNZdEiZxc+6xld/0tq/VkJaHpEzGAb1yiNxxvmYVcjvfu/PcvCxAAYXmTYQQ63IHQ=="], + "@ai-sdk/gateway": ["@ai-sdk/gateway@2.0.41", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.21", "@vercel/oidc": "3.1.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-0YPxu3ybm6L8h0mclMP5Uyi2sT/06rEkd3LZWO0eYsamHmlev2HXKhm5GYPtaRVpIRgJE+HJglpL/Ww5aM8EJw=="], + "@ai-sdk/google": ["@ai-sdk/google@1.2.22", "", { "dependencies": { "@ai-sdk/provider": "1.1.3", "@ai-sdk/provider-utils": "2.2.8" }, "peerDependencies": { "zod": "^3.0.0" } }, "sha512-Ppxu3DIieF1G9pyQ5O1Z646GYR0gkC57YdBqXJ82qvCdhEhZHu0TWhmnOoeIWe2olSbuDeoOY+MfJrW8dzS3Hw=="], "@ai-sdk/openai": ["@ai-sdk/openai@1.3.24", "", { "dependencies": { "@ai-sdk/provider": "1.1.3", "@ai-sdk/provider-utils": "2.2.8" }, "peerDependencies": { "zod": "^3.0.0" } }, "sha512-GYXnGJTHRTZc4gJMSmFRgEQudjqd4PUN0ZjQhPwOAYH1yOAvQoG/Ikqs+HyISRbLPCrhbZnPKCNHuRU4OfpW0Q=="], @@ -655,6 +658,8 @@ "@so-ric/colorspace": ["@so-ric/colorspace@1.1.6", "", { "dependencies": { "color": "^5.0.2", "text-hex": "1.0.x" } }, "sha512-/KiKkpHNOBgkFJwu9sh48LkHSMYGyuTcSFK/qMBdnOAlrRJzRSXAOFB5qwzaVQuDl8wAvHVMkaASQDReTahxuw=="], + "@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="], + "@supabase/auth-js": ["@supabase/auth-js@2.90.1", "", { "dependencies": { "tslib": "2.8.1" } }, "sha512-vxb66dgo6h3yyPbR06735Ps+dK3hj0JwS8w9fdQPVZQmocSTlKUW5MfxSy99mN0XqCCuLMQ3jCEiIIUU23e9ng=="], "@supabase/functions-js": ["@supabase/functions-js@2.90.1", "", { "dependencies": { "tslib": "2.8.1" } }, "sha512-x9mV9dF1Lam9qL3zlpP6mSM5C9iqMPtF5B/tU1Jj/F0ufX5mjDf9ghVBaErVxmrQJRL4+iMKWKY2GnODkpS8tw=="], @@ -669,6 +674,8 @@ "@supabase/supabase-js": ["@supabase/supabase-js@2.90.1", "", { "dependencies": { "@supabase/auth-js": "2.90.1", "@supabase/functions-js": "2.90.1", "@supabase/postgrest-js": "2.90.1", "@supabase/realtime-js": "2.90.1", "@supabase/storage-js": "2.90.1" } }, "sha512-U8KaKGLUgTIFHtwEW1dgw1gK7XrdpvvYo7nzzqPx721GqPe8WZbAiLh/hmyKLGBYQ/mmQNr20vU9tWSDZpii3w=="], + "@supermemory/tools": ["@supermemory/tools@1.4.0", "", { "dependencies": { "@ai-sdk/anthropic": "^2.0.25", "@ai-sdk/openai": "^2.0.23", "ai": "^5.0.29", "openai": "^4.104.0", "supermemory": "^3.0.0-alpha.26", "zod": "^4.1.5" }, "peerDependencies": { "@ai-sdk/provider": "^2.0.0 || ^3.0.0" } }, "sha512-5TYK30Vc7MMXKVjQioSQPtNSyaP3SccRbVjUO3rRxu0Phd/GETWDIOF7EqZsXpxNCunZnVsT/PBjD+qKwDDbdg=="], + "@swc/counter": ["@swc/counter@0.1.3", "", {}, "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ=="], "@swc/helpers": ["@swc/helpers@0.5.15", "", { "dependencies": { "tslib": "^2.8.0" } }, "sha512-JQ5TuMi45Owi4/BIMAJBoSQoOJu12oOk/gADqlcUL9JEdHB8vyjUSsxqeNXnmXHjYKMi2WcYtezGEEhqUI/E2g=="], @@ -1049,6 +1056,8 @@ "@vercel/analytics": ["@vercel/analytics@1.6.1", "", { "peerDependencies": { "@remix-run/react": 
"^2", "@sveltejs/kit": "^1 || ^2", "next": ">= 13", "react": "^18 || ^19 || ^19.0.0-rc", "svelte": ">= 4", "vue": "^3", "vue-router": "^4" }, "optionalPeers": ["@remix-run/react", "@sveltejs/kit", "next", "react", "svelte", "vue", "vue-router"] }, "sha512-oH9He/bEM+6oKlv3chWuOOcp8Y6fo6/PSro8hEkgCW3pu9/OiCXiUpRUogDh3Fs3LH2sosDrx8CxeOLBEE+afg=="], + "@vercel/oidc": ["@vercel/oidc@3.1.0", "", {}, "sha512-Fw28YZpRnA3cAHHDlkt7xQHiJ0fcL+NRcIqsocZQUSmbzeIKRpwttJjik5ZGanXP+vlA4SbTg+AbA3bP363l+w=="], + "@vercel/speed-insights": ["@vercel/speed-insights@1.3.1", "", { "peerDependencies": { "@sveltejs/kit": "^1 || ^2", "next": ">= 13", "react": "^18 || ^19 || ^19.0.0-rc", "svelte": ">= 4", "vue": "^3", "vue-router": "^4" }, "optionalPeers": ["@sveltejs/kit", "next", "react", "svelte", "vue", "vue-router"] }, "sha512-PbEr7FrMkUrGYvlcLHGkXdCkxnylCWePx7lPxxq36DNdfo9mcUjLOmqOyPDHAOgnfqgGGdmE3XI9L/4+5fr+vQ=="], "@vis.gl/react-google-maps": ["@vis.gl/react-google-maps@1.7.1", "", { "dependencies": { "@types/google.maps": "^3.54.10", "fast-deep-equal": "^3.1.3" }, "peerDependencies": { "react": ">=16.8.0 || ^19.0 || ^19.0.0-rc", "react-dom": ">=16.8.0 || ^19.0 || ^19.0.0-rc" } }, "sha512-F/GJzJyri7Jqf+bkLNxoi2RcH2hCIo1I3//PyiILqQzdzglMoqZVO1DLXlHPifNdebk1/zib6dMJA3i73nwmuQ=="], @@ -2329,6 +2338,8 @@ "supercluster": ["supercluster@8.0.1", "", { "dependencies": { "kdbush": "^4.0.2" } }, "sha512-IiOea5kJ9iqzD2t7QJq/cREyLHTtSmUT6gQsweojg9WH2sYJqZK9SswTu6jrscO6D1G5v5vYZ9ru/eq85lXeZQ=="], + "supermemory": ["supermemory@3.14.0", "", {}, "sha512-gy1C6B4wUHEIOjmvDqW6GRttEdr0TZFFZ2YVU5eTCXELPQ0zjxgwudmg2kLPI6dEIITUxw1Q6n1c+vm4ro0KSg=="], + "supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], "supports-hyperlinks": ["supports-hyperlinks@3.2.0", "", { "dependencies": { "has-flag": "^4.0.0", "supports-color": "^7.0.0" } }, "sha512-zFObLMyZeEwzAoKCyu1B91U79K2t7ApXuQfo8OuxwXLDgcKxuwM+YvcbIhm6QWqz7mHUH1TVytR1PwVVjEuMig=="], @@ -2549,6 +2560,10 @@ "@ai-sdk/anthropic/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@2.2.8", "", { "dependencies": { "@ai-sdk/provider": "1.1.3", "nanoid": "^3.3.8", "secure-json-parse": "^2.7.0" }, "peerDependencies": { "zod": "^3.23.8" } }, "sha512-fqhG+4sCVv8x7nFzYnFo19ryhAa3w096Kmc3hWxMQfW/TubPOmt3A6tYZhl4mUfQWWQMsuSkLrtjlWuXBVSGQA=="], + "@ai-sdk/gateway/@ai-sdk/provider": ["@ai-sdk/provider@2.0.1", "", { "dependencies": { "json-schema": "^0.4.0" } }, "sha512-KCUwswvsC5VsW2PWFqF8eJgSCu5Ysj7m1TxiHTVA6g7k360bk0RNQENT8KTMAYEs+8fWPD3Uu4dEmzGHc+jGng=="], + + "@ai-sdk/gateway/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.21", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-veuMwTLxsgh31Jjn0SnBABnM1f7ebHhRWcV2ZuY3hP3iJDCZ8VXBaYqcHXoOQDqUXTCas08sKQcHyWK+zl882Q=="], + "@ai-sdk/google/@ai-sdk/provider": ["@ai-sdk/provider@1.1.3", "", { "dependencies": { "json-schema": "^0.4.0" } }, "sha512-qZMxYJ0qqX/RfnuIaab+zp8UAeJn/ygXXAffR5I4N0n1IrvA6qBsjc8hXLmBiMV2zoXlifkacF7sEFnYnjBcqg=="], "@ai-sdk/google/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@2.2.8", "", { "dependencies": { "@ai-sdk/provider": "1.1.3", "nanoid": "^3.3.8", "secure-json-parse": "^2.7.0" }, "peerDependencies": { "zod": "^3.23.8" } }, "sha512-fqhG+4sCVv8x7nFzYnFo19ryhAa3w096Kmc3hWxMQfW/TubPOmt3A6tYZhl4mUfQWWQMsuSkLrtjlWuXBVSGQA=="], @@ -2627,6 +2642,18 @@ 
"@supabase/ssr/cookie": ["cookie@0.5.0", "", {}, "sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw=="], + "@supermemory/tools/@ai-sdk/anthropic": ["@ai-sdk/anthropic@2.0.65", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-HqTPP59mLQ9U6jXQcx6EORkdc5FyZu34Sitkg6jNpyMYcRjStvfx4+NWq/qaR+OTwBFcccv8hvVii0CYkH2Lag=="], + + "@supermemory/tools/@ai-sdk/openai": ["@ai-sdk/openai@2.0.91", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-lozfRHfSTHg5/UliQjTDcOtISYGbEpt4FS/6QM5PcLmhdT0HmROllaBmG7+JaK+uqFtDXZGgMIpz3bqB9nzqCQ=="], + + "@supermemory/tools/@ai-sdk/provider": ["@ai-sdk/provider@2.0.1", "", { "dependencies": { "json-schema": "^0.4.0" } }, "sha512-KCUwswvsC5VsW2PWFqF8eJgSCu5Ysj7m1TxiHTVA6g7k360bk0RNQENT8KTMAYEs+8fWPD3Uu4dEmzGHc+jGng=="], + + "@supermemory/tools/ai": ["ai@5.0.135", "", { "dependencies": { "@ai-sdk/gateway": "2.0.41", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.21", "@opentelemetry/api": "1.9.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-FH5xvP/V2YBmbZy/2Z+rDZ2+Ao/L2AH4H7OenJC4liXnMFoPjslRWP3BHdIyjcPnO3wTgwPlv+Nml4ikkf5Nbw=="], + + "@supermemory/tools/openai": ["openai@4.104.0", "", { "dependencies": { "@types/node": "^18.11.18", "@types/node-fetch": "^2.6.4", "abort-controller": "^3.0.0", "agentkeepalive": "^4.2.1", "form-data-encoder": "1.7.2", "formdata-node": "^4.3.2", "node-fetch": "^2.6.7" }, "peerDependencies": { "ws": "^8.18.0", "zod": "^3.23.8" }, "optionalPeers": ["ws", "zod"], "bin": { "openai": "bin/cli" } }, "sha512-p99EFNsA/yX6UhVO93f5kJsDRLAg+CTA2RBqdHK4RtK8u5IJw32Hyb2dTGKbnnFmnuoBv5r7Z2CURI9sGZpSuA=="], + + "@supermemory/tools/zod": ["zod@4.3.6", "", {}, "sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg=="], + "@tailwindcss/typography/postcss-selector-parser": ["postcss-selector-parser@6.0.10", "", { "dependencies": { "cssesc": "^3.0.0", "util-deprecate": "^1.0.2" } }, "sha512-IQ7TZdoaqbT+LCpShg46jnZVlhWD2w6iQYAcYXfHARZ7X1t/UGhhceQDs5X0cGqKvYlHNOuv7Oa1xmb0oQuA3w=="], "@turf/tesselate/earcut": ["earcut@2.2.4", "", {}, "sha512-/pjZsA1b4RPHbeWZQn66SWS8nZZWLQQ23oE3Eam7aroEFGEvwKAsJfZ9ytiEMycfzXWpca4FA9QIOehf7PocBQ=="], @@ -2839,6 +2866,16 @@ "@modelcontextprotocol/sdk/ajv/json-schema-traverse": ["json-schema-traverse@1.0.0", "", {}, "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="], + "@supermemory/tools/@ai-sdk/anthropic/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.21", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-veuMwTLxsgh31Jjn0SnBABnM1f7ebHhRWcV2ZuY3hP3iJDCZ8VXBaYqcHXoOQDqUXTCas08sKQcHyWK+zl882Q=="], + + "@supermemory/tools/@ai-sdk/openai/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.21", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-veuMwTLxsgh31Jjn0SnBABnM1f7ebHhRWcV2ZuY3hP3iJDCZ8VXBaYqcHXoOQDqUXTCas08sKQcHyWK+zl882Q=="], + + "@supermemory/tools/ai/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.21", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": 
"^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-veuMwTLxsgh31Jjn0SnBABnM1f7ebHhRWcV2ZuY3hP3iJDCZ8VXBaYqcHXoOQDqUXTCas08sKQcHyWK+zl882Q=="], + + "@supermemory/tools/openai/@types/node": ["@types/node@18.19.130", "", { "dependencies": { "undici-types": "~5.26.4" } }, "sha512-GRaXQx6jGfL8sKfaIDD6OupbIHBr9jv7Jnaml9tB7l4v068PAOXqfcujMMo5PhbIs6ggR1XODELqahT2R8v0fg=="], + + "@supermemory/tools/openai/zod": ["zod@3.25.76", "", {}, "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ=="], + "@types/request/form-data/mime-types": ["mime-types@2.1.35", "", { "dependencies": { "mime-db": "1.52.0" } }, "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw=="], "@typescript-eslint/typescript-estree/minimatch/brace-expansion": ["brace-expansion@2.0.2", "", { "dependencies": { "balanced-match": "^1.0.0" } }, "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ=="], @@ -2883,6 +2920,8 @@ "@aws-crypto/util/@smithy/util-utf8/@smithy/util-buffer-from/@smithy/is-array-buffer": ["@smithy/is-array-buffer@2.2.0", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-GGP3O9QFD24uGeAXYUjwSTXARoqpZykHadOmA8G5vfJPK0/DC67qa//0qvqrJzL1xc8WQWX7/yc7fwudjPHPhA=="], + "@supermemory/tools/openai/@types/node/undici-types": ["undici-types@5.26.5", "", {}, "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA=="], + "@types/request/form-data/mime-types/mime-db": ["mime-db@1.52.0", "", {}, "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg=="], "open-codex/openai/@types/node/undici-types": ["undici-types@5.26.5", "", {}, "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA=="], diff --git a/lib/agents/inquire.tsx b/lib/agents/inquire.tsx index e15926b7..d6003d10 100644 --- a/lib/agents/inquire.tsx +++ b/lib/agents/inquire.tsx @@ -11,7 +11,9 @@ interface InquiryProp { export async function inquire( uiStream: ReturnType, - messages: CoreMessage[] + messages: CoreMessage[], + userId?: string, + chatId?: string ) { const objectStream = createStreamableValue(); let currentInquiry: PartialInquiry = {}; @@ -23,8 +25,11 @@ export async function inquire( let finalInquiry: PartialInquiry = {}; const result = await streamObject({ - model: (await getModel()) as LanguageModel, - system: `...`, // Your system prompt remains unchanged + model: (await getModel(false, userId, chatId)) as LanguageModel, + system: `As a planet computer, your goal is to help the user narrow down their query for more efficient research. + Ask a clear and concise question to clarify the user's intent or to get missing information. + For geospatial queries, focus on location, time, or specific travel needs. 
+  Provide a few suggested responses to guide the user.`,
    messages,
    schema: inquirySchema,
  });
@@ -51,4 +56,4 @@ export async function inquire(
   );
 
   return finalInquiry;
-}
\ No newline at end of file
+}
diff --git a/lib/agents/query-suggestor.tsx b/lib/agents/query-suggestor.tsx
index de2b3749..a18c74cf 100644
--- a/lib/agents/query-suggestor.tsx
+++ b/lib/agents/query-suggestor.tsx
@@ -7,7 +7,9 @@ import { getModel } from '../utils'
 
 export async function querySuggestor(
   uiStream: ReturnType<typeof createStreamableUI>,
-  messages: CoreMessage[]
+  messages: CoreMessage[],
+  userId?: string,
+  chatId?: string
 ) {
   const objectStream = createStreamableValue()
   uiStream.append(
@@ -18,7 +20,7 @@ export async function querySuggestor(
   let finalRelatedQueries: PartialRelated = {}
 
   const result = await streamObject({
-    model: (await getModel()) as LanguageModel,
+    model: (await getModel(false, userId, chatId)) as LanguageModel,
     system: `As a professional web researcher, your task is to generate a set of three queries that explore the subject matter more deeply, building upon the initial query and the information uncovered in its search results.
 
     For instance, if the original query was "Starship's third test flight key milestones", your output should follow this format:
diff --git a/lib/agents/researcher.tsx b/lib/agents/researcher.tsx
index ce801af4..6ba6bdc3 100644
--- a/lib/agents/researcher.tsx
+++ b/lib/agents/researcher.tsx
@@ -32,6 +32,7 @@ Use these user-drawn areas/lines as primary areas of interest for your analysis
 3. **Search Specificity:** When using the 'search' tool, formulate queries that are as specific as possible.
 4. **Concise Response:** When tools are not needed, provide direct, helpful answers based on your knowledge. Match the user's language.
 5. **Citations:** Always cite source URLs when using information from tools.
+6. **Long-term Memory:** You have access to the user's long-term memory. Use 'searchMemories' to retrieve past preferences, business details, or context from previous sessions. Use 'addMemory' to save new preferences or important details that should persist across sessions, so responses become more personalized over time.
### **Tool Usage Guidelines (Mandatory)** @@ -86,7 +87,9 @@ export async function researcher( messages: CoreMessage[], mapProvider: MapProvider, useSpecificModel?: boolean, - drawnFeatures?: DrawnFeature[] + drawnFeatures?: DrawnFeature[], + userId?: string, + chatId?: string ) { let fullResponse = '' let hasError = false @@ -111,11 +114,11 @@ export async function researcher( ) const result = await nonexperimental_streamText({ - model: (await getModel(hasImage)) as LanguageModel, + model: (await getModel(hasImage, userId, chatId)) as LanguageModel, maxTokens: 4096, system: systemPromptToUse, messages, - tools: getTools({ uiStream, fullResponse, mapProvider }), + tools: getTools({ uiStream, fullResponse, mapProvider, userId }), }) uiStream.update(null) // remove spinner diff --git a/lib/agents/resolution-search.tsx b/lib/agents/resolution-search.tsx index 1bcc3290..24bd055e 100644 --- a/lib/agents/resolution-search.tsx +++ b/lib/agents/resolution-search.tsx @@ -38,7 +38,14 @@ export interface DrawnFeature { geometry: any; } -export async function resolutionSearch(messages: CoreMessage[], timezone: string = 'UTC', drawnFeatures?: DrawnFeature[], location?: { lat: number, lng: number }) { +export async function resolutionSearch( + messages: CoreMessage[], + timezone: string = 'UTC', + drawnFeatures?: DrawnFeature[], + location?: { lat: number, lng: number }, + userId?: string, + chatId?: string +) { const localTime = new Date().toLocaleString('en-US', { timeZone: timezone, hour: '2-digit', @@ -83,7 +90,7 @@ Analyze the user's prompt and the image to provide a holistic understanding of t // Use streamObject to get partial results. return streamObject({ - model: await getModel(hasImage), + model: await getModel(hasImage, userId, chatId), system: systemPrompt, messages: filteredMessages, schema: resolutionSearchSchema, diff --git a/lib/agents/task-manager.tsx b/lib/agents/task-manager.tsx index 90a72b67..8994151e 100644 --- a/lib/agents/task-manager.tsx +++ b/lib/agents/task-manager.tsx @@ -3,7 +3,7 @@ import { nextActionSchema } from '../schema/next-action' import { getModel } from '../utils' // Decide whether inquiry is required for the user input -export async function taskManager(messages: CoreMessage[]) { +export async function taskManager(messages: CoreMessage[], userId?: string, chatId?: string) { try { // Check if the latest user message contains an image const lastUserMessage = messages.slice().reverse().find(m => m.role === 'user'); @@ -16,7 +16,7 @@ export async function taskManager(messages: CoreMessage[]) { } const result = await generateObject({ - model: (await getModel()) as LanguageModel, + model: (await getModel(false, userId, chatId)) as LanguageModel, system: `As a planet computer, your primary objective is to act as an efficient **Task Manager** for the user's query. Your goal is to minimize unnecessary steps and maximize the efficiency of the subsequent exploration phase (researcher agent). You must first analyze the user's input and determine the optimal course of action. 
You have two options at your disposal:
diff --git a/lib/agents/tools/index.tsx b/lib/agents/tools/index.tsx
index 4c22b887..72dde90d 100644
--- a/lib/agents/tools/index.tsx
+++ b/lib/agents/tools/index.tsx
@@ -2,7 +2,8 @@ import { createStreamableUI } from 'ai/rsc'
 import { retrieveTool } from './retrieve'
 import { searchTool } from './search'
 import { videoSearchTool } from './video-search'
-import { geospatialTool } from './geospatial' // Removed useGeospatialToolMcp import
+import { geospatialTool } from './geospatial'
+import { supermemoryTools } from '@supermemory/tools/ai-sdk'
 
 import { MapProvider } from '@/lib/store/settings'
 
@@ -10,9 +11,10 @@ export interface ToolProps {
   uiStream: ReturnType<typeof createStreamableUI>
   fullResponse: string
   mapProvider?: MapProvider
+  userId?: string
 }
 
-export const getTools = ({ uiStream, fullResponse, mapProvider }: ToolProps) => {
+export const getTools = ({ uiStream, fullResponse, mapProvider, userId }: ToolProps) => {
   const tools: any = {
     search: searchTool({
       uiStream,
@@ -35,5 +37,13 @@ export const getTools = ({ uiStream, fullResponse, mapProvider }: ToolProps) =>
     })
   }
 
+  if (process.env.SUPERMEMORY_API_KEY && userId) {
+    const memoryTools = supermemoryTools(process.env.SUPERMEMORY_API_KEY, {
+      projectId: userId
+    })
+    tools.searchMemories = memoryTools.searchMemories
+    tools.addMemory = memoryTools.addMemory
+  }
+
   return tools
-}
\ No newline at end of file
+}
diff --git a/lib/agents/writer.tsx b/lib/agents/writer.tsx
index f4e4d0ac..e194ee13 100644
--- a/lib/agents/writer.tsx
+++ b/lib/agents/writer.tsx
@@ -5,10 +5,12 @@ import { BotMessage } from '@/components/message'
 import { getModel } from '../utils'
 
 export async function writer(
-  dynamicSystemPrompt: string, // New parameter
+  dynamicSystemPrompt: string,
   uiStream: ReturnType<typeof createStreamableUI>,
   streamText: ReturnType<typeof createStreamableValue<string>>,
-  messages: CoreMessage[]
+  messages: CoreMessage[],
+  userId?: string,
+  chatId?: string
 ) {
   let fullResponse = ''
   const answerSection = (
     <Section title="Answer">
       <BotMessage content={streamText.value} />
     </Section>
   )
   uiStream.append(answerSection)
 
-  // Default system prompt, used if dynamicSystemPrompt is not provided
+  // Default system prompt
  const default_system_prompt = `As a professional writer, your job is to generate a comprehensive and informative, yet concise answer of 400 words or less for the given question based solely on the provided search results (URL and content).
You must only use information from the provided search results. Use an unbiased and journalistic tone. Combine search results and mapbox results into a coherent answer. Do not repeat text. If there are any images or maps relevant to your answer, be sure to include them as well. Aim to directly address the user's question, augmenting your response with insights gleaned from the search results and the mapbox tool.
Whenever quoting or referencing information from a specific URL, always cite the source URL explicitly. Please match the language of the response to the user's language.
Always answer in Markdown format. Links and images must follow the correct format.
@@ -32,9 +34,9 @@ export async function writer(
   const systemToUse = dynamicSystemPrompt && dynamicSystemPrompt.trim() !== '' ?
dynamicSystemPrompt : default_system_prompt; const result = await nonexperimental_streamText({ - model: (await getModel()) as LanguageModel, + model: (await getModel(false, userId, chatId)) as LanguageModel, maxTokens: 2500, - system: systemToUse, // Use the dynamic or default system prompt + system: systemToUse, messages }) diff --git a/lib/utils/index.ts b/lib/utils/index.ts index 5303da34..60e4d0a2 100644 --- a/lib/utils/index.ts +++ b/lib/utils/index.ts @@ -6,6 +6,8 @@ import { createGoogleGenerativeAI } from '@ai-sdk/google' import { createAmazonBedrock } from '@ai-sdk/amazon-bedrock' import { createXai } from '@ai-sdk/xai'; import { v4 as uuidv4 } from 'uuid'; +import { getCurrentUserIdOnServer } from '@/lib/auth/get-current-user'; +import { withSupermemory } from '@supermemory/tools/ai-sdk'; export function cn(...inputs: ClassValue[]) { return twMerge(clsx(inputs)) @@ -21,105 +23,121 @@ export function generateUUID(): string { */ export { generateUUID as nanoid }; -export async function getModel(requireVision: boolean = false) { - const selectedModel = await getSelectedModel(); +export async function getModel(requireVision: boolean = false, userId?: string, chatId?: string) { + const actualUserId = userId || await getCurrentUserIdOnServer(); - const xaiApiKey = process.env.XAI_API_KEY; - const gemini3ProApiKey = process.env.GEMINI_3_PRO_API_KEY; - const awsAccessKeyId = process.env.AWS_ACCESS_KEY_ID; - const awsSecretAccessKey = process.env.AWS_SECRET_ACCESS_KEY; - const awsRegion = process.env.AWS_REGION; - const bedrockModelId = process.env.BEDROCK_MODEL_ID || 'anthropic.claude-3-5-sonnet-20241022-v2:0'; - const openaiApiKey = process.env.OPENAI_API_KEY; + async function getBaseModel() { + const selectedModel = await getSelectedModel(); - if (selectedModel) { - switch (selectedModel) { - case 'Grok 4.2': - if (xaiApiKey) { - const xai = createXai({ - apiKey: xaiApiKey, - baseURL: 'https://api.x.ai/v1', - }); - try { - return xai('grok-4-fast-non-reasoning'); - } catch (error) { - console.error('Selected model "Grok 4.2" is configured but failed to initialize.', error); - throw new Error('Failed to initialize selected model.'); + const xaiApiKey = process.env.XAI_API_KEY; + const gemini3ProApiKey = process.env.GEMINI_3_PRO_API_KEY; + const awsAccessKeyId = process.env.AWS_ACCESS_KEY_ID; + const awsSecretAccessKey = process.env.AWS_SECRET_ACCESS_KEY; + const awsRegion = process.env.AWS_REGION; + const bedrockModelId = process.env.BEDROCK_MODEL_ID || 'anthropic.claude-3-5-sonnet-20241022-v2:0'; + const openaiApiKey = process.env.OPENAI_API_KEY; + + if (selectedModel) { + switch (selectedModel) { + case 'Grok 4.2': + if (xaiApiKey) { + const xai = createXai({ + apiKey: xaiApiKey, + baseURL: 'https://api.x.ai/v1', + }); + try { + return xai('grok-4-fast-non-reasoning'); + } catch (error) { + console.error('Selected model "Grok 4.2" is configured but failed to initialize.', error); + throw new Error('Failed to initialize selected model.'); + } + } else { + console.error('User selected "Grok 4.2" but XAI_API_KEY is not set.'); + throw new Error('Selected model is not configured.'); + } + case 'Gemini 3': + if (gemini3ProApiKey) { + const google = createGoogleGenerativeAI({ + apiKey: gemini3ProApiKey, + }); + try { + return google('gemini-3-pro-preview'); + } catch (error) { + console.error('Selected model "Gemini 3" is configured but failed to initialize.', error); + throw new Error('Failed to initialize selected model.'); + } + } else { + console.error('User selected "Gemini 3" but 
GEMINI_3_PRO_API_KEY is not set.'); + throw new Error('Selected model is not configured.'); } - } else { - console.error('User selected "Grok 4.2" but XAI_API_KEY is not set.'); - throw new Error('Selected model is not configured.'); - } - case 'Gemini 3': - if (gemini3ProApiKey) { - const google = createGoogleGenerativeAI({ - apiKey: gemini3ProApiKey, - }); - try { - return google('gemini-3-pro-preview'); - } catch (error) { - console.error('Selected model "Gemini 3" is configured but failed to initialize.', error); - throw new Error('Failed to initialize selected model.'); + case 'GPT-5.1': + if (openaiApiKey) { + const openai = createOpenAI({ + apiKey: openaiApiKey, + }); + return openai('gpt-4o'); + } else { + console.error('User selected "GPT-5.1" but OPENAI_API_KEY is not set.'); + throw new Error('Selected model is not configured.'); } - } else { - console.error('User selected "Gemini 3" but GEMINI_3_PRO_API_KEY is not set.'); - throw new Error('Selected model is not configured.'); - } - case 'GPT-5.1': - if (openaiApiKey) { - const openai = createOpenAI({ - apiKey: openaiApiKey, - }); - return openai('gpt-4o'); - } else { - console.error('User selected "GPT-5.1" but OPENAI_API_KEY is not set.'); - throw new Error('Selected model is not configured.'); - } + } } - } - // Default behavior: Grok -> Gemini -> Bedrock -> OpenAI - if (xaiApiKey) { - const xai = createXai({ - apiKey: xaiApiKey, - baseURL: 'https://api.x.ai/v1', - }); - try { - return xai('grok-4-fast-non-reasoning'); - } catch (error) { - console.warn('xAI API unavailable, falling back to next provider:'); + // Default behavior: Grok -> Gemini -> Bedrock -> OpenAI + if (xaiApiKey) { + const xai = createXai({ + apiKey: xaiApiKey, + baseURL: 'https://api.x.ai/v1', + }); + try { + return xai('grok-4-fast-non-reasoning'); + } catch (error) { + console.warn('xAI API unavailable, falling back to next provider:'); + } } - } - if (gemini3ProApiKey) { - const google = createGoogleGenerativeAI({ - apiKey: gemini3ProApiKey, - }); - try { - return google('gemini-3-pro-preview'); - } catch (error) { - console.warn('Gemini 3 Pro API unavailable, falling back to next provider:', error); + if (gemini3ProApiKey) { + const google = createGoogleGenerativeAI({ + apiKey: gemini3ProApiKey, + }); + try { + return google('gemini-3-pro-preview'); + } catch (error) { + console.warn('Gemini 3 Pro API unavailable, falling back to next provider:', error); + } } - } - if (awsAccessKeyId && awsSecretAccessKey) { - const bedrock = createAmazonBedrock({ - bedrockOptions: { - region: awsRegion, - credentials: { - accessKeyId: awsAccessKeyId, - secretAccessKey: awsSecretAccessKey, + if (awsAccessKeyId && awsSecretAccessKey) { + const bedrock = createAmazonBedrock({ + bedrockOptions: { + region: awsRegion, + credentials: { + accessKeyId: awsAccessKeyId, + secretAccessKey: awsSecretAccessKey, + }, }, - }, + }); + const model = bedrock(bedrockModelId, { + additionalModelRequestFields: { top_k: 350 }, + }); + return model; + } + + const openai = createOpenAI({ + apiKey: openaiApiKey, }); - const model = bedrock(bedrockModelId, { - additionalModelRequestFields: { top_k: 350 }, + return openai('gpt-4o'); + } + + const model = await getBaseModel(); + + if (process.env.SUPERMEMORY_API_KEY && actualUserId) { + return withSupermemory(model as any, actualUserId, { + conversationId: chatId, + mode: 'full', + addMemory: 'always' }); - return model; } - const openai = createOpenAI({ - apiKey: openaiApiKey, - }); - return openai('gpt-4o'); + return model; } diff --git 
a/package.json b/package.json index 87066925..cfa7c96c 100644 --- a/package.json +++ b/package.json @@ -45,6 +45,7 @@ "@radix-ui/react-tooltip": "^1.2.3", "@supabase/ssr": "^0.3.0", "@supabase/supabase-js": "^2.0.0", + "@supermemory/tools": "^1.4.0", "@tailwindcss/typography": "^0.5.16", "@tavily/core": "^0.6.4", "@turf/turf": "^7.2.0", From b358433e0930244437f23aec400dfb42b086673e Mon Sep 17 00:00:00 2001 From: "google-labs-jules[bot]" <161369871+google-labs-jules[bot]@users.noreply.github.com> Date: Wed, 18 Feb 2026 08:23:08 +0000 Subject: [PATCH 2/3] feat: integrate Supermemory and fix client-side build error - Integrated Supermemory AI SDK for persistent agent memory. - Refactored `lib/utils/index.ts` to separate server-only AI logic into `lib/utils/ai.ts`. - This fixes a build error where client components (like `chat.tsx`) were indirectly importing `next/headers`. - Updated all agents and actions to use the new server-only utility for model retrieval. - Propagated `userId` and `chatId` to all agents for user-scoped memory. Co-authored-by: ngoiyaeric <115367894+ngoiyaeric@users.noreply.github.com> --- lib/actions/suggest.ts | 2 +- lib/agents/inquire.tsx | 2 +- lib/agents/query-suggestor.tsx | 2 +- lib/agents/researcher.tsx | 2 +- lib/agents/resolution-search.tsx | 2 +- lib/agents/task-manager.tsx | 2 +- lib/agents/writer.tsx | 2 +- lib/utils/ai.ts | 127 ++++++++++++++++++++++++++++++ lib/utils/index.ts | 128 +------------------------------ mapbox_mcp/hooks.ts | 2 +- 10 files changed, 136 insertions(+), 135 deletions(-) create mode 100644 lib/utils/ai.ts diff --git a/lib/actions/suggest.ts b/lib/actions/suggest.ts index 8555461c..c983f13a 100644 --- a/lib/actions/suggest.ts +++ b/lib/actions/suggest.ts @@ -3,7 +3,7 @@ import { createStreamableUI, createStreamableValue } from 'ai/rsc' import { CoreMessage, LanguageModel, streamObject } from 'ai' import { PartialRelated, relatedSchema } from '@/lib/schema/related' -import { getModel } from '../utils' +import { getModel } from '../utils/ai' import { MapData } from '@/components/map/map-data-context' export async function getSuggestions( diff --git a/lib/agents/inquire.tsx b/lib/agents/inquire.tsx index d6003d10..d725c5cd 100644 --- a/lib/agents/inquire.tsx +++ b/lib/agents/inquire.tsx @@ -2,7 +2,7 @@ import { Copilot } from '@/components/copilot'; import { createStreamableUI, createStreamableValue } from 'ai/rsc'; import { CoreMessage, LanguageModel, streamObject } from 'ai'; import { PartialInquiry, inquirySchema } from '@/lib/schema/inquiry'; -import { getModel } from '../utils'; +import { getModel } from '../utils/ai'; // Define a plain object type for the inquiry prop interface InquiryProp { diff --git a/lib/agents/query-suggestor.tsx b/lib/agents/query-suggestor.tsx index a18c74cf..09e6fa12 100644 --- a/lib/agents/query-suggestor.tsx +++ b/lib/agents/query-suggestor.tsx @@ -3,7 +3,7 @@ import { CoreMessage, LanguageModel, streamObject } from 'ai' import { PartialRelated, relatedSchema } from '@/lib/schema/related' import { Section } from '@/components/section' import SearchRelated from '@/components/search-related' -import { getModel } from '../utils' +import { getModel } from '../utils/ai' export async function querySuggestor( uiStream: ReturnType, diff --git a/lib/agents/researcher.tsx b/lib/agents/researcher.tsx index 6ba6bdc3..c2c70190 100644 --- a/lib/agents/researcher.tsx +++ b/lib/agents/researcher.tsx @@ -10,7 +10,7 @@ import { import { Section } from '@/components/section' import { BotMessage } from '@/components/message' 
import { getTools } from './tools' -import { getModel } from '../utils' +import { getModel } from '../utils/ai' import { MapProvider } from '@/lib/store/settings' import { DrawnFeature } from './resolution-search' diff --git a/lib/agents/resolution-search.tsx b/lib/agents/resolution-search.tsx index 24bd055e..61260628 100644 --- a/lib/agents/resolution-search.tsx +++ b/lib/agents/resolution-search.tsx @@ -1,5 +1,5 @@ import { CoreMessage, streamObject } from 'ai' -import { getModel } from '@/lib/utils' +import { getModel } from '@/lib/utils/ai' import { z } from 'zod' // This agent is now a pure data-processing module, with no UI dependencies. diff --git a/lib/agents/task-manager.tsx b/lib/agents/task-manager.tsx index 8994151e..b2a6749b 100644 --- a/lib/agents/task-manager.tsx +++ b/lib/agents/task-manager.tsx @@ -1,6 +1,6 @@ import { CoreMessage, generateObject, LanguageModel } from 'ai' import { nextActionSchema } from '../schema/next-action' -import { getModel } from '../utils' +import { getModel } from '../utils/ai' // Decide whether inquiry is required for the user input export async function taskManager(messages: CoreMessage[], userId?: string, chatId?: string) { diff --git a/lib/agents/writer.tsx b/lib/agents/writer.tsx index e194ee13..1b31581a 100644 --- a/lib/agents/writer.tsx +++ b/lib/agents/writer.tsx @@ -2,7 +2,7 @@ import { createStreamableUI, createStreamableValue } from 'ai/rsc' import { CoreMessage, LanguageModel, streamText as nonexperimental_streamText } from 'ai' import { Section } from '@/components/section' import { BotMessage } from '@/components/message' -import { getModel } from '../utils' +import { getModel } from '../utils/ai' export async function writer( dynamicSystemPrompt: string, diff --git a/lib/utils/ai.ts b/lib/utils/ai.ts new file mode 100644 index 00000000..1e98c218 --- /dev/null +++ b/lib/utils/ai.ts @@ -0,0 +1,127 @@ +import { getSelectedModel } from '@/lib/actions/users' +import { createOpenAI } from '@ai-sdk/openai' +import { createGoogleGenerativeAI } from '@ai-sdk/google' +import { createAmazonBedrock } from '@ai-sdk/amazon-bedrock' +import { createXai } from '@ai-sdk/xai' +import { getCurrentUserIdOnServer } from '@/lib/auth/get-current-user' +import { withSupermemory } from '@supermemory/tools/ai-sdk' +import { LanguageModel } from 'ai' + +export async function getModel(requireVision: boolean = false, userId?: string, chatId?: string) { + const actualUserId = userId || await getCurrentUserIdOnServer(); + + async function getBaseModel() { + const selectedModel = await getSelectedModel(); + + const xaiApiKey = process.env.XAI_API_KEY; + const gemini3ProApiKey = process.env.GEMINI_3_PRO_API_KEY; + const awsAccessKeyId = process.env.AWS_ACCESS_KEY_ID; + const awsSecretAccessKey = process.env.AWS_SECRET_ACCESS_KEY; + const awsRegion = process.env.AWS_REGION; + const bedrockModelId = process.env.BEDROCK_MODEL_ID || 'anthropic.claude-3-5-sonnet-20241022-v2:0'; + const openaiApiKey = process.env.OPENAI_API_KEY; + + if (selectedModel) { + switch (selectedModel) { + case 'Grok 4.2': + if (xaiApiKey) { + const xai = createXai({ + apiKey: xaiApiKey, + baseURL: 'https://api.x.ai/v1', + }); + try { + return xai('grok-4-fast-non-reasoning'); + } catch (error) { + console.error('Selected model "Grok 4.2" is configured but failed to initialize.', error); + throw new Error('Failed to initialize selected model.'); + } + } else { + console.error('User selected "Grok 4.2" but XAI_API_KEY is not set.'); + throw new Error('Selected model is not configured.'); + } + 
case 'Gemini 3': + if (gemini3ProApiKey) { + const google = createGoogleGenerativeAI({ + apiKey: gemini3ProApiKey, + }); + try { + return google('gemini-3-pro-preview'); + } catch (error) { + console.error('Selected model "Gemini 3" is configured but failed to initialize.', error); + throw new Error('Failed to initialize selected model.'); + } + } else { + console.error('User selected "Gemini 3" but GEMINI_3_PRO_API_KEY is not set.'); + throw new Error('Selected model is not configured.'); + } + case 'GPT-5.1': + if (openaiApiKey) { + const openai = createOpenAI({ + apiKey: openaiApiKey, + }); + return openai('gpt-4o'); + } else { + console.error('User selected "GPT-5.1" but OPENAI_API_KEY is not set.'); + throw new Error('Selected model is not configured.'); + } + } + } + + // Default behavior: Grok -> Gemini -> Bedrock -> OpenAI + if (xaiApiKey) { + const xai = createXai({ + apiKey: xaiApiKey, + baseURL: 'https://api.x.ai/v1', + }); + try { + return xai('grok-4-fast-non-reasoning'); + } catch (error) { + console.warn('xAI API unavailable, falling back to next provider:'); + } + } + + if (gemini3ProApiKey) { + const google = createGoogleGenerativeAI({ + apiKey: gemini3ProApiKey, + }); + try { + return google('gemini-3-pro-preview'); + } catch (error) { + console.warn('Gemini 3 Pro API unavailable, falling back to next provider:', error); + } + } + + if (awsAccessKeyId && awsSecretAccessKey) { + const bedrock = createAmazonBedrock({ + bedrockOptions: { + region: awsRegion, + credentials: { + accessKeyId: awsAccessKeyId, + secretAccessKey: awsSecretAccessKey, + }, + }, + }); + const model = bedrock(bedrockModelId, { + additionalModelRequestFields: { top_k: 350 }, + }); + return model; + } + + const openai = createOpenAI({ + apiKey: openaiApiKey, + }); + return openai('gpt-4o'); + } + + const model = await getBaseModel(); + + if (process.env.SUPERMEMORY_API_KEY && actualUserId) { + return withSupermemory(model as any, actualUserId, { + conversationId: chatId, + mode: 'full', + addMemory: 'always' + }); + } + + return model; +} diff --git a/lib/utils/index.ts b/lib/utils/index.ts index 60e4d0a2..c36ea4d5 100644 --- a/lib/utils/index.ts +++ b/lib/utils/index.ts @@ -1,13 +1,6 @@ import { type ClassValue, clsx } from 'clsx' import { twMerge } from 'tailwind-merge' -import { getSelectedModel } from '@/lib/actions/users' -import { createOpenAI } from '@ai-sdk/openai' -import { createGoogleGenerativeAI } from '@ai-sdk/google' -import { createAmazonBedrock } from '@ai-sdk/amazon-bedrock' -import { createXai } from '@ai-sdk/xai'; -import { v4 as uuidv4 } from 'uuid'; -import { getCurrentUserIdOnServer } from '@/lib/auth/get-current-user'; -import { withSupermemory } from '@supermemory/tools/ai-sdk'; +import { v4 as uuidv4 } from 'uuid' export function cn(...inputs: ClassValue[]) { return twMerge(clsx(inputs)) @@ -22,122 +15,3 @@ export function generateUUID(): string { * Returns a UUID v4 string. 
*/ export { generateUUID as nanoid }; - -export async function getModel(requireVision: boolean = false, userId?: string, chatId?: string) { - const actualUserId = userId || await getCurrentUserIdOnServer(); - - async function getBaseModel() { - const selectedModel = await getSelectedModel(); - - const xaiApiKey = process.env.XAI_API_KEY; - const gemini3ProApiKey = process.env.GEMINI_3_PRO_API_KEY; - const awsAccessKeyId = process.env.AWS_ACCESS_KEY_ID; - const awsSecretAccessKey = process.env.AWS_SECRET_ACCESS_KEY; - const awsRegion = process.env.AWS_REGION; - const bedrockModelId = process.env.BEDROCK_MODEL_ID || 'anthropic.claude-3-5-sonnet-20241022-v2:0'; - const openaiApiKey = process.env.OPENAI_API_KEY; - - if (selectedModel) { - switch (selectedModel) { - case 'Grok 4.2': - if (xaiApiKey) { - const xai = createXai({ - apiKey: xaiApiKey, - baseURL: 'https://api.x.ai/v1', - }); - try { - return xai('grok-4-fast-non-reasoning'); - } catch (error) { - console.error('Selected model "Grok 4.2" is configured but failed to initialize.', error); - throw new Error('Failed to initialize selected model.'); - } - } else { - console.error('User selected "Grok 4.2" but XAI_API_KEY is not set.'); - throw new Error('Selected model is not configured.'); - } - case 'Gemini 3': - if (gemini3ProApiKey) { - const google = createGoogleGenerativeAI({ - apiKey: gemini3ProApiKey, - }); - try { - return google('gemini-3-pro-preview'); - } catch (error) { - console.error('Selected model "Gemini 3" is configured but failed to initialize.', error); - throw new Error('Failed to initialize selected model.'); - } - } else { - console.error('User selected "Gemini 3" but GEMINI_3_PRO_API_KEY is not set.'); - throw new Error('Selected model is not configured.'); - } - case 'GPT-5.1': - if (openaiApiKey) { - const openai = createOpenAI({ - apiKey: openaiApiKey, - }); - return openai('gpt-4o'); - } else { - console.error('User selected "GPT-5.1" but OPENAI_API_KEY is not set.'); - throw new Error('Selected model is not configured.'); - } - } - } - - // Default behavior: Grok -> Gemini -> Bedrock -> OpenAI - if (xaiApiKey) { - const xai = createXai({ - apiKey: xaiApiKey, - baseURL: 'https://api.x.ai/v1', - }); - try { - return xai('grok-4-fast-non-reasoning'); - } catch (error) { - console.warn('xAI API unavailable, falling back to next provider:'); - } - } - - if (gemini3ProApiKey) { - const google = createGoogleGenerativeAI({ - apiKey: gemini3ProApiKey, - }); - try { - return google('gemini-3-pro-preview'); - } catch (error) { - console.warn('Gemini 3 Pro API unavailable, falling back to next provider:', error); - } - } - - if (awsAccessKeyId && awsSecretAccessKey) { - const bedrock = createAmazonBedrock({ - bedrockOptions: { - region: awsRegion, - credentials: { - accessKeyId: awsAccessKeyId, - secretAccessKey: awsSecretAccessKey, - }, - }, - }); - const model = bedrock(bedrockModelId, { - additionalModelRequestFields: { top_k: 350 }, - }); - return model; - } - - const openai = createOpenAI({ - apiKey: openaiApiKey, - }); - return openai('gpt-4o'); - } - - const model = await getBaseModel(); - - if (process.env.SUPERMEMORY_API_KEY && actualUserId) { - return withSupermemory(model as any, actualUserId, { - conversationId: chatId, - mode: 'full', - addMemory: 'always' - }); - } - - return model; -} diff --git a/mapbox_mcp/hooks.ts b/mapbox_mcp/hooks.ts index 797f8852..efa7cba6 100644 --- a/mapbox_mcp/hooks.ts +++ b/mapbox_mcp/hooks.ts @@ -1,6 +1,6 @@ import { useState, useCallback, useRef, useEffect } from 'react'; 
import { generateText } from 'ai'; -import { getModel } from '@/lib/utils'; +import { getModel } from '@/lib/utils/ai'; // Define Tool type locally if needed type Tool = { From 3bd50a0d05a8254de6a41b53076ad50a6183d041 Mon Sep 17 00:00:00 2001 From: "google-labs-jules[bot]" <161369871+google-labs-jules[bot]@users.noreply.github.com> Date: Wed, 18 Feb 2026 08:57:15 +0000 Subject: [PATCH 3/3] feat: finalize Supermemory integration and model selection logic - Hoisted `getCurrentUserIdOnServer` import in `app/actions.tsx`. - Changed `userId` fallback from 'anonymous' to `undefined` for better memory isolation. - Refactored `getModel` to support `requireVision` and `requireStructuredOutput` flags. - Updated agents to request structured-output compatible models (avoiding Grok when necessary). - Fixed Supermemory tool configuration to use `containerTags: [userId]`. - Made Supermemory `addMemory` mode configurable via environment variables. - Made memory instructions in `Researcher` agent conditional on Supermemory availability. - Fixed typos in `Writer` agent system prompt. - Ensured all server-only AI utilities are in `lib/utils/ai.ts` to prevent client-side build errors. Co-authored-by: ngoiyaeric <115367894+ngoiyaeric@users.noreply.github.com> --- app/actions.tsx | 9 +- app/actions.tsx.orig | 838 +++++++++++++++++++++++++++++++ app/search/[id]/page.tsx | 2 +- app/search/[id]/page.tsx.orig | 76 +++ lib/actions/chat.ts | 6 +- lib/actions/suggest.ts | 2 +- lib/agents/inquire.tsx | 2 +- lib/agents/query-suggestor.tsx | 2 +- lib/agents/researcher.tsx | 11 +- lib/agents/resolution-search.tsx | 2 +- lib/agents/task-manager.tsx | 2 +- lib/agents/tools/index.tsx | 2 +- lib/agents/writer.tsx | 2 +- lib/utils/ai.ts | 88 ++-- 14 files changed, 978 insertions(+), 66 deletions(-) create mode 100644 app/actions.tsx.orig create mode 100644 app/search/[id]/page.tsx.orig diff --git a/app/actions.tsx b/app/actions.tsx index e7263c1d..dea5c0ee 100644 --- a/app/actions.tsx +++ b/app/actions.tsx @@ -17,6 +17,7 @@ import { writer } from '@/lib/agents/writer' import { saveChat, getSystemPrompt } from '@/lib/actions/chat' import { Chat, AIMessage } from '@/lib/types' import { UserMessage } from '@/components/user-message' +import { getCurrentUserIdOnServer } from '@/lib/auth/get-current-user' import { BotMessage } from '@/components/message' import { SearchSection } from '@/components/search-section' import SearchRelated from '@/components/search-related' @@ -41,9 +42,8 @@ async function submit(formData?: FormData, skip?: boolean) { const isGenerating = createStreamableValue(true) const isCollapsed = createStreamableValue(false) - const { getCurrentUserIdOnServer } = await import('@/lib/auth/get-current-user') const actualUserId = await getCurrentUserIdOnServer() - const userId = actualUserId || 'anonymous' + const userId = actualUserId || undefined const action = formData?.get('action') as string; const drawnFeaturesString = formData?.get('drawnFeatures') as string; @@ -401,7 +401,7 @@ async function submit(formData?: FormData, skip?: boolean) { } as CoreMessage) } - const currentSystemPrompt = (await getSystemPrompt(userId)) || '' + const currentSystemPrompt = (userId ? 
await getSystemPrompt(userId) : null) || '' const mapProvider = formData?.get('mapProvider') as 'mapbox' | 'google' async function processEvents() { @@ -627,9 +627,6 @@ export const AI = createAI({ } ] - const { getCurrentUserIdOnServer } = await import( - '@/lib/auth/get-current-user' - ) const actualUserId = await getCurrentUserIdOnServer() if (!actualUserId) { diff --git a/app/actions.tsx.orig b/app/actions.tsx.orig new file mode 100644 index 00000000..e7263c1d --- /dev/null +++ b/app/actions.tsx.orig @@ -0,0 +1,838 @@ +import { + StreamableValue, + createAI, + createStreamableUI, + createStreamableValue, + getAIState, + getMutableAIState +} from 'ai/rsc' +import { CoreMessage, ToolResultPart } from 'ai' +import { nanoid } from '@/lib/utils' +import type { FeatureCollection } from 'geojson' +import { Spinner } from '@/components/ui/spinner' +import { Section } from '@/components/section' +import { FollowupPanel } from '@/components/followup-panel' +import { inquire, researcher, taskManager, querySuggestor, resolutionSearch, type DrawnFeature } from '@/lib/agents' +import { writer } from '@/lib/agents/writer' +import { saveChat, getSystemPrompt } from '@/lib/actions/chat' +import { Chat, AIMessage } from '@/lib/types' +import { UserMessage } from '@/components/user-message' +import { BotMessage } from '@/components/message' +import { SearchSection } from '@/components/search-section' +import SearchRelated from '@/components/search-related' +import { GeoJsonLayer } from '@/components/map/geojson-layer' +import { ResolutionCarousel } from '@/components/resolution-carousel' +import { ResolutionImage } from '@/components/resolution-image' +import { CopilotDisplay } from '@/components/copilot-display' +import RetrieveSection from '@/components/retrieve-section' +import { VideoSearchSection } from '@/components/video-search-section' +import { MapQueryHandler } from '@/components/map/map-query-handler' + +// Define the type for related queries +type RelatedQueries = { + items: { query: string }[] +} + +async function submit(formData?: FormData, skip?: boolean) { + 'use server' + + const aiState = getMutableAIState() + const uiStream = createStreamableUI() + const isGenerating = createStreamableValue(true) + const isCollapsed = createStreamableValue(false) + + const { getCurrentUserIdOnServer } = await import('@/lib/auth/get-current-user') + const actualUserId = await getCurrentUserIdOnServer() + const userId = actualUserId || 'anonymous' + + const action = formData?.get('action') as string; + const drawnFeaturesString = formData?.get('drawnFeatures') as string; + let drawnFeatures: DrawnFeature[] = []; + try { + drawnFeatures = drawnFeaturesString ? JSON.parse(drawnFeaturesString) : []; + } catch (e) { + console.error('Failed to parse drawnFeatures:', e); + } + + if (action === 'resolution_search') { + const file_mapbox = formData?.get('file_mapbox') as File; + const file_google = formData?.get('file_google') as File; + const file = (formData?.get('file') as File) || file_mapbox || file_google; + const timezone = (formData?.get('timezone') as string) || 'UTC'; + const lat = formData?.get('latitude') ? parseFloat(formData.get('latitude') as string) : undefined; + const lng = formData?.get('longitude') ? parseFloat(formData.get('longitude') as string) : undefined; + const location = (lat !== undefined && lng !== undefined) ? { lat, lng } : undefined; + + if (!file) { + throw new Error('No file provided for resolution search.'); + } + + const mapboxBuffer = file_mapbox ? 
await file_mapbox.arrayBuffer() : null; + const mapboxDataUrl = mapboxBuffer ? `data:${file_mapbox.type};base64,${Buffer.from(mapboxBuffer).toString('base64')}` : null; + + const googleBuffer = file_google ? await file_google.arrayBuffer() : null; + const googleDataUrl = googleBuffer ? `data:${file_google.type};base64,${Buffer.from(googleBuffer).toString('base64')}` : null; + + const buffer = await file.arrayBuffer(); + const dataUrl = `data:${file.type};base64,${Buffer.from(buffer).toString('base64')}`; + + const messages: CoreMessage[] = [...(aiState.get().messages as any[])].filter( + (message: any) => + message.role !== 'tool' && + message.type !== 'followup' && + message.type !== 'related' && + message.type !== 'end' && + message.type !== 'resolution_search_result' + ); + + const userInput = 'Analyze this map view.'; + const content: CoreMessage['content'] = [ + { type: 'text', text: userInput }, + { type: 'image', image: dataUrl, mimeType: file.type } + ]; + + aiState.update({ + ...aiState.get(), + messages: [ + ...aiState.get().messages, + { id: nanoid(), role: 'user', content, type: 'input' } + ] + }); + messages.push({ role: 'user', content }); + + const summaryStream = createStreamableValue('Analyzing map view...'); + const groupeId = nanoid(); + + async function processResolutionSearch() { + try { + const streamResult = await resolutionSearch(messages, timezone, drawnFeatures, location, userId, aiState.get().chatId); + + let fullSummary = ''; + for await (const partialObject of streamResult.partialObjectStream) { + if (partialObject.summary) { + fullSummary = partialObject.summary; + summaryStream.update(fullSummary); + } + } + + const analysisResult = await streamResult.object; + summaryStream.done(analysisResult.summary || 'Analysis complete.'); + + if (analysisResult.geoJson) { + uiStream.append( + + ); + } + + messages.push({ role: 'assistant', content: analysisResult.summary || 'Analysis complete.' }); + + const sanitizedMessages: CoreMessage[] = messages.map((m: any) => { + if (Array.isArray(m.content)) { + return { + ...m, + content: m.content.filter((part: any) => part.type !== 'image') + } as CoreMessage + } + return m + }) + + const currentMessages = aiState.get().messages; + const sanitizedHistory = currentMessages.map((m: any) => { + if (m.role === "user" && Array.isArray(m.content)) { + return { + ...m, + content: m.content.map((part: any) => + part.type === "image" ? { ...part, image: "IMAGE_PROCESSED" } : part + ) + } + } + return m + }); + const relatedQueries = await querySuggestor(uiStream, sanitizedMessages, userId, aiState.get().chatId); + uiStream.append( +
+ +
+ ); + + await new Promise(resolve => setTimeout(resolve, 500)); + + aiState.done({ + ...aiState.get(), + messages: [ + ...aiState.get().messages, + { + id: groupeId, + role: 'assistant', + content: analysisResult.summary || 'Analysis complete.', + type: 'response' + }, + { + id: groupeId, + role: 'assistant', + content: JSON.stringify({ + ...analysisResult, + image: dataUrl, + mapboxImage: mapboxDataUrl, + googleImage: googleDataUrl + }), + type: 'resolution_search_result' + }, + { + id: groupeId, + role: 'assistant', + content: JSON.stringify(relatedQueries), + type: 'related' + }, + { + id: groupeId, + role: 'assistant', + content: 'followup', + type: 'followup' + } + ] + }); + } catch (error) { + console.error('Error in resolution search:', error); + summaryStream.error(error); + } finally { + isGenerating.done(false); + uiStream.done(); + } + } + + processResolutionSearch(); + + uiStream.update( +
+ + +
+    );
+
+    return {
+      id: nanoid(),
+      isGenerating: isGenerating.value,
+      component: uiStream.value,
+      isCollapsed: isCollapsed.value
+    };
+  }
+
+  const file = !skip ? (formData?.get('file') as File) : undefined
+  const userInput = skip
+    ? `{"action": "skip"}`
+    : ((formData?.get('related_query') as string) ||
+      (formData?.get('input') as string))
+
+  // Canned answers for the two preset product questions (matched case-insensitively).
+  if (userInput && (userInput.toLowerCase().trim() === 'what is a planet computer?' || userInput.toLowerCase().trim() === 'what is qcx-terra?')) {
+    const definition = userInput.toLowerCase().trim() === 'what is a planet computer?'
+      ? `A planet computer is a proprietary environment aware system that interoperates weather forecasting, mapping and scheduling using cutting edge multi-agents to streamline automation and exploration on a planet. Available for our Pro and Enterprise customers. [QCX Pricing](https://www.queue.cx/#pricing)`
+      : `QCX-Terra is a model garden of pixel level precision geospatial foundational models for efficient land feature predictions from satellite imagery. Available for our Pro and Enterprise customers. [QCX Pricing](https://www.queue.cx/#pricing)`;
+
+    const content = JSON.stringify(Object.fromEntries(formData!));
+    const type = 'input';
+    const groupeId = nanoid();
+
+    aiState.update({
+      ...aiState.get(),
+      messages: [
+        ...aiState.get().messages,
+        {
+          id: nanoid(),
+          role: 'user',
+          content,
+          type,
+        },
+      ],
+    });
+
+    const definitionStream = createStreamableValue();
+    definitionStream.done(definition);
+
+    const answerSection = (
+
+ +
+ ); + + uiStream.update(answerSection); + + const relatedQueries = { items: [] }; + + aiState.done({ + ...aiState.get(), + messages: [ + ...aiState.get().messages, + { + id: groupeId, + role: 'assistant', + content: definition, + type: 'response', + }, + { + id: groupeId, + role: 'assistant', + content: JSON.stringify(relatedQueries), + type: 'related', + }, + { + id: groupeId, + role: 'assistant', + content: 'followup', + type: 'followup', + }, + ], + }); + + isGenerating.done(false); + uiStream.done(); + + return { + id: nanoid(), + isGenerating: isGenerating.value, + component: uiStream.value, + isCollapsed: isCollapsed.value + }; + } + + if (!userInput && !file) { + isGenerating.done(false) + return { + id: nanoid(), + isGenerating: isGenerating.value, + component: null, + isCollapsed: isCollapsed.value + } + } + + const messages: CoreMessage[] = [...(aiState.get().messages as any[])].filter( + (message: any) => + message.role !== 'tool' && + message.type !== 'followup' && + message.type !== 'related' && + message.type !== 'end' && + message.type !== 'resolution_search_result' + ).map((m: any) => { + if (Array.isArray(m.content)) { + return { + ...m, + content: m.content.filter((part: any) => + part.type !== "image" || (typeof part.image === "string" && part.image.startsWith("data:")) + ) + } as any + } + return m + }) + + const groupeId = nanoid() + const useSpecificAPI = process.env.USE_SPECIFIC_API_FOR_WRITER === 'true' + const maxMessages = useSpecificAPI ? 5 : 10 + messages.splice(0, Math.max(messages.length - maxMessages, 0)) + + const messageParts: { + type: 'text' | 'image' + text?: string + image?: string + mimeType?: string + }[] = [] + + if (userInput) { + messageParts.push({ type: 'text', text: userInput }) + } + + if (file) { + const buffer = await file.arrayBuffer() + if (file.type.startsWith('image/')) { + const dataUrl = `data:${file.type};base64,${Buffer.from( + buffer + ).toString('base64')}` + messageParts.push({ + type: 'image', + image: dataUrl, + mimeType: file.type + }) + } else if (file.type === 'text/plain') { + const textContent = Buffer.from(buffer).toString('utf-8') + const existingTextPart = messageParts.find(p => p.type === 'text') + if (existingTextPart) { + existingTextPart.text = `${textContent}\n\n${existingTextPart.text}` + } else { + messageParts.push({ type: 'text', text: textContent }) + } + } + } + + const hasImage = messageParts.some(part => part.type === 'image') + const content: CoreMessage['content'] = hasImage + ? messageParts as CoreMessage['content'] + : messageParts.map(part => part.text).join('\n') + + const type = skip + ? undefined + : formData?.has('input') || formData?.has('file') + ? 'input' + : formData?.has('related_query') + ? 
'input_related' + : 'inquiry' + + if (content) { + aiState.update({ + ...aiState.get(), + messages: [ + ...aiState.get().messages, + { + id: nanoid(), + role: 'user', + content, + type + } + ] + }) + messages.push({ + role: 'user', + content + } as CoreMessage) + } + + const currentSystemPrompt = (await getSystemPrompt(userId)) || '' + const mapProvider = formData?.get('mapProvider') as 'mapbox' | 'google' + + async function processEvents() { + let action: any = { object: { next: 'proceed' } } + if (!skip) { + const taskManagerResult = await taskManager(messages, userId, aiState.get().chatId) + if (taskManagerResult) { + action.object = taskManagerResult.object + } + } + + if (action.object.next === 'inquire') { + const inquiry = await inquire(uiStream, messages, userId, aiState.get().chatId) + uiStream.done() + isGenerating.done() + isCollapsed.done(false) + aiState.done({ + ...aiState.get(), + messages: [ + ...aiState.get().messages, + { + id: nanoid(), + role: 'assistant', + content: `inquiry: ${inquiry?.question}` + } + ] + }) + return + } + + isCollapsed.done(true) + let answer = '' + let toolOutputs: ToolResultPart[] = [] + let errorOccurred = false + const streamText = createStreamableValue() + uiStream.update() + + while ( + useSpecificAPI + ? answer.length === 0 + : answer.length === 0 && !errorOccurred + ) { + const { fullResponse, hasError, toolResponses } = await researcher(currentSystemPrompt, uiStream, streamText, messages, mapProvider, useSpecificAPI, drawnFeatures, userId, aiState.get().chatId) + answer = fullResponse + toolOutputs = toolResponses + errorOccurred = hasError + + if (toolOutputs.length > 0) { + toolOutputs.map(output => { + aiState.update({ + ...aiState.get(), + messages: [ + ...aiState.get().messages, + { + id: groupeId, + role: 'tool', + content: JSON.stringify(output.result), + name: output.toolName, + type: 'tool' + } + ] + }) + }) + } + } + + if (useSpecificAPI && answer.length === 0) { + const modifiedMessages = aiState + .get() + .messages.map(msg => + msg.role === 'tool' + ? { + ...msg, + role: 'assistant', + content: JSON.stringify(msg.content), + type: 'tool' + } + : msg + ) as CoreMessage[] + const latestMessages = modifiedMessages.slice(maxMessages * -1) + answer = await writer(currentSystemPrompt, uiStream, streamText, latestMessages, userId, aiState.get().chatId) + } else { + streamText.done() + } + + if (!errorOccurred) { + const relatedQueries = await querySuggestor(uiStream, messages, userId, aiState.get().chatId) + uiStream.append( +
+ +
+ ) + + await new Promise(resolve => setTimeout(resolve, 500)) + + aiState.done({ + ...aiState.get(), + messages: [ + ...aiState.get().messages, + { + id: groupeId, + role: 'assistant', + content: answer, + type: 'response' + }, + { + id: groupeId, + role: 'assistant', + content: JSON.stringify(relatedQueries), + type: 'related' + }, + { + id: groupeId, + role: 'assistant', + content: 'followup', + type: 'followup' + } + ] + }) + } + + isGenerating.done(false) + uiStream.done() + } + + processEvents() + + return { + id: nanoid(), + isGenerating: isGenerating.value, + component: uiStream.value, + isCollapsed: isCollapsed.value + } +} + +async function clearChat() { + 'use server' + + const aiState = getMutableAIState() + + aiState.done({ + chatId: nanoid(), + messages: [] + }) +} + +export type AIState = { + messages: AIMessage[] + chatId: string + isSharePage?: boolean +} + +export type UIState = { + id: string + component: React.ReactNode + isGenerating?: StreamableValue + isCollapsed?: StreamableValue +}[] + +const initialAIState: AIState = { + chatId: nanoid(), + messages: [] +} + +const initialUIState: UIState = [] + +export const AI = createAI({ + actions: { + submit, + clearChat + }, + initialUIState, + initialAIState, + onGetUIState: async () => { + 'use server' + + const aiState = getAIState() as AIState + if (aiState) { + const uiState = getUIStateFromAIState(aiState) + return uiState + } + return initialUIState + }, + onSetAIState: async ({ state }) => { + 'use server' + + if (!state.messages.some(e => e.type === 'response')) { + return + } + + const { chatId, messages } = state + const createdAt = new Date() + const path = `/search/${chatId}` + + let title = 'Untitled Chat' + if (messages.length > 0) { + const firstMessageContent = messages[0].content + if (typeof firstMessageContent === 'string') { + try { + const parsedContent = JSON.parse(firstMessageContent) + title = parsedContent.input?.substring(0, 100) || 'Untitled Chat' + } catch (e) { + title = firstMessageContent.substring(0, 100) + } + } else if (Array.isArray(firstMessageContent)) { + const textPart = ( + firstMessageContent as { type: string; text?: string }[] + ).find(p => p.type === 'text') + title = + textPart && textPart.text + ? textPart.text.substring(0, 100) + : 'Image Message' + } + } + + const updatedMessages: AIMessage[] = [ + ...messages, + { + id: nanoid(), + role: 'assistant', + content: `end`, + type: 'end' + } + ] + + const { getCurrentUserIdOnServer } = await import( + '@/lib/auth/get-current-user' + ) + const actualUserId = await getCurrentUserIdOnServer() + + if (!actualUserId) { + console.error('onSetAIState: User not authenticated. Chat not saved.') + return + } + + const chat: Chat = { + id: chatId, + createdAt, + userId: actualUserId, + path, + title, + messages: updatedMessages + } + await saveChat(chat, actualUserId) + } +}) + +export const getUIStateFromAIState = (aiState: AIState): UIState => { + const chatId = aiState.chatId + const isSharePage = aiState.isSharePage + return aiState.messages + .map((message, index) => { + const { role, content, id, type, name } = message + + if ( + !type || + type === 'end' || + (isSharePage && type === 'related') || + (isSharePage && type === 'followup') + ) + return null + + switch (role) { + case 'user': + switch (type) { + case 'input': + case 'input_related': + let messageContent: string | any[] + try { + const json = JSON.parse(content as string) + messageContent = + type === 'input' ? 
json.input : json.related_query + } catch (e) { + messageContent = content + } + return { + id, + component: ( + + ) + } + case 'inquiry': + return { + id, + component: + } + } + break + case 'assistant': + const answer = createStreamableValue(content as string) + answer.done(content as string) + switch (type) { + case 'response': + return { + id, + component: ( +
+ +
+ ) + } + case 'related': + const relatedQueries = createStreamableValue({ + items: [] + }) + relatedQueries.done(JSON.parse(content as string)) + return { + id, + component: ( +
+ +
+ ) + } + case 'followup': + return { + id, + component: ( +
+ +
+ ) + } + case 'resolution_search_result': { + const analysisResult = JSON.parse(content as string); + const geoJson = analysisResult.geoJson as FeatureCollection; + const image = analysisResult.image as string; + const mapboxImage = analysisResult.mapboxImage as string; + const googleImage = analysisResult.googleImage as string; + + return { + id, + component: ( + <> + + {geoJson && ( + + )} + + ) + } + } + } + break + case 'tool': + try { + const toolOutput = JSON.parse(content as string) + const isCollapsed = createStreamableValue(true) + isCollapsed.done(true) + + if ( + toolOutput.type === 'MAP_QUERY_TRIGGER' && + name === 'geospatialQueryTool' + ) { + const mapUrl = toolOutput.mcp_response?.mapUrl; + const placeName = toolOutput.mcp_response?.location?.place_name; + + return { + id, + component: ( + <> + {mapUrl && ( + + )} + + + ), + isCollapsed: false + } + } + + const searchResults = createStreamableValue( + JSON.stringify(toolOutput) + ) + searchResults.done(JSON.stringify(toolOutput)) + switch (name) { + case 'search': + return { + id, + component: , + isCollapsed: isCollapsed.value + } + case 'retrieve': + return { + id, + component: , + isCollapsed: isCollapsed.value + } + case 'videoSearch': + return { + id, + component: ( + + ), + isCollapsed: isCollapsed.value + } + default: + console.warn( + `Unhandled tool result in getUIStateFromAIState: ${name}` + ) + return { id, component: null } + } + } catch (error) { + console.error( + 'Error parsing tool content in getUIStateFromAIState:', + error + ) + return { + id, + component: null + } + } + break + default: + return { + id, + component: null + } + } + }) + .filter(message => message !== null) as UIState +} diff --git a/app/search/[id]/page.tsx b/app/search/[id]/page.tsx index 8db74186..743b6bac 100644 --- a/app/search/[id]/page.tsx +++ b/app/search/[id]/page.tsx @@ -18,7 +18,7 @@ export async function generateMetadata({ params }: SearchPageProps) { // TODO: Metadata generation might need authenticated user if chats are private // For now, assuming getChat can be called or it handles anon access for metadata appropriately const userId = await getCurrentUserIdOnServer(); // Attempt to get user for metadata - const chat = await getChat(id, userId || 'anonymous'); // Pass userId or 'anonymous' if none + const chat = await getChat(id, userId || undefined); // Pass userId or undefined if none return { title: chat?.title?.toString().slice(0, 50) || 'Search', }; diff --git a/app/search/[id]/page.tsx.orig b/app/search/[id]/page.tsx.orig new file mode 100644 index 00000000..8db74186 --- /dev/null +++ b/app/search/[id]/page.tsx.orig @@ -0,0 +1,76 @@ +import { notFound, redirect } from 'next/navigation'; +import { Chat } from '@/components/chat'; +import { getChat, getChatMessages } from '@/lib/actions/chat'; // Added getChatMessages +import { AI } from '@/app/actions'; +import { MapDataProvider } from '@/components/map/map-data-context'; +import { getCurrentUserIdOnServer } from '@/lib/auth/get-current-user'; // For server-side auth +import type { AIMessage } from '@/lib/types'; // For AIMessage type +import type { Message as DrizzleMessage } from '@/lib/actions/chat-db'; // For DrizzleMessage type + +export const maxDuration = 60; + +export interface SearchPageProps { + params: Promise<{ id: string }>; // Keep as is for now +} + +export async function generateMetadata({ params }: SearchPageProps) { + const { id } = await params; // Keep as is for now + // TODO: Metadata generation might need authenticated user if chats are private + // 
For now, assuming getChat can be called or it handles anon access for metadata appropriately + const userId = await getCurrentUserIdOnServer(); // Attempt to get user for metadata + const chat = await getChat(id, userId || 'anonymous'); // Pass userId or 'anonymous' if none + return { + title: chat?.title?.toString().slice(0, 50) || 'Search', + }; +} + +export default async function SearchPage({ params }: SearchPageProps) { + const { id } = await params; // Keep as is for now + const userId = await getCurrentUserIdOnServer(); + + if (!userId) { + // If no user, redirect to login or show appropriate page + // For now, redirecting to home, but a login page would be better. + redirect('/'); + } + + const chat = await getChat(id, userId); + + if (!chat) { + // If chat doesn't exist or user doesn't have access (handled by getChat) + notFound(); + } + + // Fetch messages for the chat + const dbMessages: DrizzleMessage[] = await getChatMessages(chat.id); + + // Transform DrizzleMessages to AIMessages + const initialMessages: AIMessage[] = dbMessages.map((dbMsg): AIMessage => { + return { + id: dbMsg.id, + role: dbMsg.role as AIMessage['role'], // Cast role, ensure AIMessage['role'] includes all dbMsg.role possibilities + content: dbMsg.content, + createdAt: dbMsg.createdAt ? new Date(dbMsg.createdAt) : undefined, + // 'type' and 'name' are not in the basic Drizzle 'messages' schema. + // These would be undefined unless specific logic is added to derive them. + // For instance, if a message with role 'tool' should have a 'name', + // or if some messages have a specific 'type' based on content or other flags. + // This mapping assumes standard user/assistant messages primarily. + }; + }); + + return ( + + + + + + ); +} \ No newline at end of file diff --git a/lib/actions/chat.ts b/lib/actions/chat.ts index f36f2cf6..bc293b27 100644 --- a/lib/actions/chat.ts +++ b/lib/actions/chat.ts @@ -35,7 +35,7 @@ export async function getChats(userId?: string | null): Promise { } } -export async function getChat(id: string, userId: string): Promise { +export async function getChat(id: string, userId?: string): Promise { if (!userId) { console.warn('getChat called without userId.') return null; @@ -148,7 +148,7 @@ export async function updateDrawingContext(chatId: string, contextData: { drawnF } export async function saveSystemPrompt( - userId: string, + userId: string | undefined, prompt: string ): Promise<{ success?: boolean; error?: string }> { if (!userId) return { error: 'User ID is required' } @@ -167,7 +167,7 @@ export async function saveSystemPrompt( } export async function getSystemPrompt( - userId: string + userId?: string ): Promise { if (!userId) return null diff --git a/lib/actions/suggest.ts b/lib/actions/suggest.ts index c983f13a..f054f651 100644 --- a/lib/actions/suggest.ts +++ b/lib/actions/suggest.ts @@ -28,7 +28,7 @@ export async function getSuggestions( ;(async () => { const result = await streamObject({ - model: (await getModel()) as LanguageModel, + model: (await getModel(false, undefined, undefined, true)) as LanguageModel, system: systemPrompt, messages: [{ role: 'user', content: query }], schema: relatedSchema diff --git a/lib/agents/inquire.tsx b/lib/agents/inquire.tsx index d725c5cd..d1f023a1 100644 --- a/lib/agents/inquire.tsx +++ b/lib/agents/inquire.tsx @@ -25,7 +25,7 @@ export async function inquire( let finalInquiry: PartialInquiry = {}; const result = await streamObject({ - model: (await getModel(false, userId, chatId)) as LanguageModel, + model: (await getModel(false, userId, 
chatId, true)) as LanguageModel, system: `As a planet computer, your goal is to help the user narrow down their query for more efficient research. Ask a clear and concise question to clarify the user's intent or to get missing information. For geospatial queries, focus on location, time, or specific travel needs. diff --git a/lib/agents/query-suggestor.tsx b/lib/agents/query-suggestor.tsx index 09e6fa12..8b6979de 100644 --- a/lib/agents/query-suggestor.tsx +++ b/lib/agents/query-suggestor.tsx @@ -20,7 +20,7 @@ export async function querySuggestor( let finalRelatedQueries: PartialRelated = {} const result = await streamObject({ - model: (await getModel(false, userId, chatId)) as LanguageModel, + model: (await getModel(false, userId, chatId, true)) as LanguageModel, system: `As a professional web researcher, your task is to generate a set of three queries that explore the subject matter more deeply, building upon the initial query and the information uncovered in its search results. For instance, if the original query was "Starship's third test flight key milestones", your output should follow this format: diff --git a/lib/agents/researcher.tsx b/lib/agents/researcher.tsx index c2c70190..21bf5192 100644 --- a/lib/agents/researcher.tsx +++ b/lib/agents/researcher.tsx @@ -17,7 +17,9 @@ import { DrawnFeature } from './resolution-search' // This magic tag lets us write raw multi-line strings with backticks, arrows, etc. const raw = String.raw -const getDefaultSystemPrompt = (date: string, drawnFeatures?: DrawnFeature[]) => raw` +const memoryInstructions = `6. **Long-term Memory:** You have access to the user's long-term memory. Use 'searchMemories' to retrieve past preferences, business intricacies, or context from previous yearly usage. Use 'addMemory' to save new preferences or important business details that should be remembered across sessions to improve personalized service incrementally.` + +const getDefaultSystemPrompt = (date: string, drawnFeatures?: DrawnFeature[], isMemoryEnabled?: boolean) => raw` As a comprehensive AI assistant, your primary directive is **Exploration Efficiency**. You must use the provided tools judiciously to gather information and formulate a response. Current date and time: ${date}. @@ -32,7 +34,7 @@ Use these user-drawn areas/lines as primary areas of interest for your analysis 3. **Search Specificity:** When using the 'search' tool, formulate queries that are as specific as possible. 4. **Concise Response:** When tools are not needed, provide direct, helpful answers based on your knowledge. Match the user's language. 5. **Citations:** Always cite source URLs when using information from tools. -6. **Long-term Memory:** You have access to the user's long-term memory. Use 'searchMemories' to retrieve past preferences, business intricacies, or context from previous yearly usage. Use 'addMemory' to save new preferences or important business details that should be remembered across sessions to improve personalized service incrementally. +${isMemoryEnabled ? memoryInstructions : ''} ### **Tool Usage Guidelines (Mandatory)** @@ -101,11 +103,12 @@ export async function researcher( ) const currentDate = new Date().toLocaleString() + const isMemoryEnabled = !!(process.env.SUPERMEMORY_API_KEY && userId) const systemPromptToUse = dynamicSystemPrompt?.trim() - ? dynamicSystemPrompt - : getDefaultSystemPrompt(currentDate, drawnFeatures) + ? (isMemoryEnabled ? 
`${dynamicSystemPrompt}\n\n${memoryInstructions}` : dynamicSystemPrompt) + : getDefaultSystemPrompt(currentDate, drawnFeatures, isMemoryEnabled) // Check if any message contains an image const hasImage = messages.some(message => diff --git a/lib/agents/resolution-search.tsx b/lib/agents/resolution-search.tsx index 61260628..31ac4ee4 100644 --- a/lib/agents/resolution-search.tsx +++ b/lib/agents/resolution-search.tsx @@ -90,7 +90,7 @@ Analyze the user's prompt and the image to provide a holistic understanding of t // Use streamObject to get partial results. return streamObject({ - model: await getModel(hasImage, userId, chatId), + model: await getModel(hasImage, userId, chatId, true), system: systemPrompt, messages: filteredMessages, schema: resolutionSearchSchema, diff --git a/lib/agents/task-manager.tsx b/lib/agents/task-manager.tsx index b2a6749b..d4999dbb 100644 --- a/lib/agents/task-manager.tsx +++ b/lib/agents/task-manager.tsx @@ -16,7 +16,7 @@ export async function taskManager(messages: CoreMessage[], userId?: string, chat } const result = await generateObject({ - model: (await getModel(false, userId, chatId)) as LanguageModel, + model: (await getModel(false, userId, chatId, true)) as LanguageModel, system: `As a planet computer, your primary objective is to act as an efficient **Task Manager** for the user's query. Your goal is to minimize unnecessary steps and maximize the efficiency of the subsequent exploration phase (researcher agent). You must first analyze the user's input and determine the optimal course of action. You have two options at your disposal: diff --git a/lib/agents/tools/index.tsx b/lib/agents/tools/index.tsx index 72dde90d..71dd2312 100644 --- a/lib/agents/tools/index.tsx +++ b/lib/agents/tools/index.tsx @@ -39,7 +39,7 @@ export const getTools = ({ uiStream, fullResponse, mapProvider, userId }: ToolPr if (process.env.SUPERMEMORY_API_KEY && userId) { const memoryTools = supermemoryTools(process.env.SUPERMEMORY_API_KEY, { - projectId: userId + containerTags: [userId] }) tools.searchMemories = memoryTools.searchMemories tools.addMemory = memoryTools.addMemory diff --git a/lib/agents/writer.tsx b/lib/agents/writer.tsx index 1b31581a..07283a9e 100644 --- a/lib/agents/writer.tsx +++ b/lib/agents/writer.tsx @@ -27,7 +27,7 @@ export async function writer( Link format: [link text](url) Image format: ![alt text](url) - There are also some proconfigured example queires. + There are also some preconfigured example queries. When asked about 'What is a planet computer?' 
answer with the following: '"A planet computer is a proprietary environment aware system that interoperates Climate forecasting, mapping and scheduling using cutting edge multi-agents to streamline automation and exploration on a planet' `; diff --git a/lib/utils/ai.ts b/lib/utils/ai.ts index 1e98c218..c7cbfae8 100644 --- a/lib/utils/ai.ts +++ b/lib/utils/ai.ts @@ -7,7 +7,12 @@ import { getCurrentUserIdOnServer } from '@/lib/auth/get-current-user' import { withSupermemory } from '@supermemory/tools/ai-sdk' import { LanguageModel } from 'ai' -export async function getModel(requireVision: boolean = false, userId?: string, chatId?: string) { +export async function getModel( + requireVision: boolean = false, + userId?: string, + chatId?: string, + requireStructuredOutput: boolean = false +) { const actualUserId = userId || await getCurrentUserIdOnServer(); async function getBaseModel() { @@ -21,76 +26,58 @@ export async function getModel(requireVision: boolean = false, userId?: string, const bedrockModelId = process.env.BEDROCK_MODEL_ID || 'anthropic.claude-3-5-sonnet-20241022-v2:0'; const openaiApiKey = process.env.OPENAI_API_KEY; + // Handle user preference first if (selectedModel) { switch (selectedModel) { case 'Grok 4.2': - if (xaiApiKey) { + if (xaiApiKey && !requireStructuredOutput && !requireVision) { const xai = createXai({ apiKey: xaiApiKey, baseURL: 'https://api.x.ai/v1', }); - try { - return xai('grok-4-fast-non-reasoning'); - } catch (error) { - console.error('Selected model "Grok 4.2" is configured but failed to initialize.', error); - throw new Error('Failed to initialize selected model.'); - } - } else { - console.error('User selected "Grok 4.2" but XAI_API_KEY is not set.'); - throw new Error('Selected model is not configured.'); + return xai('grok-4-fast-non-reasoning'); } + break; case 'Gemini 3': if (gemini3ProApiKey) { const google = createGoogleGenerativeAI({ apiKey: gemini3ProApiKey, }); - try { - return google('gemini-3-pro-preview'); - } catch (error) { - console.error('Selected model "Gemini 3" is configured but failed to initialize.', error); - throw new Error('Failed to initialize selected model.'); - } - } else { - console.error('User selected "Gemini 3" but GEMINI_3_PRO_API_KEY is not set.'); - throw new Error('Selected model is not configured.'); + return google('gemini-3-pro-preview'); } + break; + case 'GPT-4o': case 'GPT-5.1': if (openaiApiKey) { const openai = createOpenAI({ apiKey: openaiApiKey, }); return openai('gpt-4o'); - } else { - console.error('User selected "GPT-5.1" but OPENAI_API_KEY is not set.'); - throw new Error('Selected model is not configured.'); } + break; + default: + console.warn(`Unknown model selection: ${selectedModel}. Falling back to default provider chain.`); } } - // Default behavior: Grok -> Gemini -> Bedrock -> OpenAI - if (xaiApiKey) { - const xai = createXai({ - apiKey: xaiApiKey, - baseURL: 'https://api.x.ai/v1', + // Default provider chain: OpenAI -> Gemini -> Bedrock -> Xai + // OpenAI and Gemini are our primary choices for vision and structured output. 
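+  // Illustrative call sites (a sketch based on the agents touched in this change, not new API surface):
+  //   await getModel(false, userId, chatId, true)     // structured-output agents, e.g. inquire / query-suggestor via streamObject
+  //   await getModel(hasImage, userId, chatId, true)  // resolution search: vision when the message carries an image
+  // Passing requireStructuredOutput / requireVision prunes providers (like xAI below) that this chain treats as incompatible.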
+ + if (openaiApiKey) { + const openai = createOpenAI({ + apiKey: openaiApiKey, }); - try { - return xai('grok-4-fast-non-reasoning'); - } catch (error) { - console.warn('xAI API unavailable, falling back to next provider:'); - } + return openai('gpt-4o'); } if (gemini3ProApiKey) { const google = createGoogleGenerativeAI({ apiKey: gemini3ProApiKey, }); - try { - return google('gemini-3-pro-preview'); - } catch (error) { - console.warn('Gemini 3 Pro API unavailable, falling back to next provider:', error); - } + return google('gemini-3-pro-preview'); } + // Bedrock vision support varies by model; we still attempt it here and let the configured model reject unsupported inputs. if (awsAccessKeyId && awsSecretAccessKey) { const bedrock = createAmazonBedrock({ bedrockOptions: { @@ -101,25 +88,36 @@ export async function getModel(requireVision: boolean = false, userId?: string, }, }, }); - const model = bedrock(bedrockModelId, { + return bedrock(bedrockModelId, { additionalModelRequestFields: { top_k: 350 }, }); - return model; } - const openai = createOpenAI({ - apiKey: openaiApiKey, - }); - return openai('gpt-4o'); + if (xaiApiKey && !requireStructuredOutput && !requireVision) { + const xai = createXai({ + apiKey: xaiApiKey, + baseURL: 'https://api.x.ai/v1', + }); + return xai('grok-4-fast-non-reasoning'); + } + + const requirements: string[] = []; + if (requireVision) requirements.push('vision'); + if (requireStructuredOutput) requirements.push('structured output'); + + throw new Error(`No compatible AI provider configured. Missing key or provider doesn't support: ${requirements.join(', ') || 'basic completion'}.`); } const model = await getBaseModel(); if (process.env.SUPERMEMORY_API_KEY && actualUserId) { + // Default to 'never' for addMemory unless explicitly opted in via env var for privacy. + const addMemoryMode = (process.env.SUPERMEMORY_ADD_MEMORY_MODE as 'always' | 'never' | undefined) || 'never'; + return withSupermemory(model as any, actualUserId, { conversationId: chatId, mode: 'full', - addMemory: 'always' + addMemory: addMemoryMode });
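+    // Example configuration (sketch; memory wrapping is active only when both variables are set):
+    //   SUPERMEMORY_API_KEY=sm_...            # placeholder key; enables withSupermemory and the memory tools
+    //   SUPERMEMORY_ADD_MEMORY_MODE=always    # opt in to automatic memory writes; omitted -> defaults to 'never'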