From 67bc0dcf4b303e415429383d55b4feef31a59ad6 Mon Sep 17 00:00:00 2001 From: Sam Rose Date: Tue, 25 Mar 2025 10:26:36 +0000 Subject: [PATCH 01/11] WIP BudibaseAI provider. --- packages/pro | 2 +- packages/types/src/documents/global/config.ts | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/packages/pro b/packages/pro index 761ec71e15..d15384e81c 160000 --- a/packages/pro +++ b/packages/pro @@ -1 +1 @@ -Subproject commit 761ec71e1543ef04887d6515f99a2c2911999ebf +Subproject commit d15384e81cf985ca04a514bea0bc9d091b6c482e diff --git a/packages/types/src/documents/global/config.ts b/packages/types/src/documents/global/config.ts index bd0340595c..422486e30f 100644 --- a/packages/types/src/documents/global/config.ts +++ b/packages/types/src/documents/global/config.ts @@ -117,6 +117,7 @@ export type AIProvider = | "AzureOpenAI" | "TogetherAI" | "Custom" + | "BudibaseAI" export interface ProviderConfig { provider: AIProvider From 11d95df20eabdf56b38a0e50bd5c8ed1b3539ad8 Mon Sep 17 00:00:00 2001 From: Sam Rose Date: Wed, 26 Mar 2025 11:44:07 +0000 Subject: [PATCH 02/11] Budibase AI self-host changes. --- packages/pro | 2 +- packages/server/scripts/dev/manage.js | 1 + packages/types/src/api/web/ai.ts | 13 +++++++++++++ 3 files changed, 15 insertions(+), 1 deletion(-) diff --git a/packages/pro b/packages/pro index d15384e81c..200f84cfe8 160000 --- a/packages/pro +++ b/packages/pro @@ -1 +1 @@ -Subproject commit d15384e81cf985ca04a514bea0bc9d091b6c482e +Subproject commit 200f84cfe865e482a386c7062ee40ec1812446a3 diff --git a/packages/server/scripts/dev/manage.js b/packages/server/scripts/dev/manage.js index a07fa1b582..a5af8650ef 100644 --- a/packages/server/scripts/dev/manage.js +++ b/packages/server/scripts/dev/manage.js @@ -47,6 +47,7 @@ async function init() { VERSION: "0.0.0+local", PASSWORD_MIN_LENGTH: "1", OPENAI_API_KEY: "sk-abcdefghijklmnopqrstuvwxyz1234567890abcd", + BUDICLOUD_URL: "https://budibaseqa.app", } config = { ...config, ...existingConfig } diff --git a/packages/types/src/api/web/ai.ts b/packages/types/src/api/web/ai.ts index 3962422b77..29eeff1040 100644 --- a/packages/types/src/api/web/ai.ts +++ b/packages/types/src/api/web/ai.ts @@ -1,5 +1,18 @@ import { EnrichedBinding } from "../../ui" +export interface Message { + role: "system" | "user" + content: string +} + +export interface ChatCompletionRequest { + messages: Message[] +} + +export interface ChatCompletionResponse { + message?: string +} + export interface GenerateJsRequest { prompt: string bindings?: EnrichedBinding[] From 52b059ce38ac07e690807967fa5a06127f60a120 Mon Sep 17 00:00:00 2001 From: Sam Rose Date: Mon, 31 Mar 2025 09:51:34 +0100 Subject: [PATCH 03/11] Update pro reference --- packages/pro | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/pro b/packages/pro index 200f84cfe8..12798a298b 160000 --- a/packages/pro +++ b/packages/pro @@ -1 +1 @@ -Subproject commit 200f84cfe865e482a386c7062ee40ec1812446a3 +Subproject commit 12798a298be77223f58545021963c224e71243b2 From edbad1df810ef2beec376cf57edef6790dbe173e Mon Sep 17 00:00:00 2001 From: Sam Rose Date: Mon, 31 Mar 2025 15:31:41 +0100 Subject: [PATCH 04/11] Added a new suite of AI tests, replacing unit tests in pro package. 
---
 packages/backend-core/src/configs/configs.ts  |   3 +
 .../src/configs/tests/configs.spec.ts         |   1 -
 packages/pro                                  |   2 +-
 .../server/src/api/routes/tests/ai.spec.ts    | 204 ++++++++++++++++++
 .../server/src/api/routes/tests/row.spec.ts   |   2 +-
 .../src/api/routes/tests/search.spec.ts       |   2 +-
 .../src/api/routes/tests/viewV2.spec.ts       |   2 +-
 .../automations/tests/steps/openai.spec.ts    |   9 +-
 packages/server/src/tests/utilities/api/ai.ts |  41 ++++
 .../server/src/tests/utilities/api/index.ts   |   3 +
 .../src/tests/utilities/mocks/ai/anthropic.ts |  54 +++++
 .../src/tests/utilities/mocks/ai/index.ts     |  10 +
 .../tests/utilities/mocks/{ => ai}/openai.ts  |  39 ++--
 packages/types/src/api/web/ai.ts              |   8 +
 14 files changed, 351 insertions(+), 29 deletions(-)
 create mode 100644 packages/server/src/api/routes/tests/ai.spec.ts
 create mode 100644 packages/server/src/tests/utilities/api/ai.ts
 create mode 100644 packages/server/src/tests/utilities/mocks/ai/anthropic.ts
 create mode 100644 packages/server/src/tests/utilities/mocks/ai/index.ts
 rename packages/server/src/tests/utilities/mocks/{ => ai}/openai.ts (79%)

diff --git a/packages/backend-core/src/configs/configs.ts b/packages/backend-core/src/configs/configs.ts
index f184bf87df..3747fff82e 100644
--- a/packages/backend-core/src/configs/configs.ts
+++ b/packages/backend-core/src/configs/configs.ts
@@ -47,6 +47,9 @@ export async function getConfig(
 export async function save(
   config: Config
 ): Promise<{ id: string; rev: string }> {
+  if (!config._id) {
+    config._id = generateConfigID(config.type)
+  }
   const db = context.getGlobalDB()
   return db.put(config)
 }
diff --git a/packages/backend-core/src/configs/tests/configs.spec.ts b/packages/backend-core/src/configs/tests/configs.spec.ts
index 2c6a1948ec..5b5186109c 100644
--- a/packages/backend-core/src/configs/tests/configs.spec.ts
+++ b/packages/backend-core/src/configs/tests/configs.spec.ts
@@ -12,7 +12,6 @@ describe("configs", () => {
 
   const setDbPlatformUrl = async (dbUrl: string) => {
     const settingsConfig = {
-      _id: configs.generateConfigID(ConfigType.SETTINGS),
       type: ConfigType.SETTINGS,
       config: {
         platformUrl: dbUrl,
diff --git a/packages/pro b/packages/pro
index 12798a298b..49f3b848fc 160000
--- a/packages/pro
+++ b/packages/pro
@@ -1 +1 @@
-Subproject commit 12798a298be77223f58545021963c224e71243b2
+Subproject commit 49f3b848fcf76c937917849005184db25dcf07c7
diff --git a/packages/server/src/api/routes/tests/ai.spec.ts b/packages/server/src/api/routes/tests/ai.spec.ts
new file mode 100644
index 0000000000..a4843c659b
--- /dev/null
+++ b/packages/server/src/api/routes/tests/ai.spec.ts
@@ -0,0 +1,204 @@
+import { mockChatGPTResponse } from "../../../tests/utilities/mocks/ai/openai"
+import TestConfiguration from "../../../tests/utilities/TestConfiguration"
+import nock from "nock"
+import { configs, features, setEnv } from "@budibase/backend-core"
+import { AIInnerConfig, ConfigType, ProviderConfig } from "@budibase/types"
+import { context } from "@budibase/backend-core"
+import { mocks } from "@budibase/backend-core/tests"
+import { MockLLMResponseFn } from "../../../tests/utilities/mocks/ai"
+import { mockAnthropicResponse } from "../../../tests/utilities/mocks/ai/anthropic"
+
+function dedent(str: string) {
+  return str
+    .split("\n")
+    .map(line => line.trim())
+    .join("\n")
+}
+
+type SetupFn = (
+  config: TestConfiguration
+) => Promise<() => Promise<void> | void>
+interface TestSetup {
+  name: string
+  setup: SetupFn
+  mockLLMResponse: MockLLMResponseFn
+}
+
+function budibaseAI(): SetupFn {
+  return async () => {
+    const cleanup = setEnv({
+      OPENAI_API_KEY: "test-key",
+    })
+    mocks.licenses.useBudibaseAI()
+    return async () => {
+      mocks.licenses.useCloudFree()
+      cleanup()
+    }
+  }
+}
+
+function customAIConfig(providerConfig: Partial<ProviderConfig>): SetupFn {
+  return async (config: TestConfiguration) => {
+    mocks.licenses.useAICustomConfigs()
+
+    const innerConfig: AIInnerConfig = {
+      myaiconfig: {
+        provider: "OpenAI",
+        name: "OpenAI",
+        apiKey: "test-key",
+        defaultModel: "gpt-4o-mini",
+        active: true,
+        isDefault: true,
+        ...providerConfig,
+      },
+    }
+
+    const { id, rev } = await config.doInTenant(
+      async () =>
+        await configs.save({
+          type: ConfigType.AI,
+          config: innerConfig,
+        })
+    )
+
+    return async () => {
+      mocks.licenses.useCloudFree()
+
+      await config.doInTenant(async () => {
+        const db = context.getGlobalDB()
+        await db.remove(id, rev)
+      })
+    }
+  }
+}
+
+const providers: TestSetup[] = [
+  {
+    name: "OpenAI API key",
+    setup: async () => {
+      return setEnv({
+        OPENAI_API_KEY: "test-key",
+      })
+    },
+    mockLLMResponse: mockChatGPTResponse,
+  },
+  {
+    name: "OpenAI API key with custom config",
+    setup: customAIConfig({ provider: "OpenAI", defaultModel: "gpt-4o-mini" }),
+    mockLLMResponse: mockChatGPTResponse,
+  },
+  {
+    name: "Anthropic API key with custom config",
+    setup: customAIConfig({
+      provider: "Anthropic",
+      defaultModel: "claude-3-5-sonnet-20240620",
+    }),
+    mockLLMResponse: mockAnthropicResponse,
+  },
+  {
+    name: "BudibaseAI",
+    setup: budibaseAI(),
+    mockLLMResponse: mockChatGPTResponse,
+  },
+]
+
+describe("AI", () => {
+  const config = new TestConfiguration()
+
+  beforeAll(async () => {
+    await config.init()
+  })
+
+  afterAll(() => {
+    config.end()
+  })
+
+  beforeEach(() => {
+    nock.cleanAll()
+  })
+
+  describe.each(providers)(
+    "provider: $name",
+    ({ setup, mockLLMResponse }: TestSetup) => {
+      let cleanup: () => Promise<void> | void
+      beforeAll(async () => {
+        cleanup = await setup(config)
+      })
+
+      afterAll(async () => {
+        const maybePromise = cleanup()
+        if (maybePromise) {
+          await maybePromise
+        }
+      })
+
+      describe("POST /api/ai/js", () => {
+        let cleanup: () => void
+        beforeAll(() => {
+          cleanup = features.testutils.setFeatureFlags("*", {
+            AI_JS_GENERATION: true,
+          })
+        })
+
+        afterAll(() => {
+          cleanup()
+        })
+
+        it("handles correct plain code response", async () => {
+          mockLLMResponse(`return 42`)
+
+          const { code } = await config.api.ai.generateJs({ prompt: "test" })
+          expect(code).toBe("return 42")
+        })
+
+        it("handles correct markdown code response", async () => {
+          mockLLMResponse(
+            dedent(`
+              \`\`\`js
+              return 42
+              \`\`\`
+            `)
+          )
+
+          const { code } = await config.api.ai.generateJs({ prompt: "test" })
+          expect(code).toBe("return 42")
+        })
+
+        it("handles multiple markdown code blocks returned", async () => {
+          mockLLMResponse(
+            dedent(`
+              This:
+
+              \`\`\`js
+              return 42
+              \`\`\`
+
+              Or this:
+
+              \`\`\`js
+              return 10
+              \`\`\`
+            `)
+          )
+
+          const { code } = await config.api.ai.generateJs({ prompt: "test" })
+          expect(code).toBe("return 42")
+        })
+
+        // TODO: handle when this happens
+        it.skip("handles no code response", async () => {
+          mockLLMResponse("I'm sorry, you're quite right, etc.")
+          const { code } = await config.api.ai.generateJs({ prompt: "test" })
+          expect(code).toBe("")
+        })
+
+        it("handles LLM errors", async () => {
+          mockLLMResponse(() => {
+            throw new Error("LLM error")
+          })
+          await config.api.ai.generateJs({ prompt: "test" }, { status: 500 })
+        })
+      })
+    }
+  )
+})
diff --git a/packages/server/src/api/routes/tests/row.spec.ts
b/packages/server/src/api/routes/tests/row.spec.ts index c55db8640c..3fb882ff2f 100644 --- a/packages/server/src/api/routes/tests/row.spec.ts +++ b/packages/server/src/api/routes/tests/row.spec.ts @@ -46,7 +46,7 @@ import { withEnv } from "../../../environment" import { JsTimeoutError } from "@budibase/string-templates" import { isDate } from "../../../utilities" import nock from "nock" -import { mockChatGPTResponse } from "../../../tests/utilities/mocks/openai" +import { mockChatGPTResponse } from "../../../tests/utilities/mocks/ai/openai" const timestamp = new Date("2023-01-26T11:48:57.597Z").toISOString() tk.freeze(timestamp) diff --git a/packages/server/src/api/routes/tests/search.spec.ts b/packages/server/src/api/routes/tests/search.spec.ts index e115297ee9..7a7f388a2c 100644 --- a/packages/server/src/api/routes/tests/search.spec.ts +++ b/packages/server/src/api/routes/tests/search.spec.ts @@ -44,7 +44,7 @@ import { generator, structures, mocks } from "@budibase/backend-core/tests" import { DEFAULT_EMPLOYEE_TABLE_SCHEMA } from "../../../db/defaultData/datasource_bb_default" import { generateRowIdField } from "../../../integrations/utils" import { cloneDeep } from "lodash/fp" -import { mockChatGPTResponse } from "../../../tests/utilities/mocks/openai" +import { mockChatGPTResponse } from "../../../tests/utilities/mocks/ai/openai" const descriptions = datasourceDescribe({ plus: true }) diff --git a/packages/server/src/api/routes/tests/viewV2.spec.ts b/packages/server/src/api/routes/tests/viewV2.spec.ts index ad41aa618c..bca7d16807 100644 --- a/packages/server/src/api/routes/tests/viewV2.spec.ts +++ b/packages/server/src/api/routes/tests/viewV2.spec.ts @@ -41,7 +41,7 @@ import { datasourceDescribe } from "../../../integrations/tests/utils" import merge from "lodash/merge" import { quotas } from "@budibase/pro" import { context, db, events, roles, setEnv } from "@budibase/backend-core" -import { mockChatGPTResponse } from "../../../tests/utilities/mocks/openai" +import { mockChatGPTResponse } from "../../../tests/utilities/mocks/ai/openai" import nock from "nock" const descriptions = datasourceDescribe({ plus: true }) diff --git a/packages/server/src/automations/tests/steps/openai.spec.ts b/packages/server/src/automations/tests/steps/openai.spec.ts index a06c633e5e..2536b083d7 100644 --- a/packages/server/src/automations/tests/steps/openai.spec.ts +++ b/packages/server/src/automations/tests/steps/openai.spec.ts @@ -2,10 +2,7 @@ import { createAutomationBuilder } from "../utilities/AutomationTestBuilder" import { setEnv as setCoreEnv } from "@budibase/backend-core" import { Model, MonthlyQuotaName, QuotaUsageType } from "@budibase/types" import TestConfiguration from "../../..//tests/utilities/TestConfiguration" -import { - mockChatGPTError, - mockChatGPTResponse, -} from "../../../tests/utilities/mocks/openai" +import { mockChatGPTResponse } from "../../../tests/utilities/mocks/ai/openai" import nock from "nock" import { mocks } from "@budibase/backend-core/tests" import { quotas } from "@budibase/pro" @@ -83,7 +80,9 @@ describe("test the openai action", () => { }) it("should present the correct error message when an error is thrown from the createChatCompletion call", async () => { - mockChatGPTError() + mockChatGPTResponse(() => { + throw new Error("oh no") + }) const result = await expectAIUsage(0, () => createAutomationBuilder(config) diff --git a/packages/server/src/tests/utilities/api/ai.ts b/packages/server/src/tests/utilities/api/ai.ts new file mode 100644 index 
0000000000..ed37350872
--- /dev/null
+++ b/packages/server/src/tests/utilities/api/ai.ts
@@ -0,0 +1,41 @@
+import {
+  ChatCompletionRequest,
+  ChatCompletionResponse,
+  GenerateCronRequest,
+  GenerateCronResponse,
+  GenerateJsRequest,
+  GenerateJsResponse,
+} from "@budibase/types"
+import { Expectations, TestAPI } from "./base"
+
+export class AIAPI extends TestAPI {
+  generateJs = async (
+    req: GenerateJsRequest,
+    expectations?: Expectations
+  ): Promise<GenerateJsResponse> => {
+    return await this._post<GenerateJsResponse>(`/api/ai/js`, {
+      body: req,
+      expectations,
+    })
+  }
+
+  generateCron = async (
+    req: GenerateCronRequest,
+    expectations?: Expectations
+  ): Promise<GenerateCronResponse> => {
+    return await this._post<GenerateCronResponse>(`/api/ai/cron`, {
+      body: req,
+      expectations,
+    })
+  }
+
+  chat = async (
+    req: ChatCompletionRequest,
+    expectations?: Expectations
+  ): Promise<ChatCompletionResponse> => {
+    return await this._post<ChatCompletionResponse>(`/api/ai/chat`, {
+      body: req,
+      expectations,
+    })
+  }
+}
diff --git a/packages/server/src/tests/utilities/api/index.ts b/packages/server/src/tests/utilities/api/index.ts
index ba99c2eca0..9c00b77b73 100644
--- a/packages/server/src/tests/utilities/api/index.ts
+++ b/packages/server/src/tests/utilities/api/index.ts
@@ -22,8 +22,10 @@ import { UserPublicAPI } from "./public/user"
 import { MiscAPI } from "./misc"
 import { OAuth2API } from "./oauth2"
 import { AssetsAPI } from "./assets"
+import { AIAPI } from "./ai"
 
 export default class API {
+  ai: AIAPI
   application: ApplicationAPI
   attachment: AttachmentAPI
   automation: AutomationAPI
@@ -52,6 +54,7 @@ export default class API {
   }
 
   constructor(config: TestConfiguration) {
+    this.ai = new AIAPI(config)
     this.application = new ApplicationAPI(config)
     this.attachment = new AttachmentAPI(config)
     this.automation = new AutomationAPI(config)
diff --git a/packages/server/src/tests/utilities/mocks/ai/anthropic.ts b/packages/server/src/tests/utilities/mocks/ai/anthropic.ts
new file mode 100644
index 0000000000..20f1e3cc0a
--- /dev/null
+++ b/packages/server/src/tests/utilities/mocks/ai/anthropic.ts
@@ -0,0 +1,54 @@
+import AnthropicClient from "@anthropic-ai/sdk"
+import nock from "nock"
+import { MockLLMResponseFn, MockLLMResponseOpts } from "."
+ +let chatID = 1 +const SPACE_REGEX = /\s+/g + +export const mockAnthropicResponse: MockLLMResponseFn = ( + answer: string | ((prompt: string) => string), + opts?: MockLLMResponseOpts +) => { + return nock(opts?.host || "https://api.anthropic.com") + .post("/v1/messages") + .reply((uri: string, body: nock.Body) => { + const req = body as AnthropicClient.MessageCreateParamsNonStreaming + const prompt = req.messages[0].content + if (typeof prompt !== "string") { + throw new Error("Anthropic mock only supports string prompts") + } + + let content + if (typeof answer === "function") { + try { + content = answer(prompt) + } catch (e) { + return [ + 500, + { + message: "Error in mock response function", + error: e, + }, + ] + } + } else { + content = answer + } + + const resp: AnthropicClient.Messages.Message = { + id: `${chatID++}`, + type: "message", + role: "assistant", + model: req.model, + stop_reason: "end_turn", + usage: { + input_tokens: prompt.split(SPACE_REGEX).length, + output_tokens: content.split(SPACE_REGEX).length, + }, + stop_sequence: null, + content: [{ type: "text", text: content }], + } + return [200, resp] + }) + .persist() +} diff --git a/packages/server/src/tests/utilities/mocks/ai/index.ts b/packages/server/src/tests/utilities/mocks/ai/index.ts new file mode 100644 index 0000000000..87f8ce77be --- /dev/null +++ b/packages/server/src/tests/utilities/mocks/ai/index.ts @@ -0,0 +1,10 @@ +import { Scope } from "nock" + +export interface MockLLMResponseOpts { + host?: string +} + +export type MockLLMResponseFn = ( + answer: string | ((prompt: string) => string), + opts?: MockLLMResponseOpts +) => Scope diff --git a/packages/server/src/tests/utilities/mocks/openai.ts b/packages/server/src/tests/utilities/mocks/ai/openai.ts similarity index 79% rename from packages/server/src/tests/utilities/mocks/openai.ts rename to packages/server/src/tests/utilities/mocks/ai/openai.ts index 7fcc0c08fc..c0e07adcaa 100644 --- a/packages/server/src/tests/utilities/mocks/openai.ts +++ b/packages/server/src/tests/utilities/mocks/ai/openai.ts @@ -1,12 +1,9 @@ import nock from "nock" +import { MockLLMResponseFn, MockLLMResponseOpts } from "." 
let chatID = 1 const SPACE_REGEX = /\s+/g -interface MockChatGPTResponseOpts { - host?: string -} - interface Message { role: string content: string @@ -47,19 +44,30 @@ interface ChatCompletionResponse { usage: Usage } -export function mockChatGPTResponse( +export const mockChatGPTResponse: MockLLMResponseFn = ( answer: string | ((prompt: string) => string), - opts?: MockChatGPTResponseOpts -) { + opts?: MockLLMResponseOpts +) => { return nock(opts?.host || "https://api.openai.com") .post("/v1/chat/completions") - .reply(200, (uri: string, requestBody: ChatCompletionRequest) => { - const messages = requestBody.messages + .reply((uri: string, body: nock.Body) => { + const req = body as ChatCompletionRequest + const messages = req.messages const prompt = messages[0].content let content if (typeof answer === "function") { - content = answer(prompt) + try { + content = answer(prompt) + } catch (e) { + return [ + 500, + { + message: "Error in mock response function", + error: e, + }, + ] + } } else { content = answer } @@ -76,7 +84,7 @@ export function mockChatGPTResponse( id: `chatcmpl-${chatID}`, object: "chat.completion", created: Math.floor(Date.now() / 1000), - model: requestBody.model, + model: req.model, system_fingerprint: `fp_${chatID}`, choices: [ { @@ -97,14 +105,7 @@ export function mockChatGPTResponse( }, }, } - return response + return [200, response] }) .persist() } - -export function mockChatGPTError() { - return nock("https://api.openai.com") - .post("/v1/chat/completions") - .reply(500, "Internal Server Error") - .persist() -} diff --git a/packages/types/src/api/web/ai.ts b/packages/types/src/api/web/ai.ts index 29eeff1040..f9c587ca0b 100644 --- a/packages/types/src/api/web/ai.ts +++ b/packages/types/src/api/web/ai.ts @@ -21,3 +21,11 @@ export interface GenerateJsRequest { export interface GenerateJsResponse { code: string } + +export interface GenerateCronRequest { + prompt: string +} + +export interface GenerateCronResponse { + message?: string +} From fbf188d82a50cd51ca37c071ee1772b68fc27b23 Mon Sep 17 00:00:00 2001 From: Sam Rose Date: Mon, 31 Mar 2025 16:07:24 +0100 Subject: [PATCH 05/11] Expand AI endpoint testing. 
---
 packages/pro                                  |   2 +-
 .../server/src/api/routes/tests/ai.spec.ts    | 125 +++++++++++++++++-
 packages/server/src/tests/utilities/api/ai.ts |   8 +-
 3 files changed, 130 insertions(+), 5 deletions(-)

diff --git a/packages/pro b/packages/pro
index 49f3b848fc..01d95a6808 160000
--- a/packages/pro
+++ b/packages/pro
@@ -1 +1 @@
-Subproject commit 49f3b848fcf76c937917849005184db25dcf07c7
+Subproject commit 01d95a6808d751e4bf59c7c1c8bf5100b00b91be
diff --git a/packages/server/src/api/routes/tests/ai.spec.ts b/packages/server/src/api/routes/tests/ai.spec.ts
index a4843c659b..01aacb2dc4 100644
--- a/packages/server/src/api/routes/tests/ai.spec.ts
+++ b/packages/server/src/api/routes/tests/ai.spec.ts
@@ -1,8 +1,15 @@
 import { mockChatGPTResponse } from "../../../tests/utilities/mocks/ai/openai"
 import TestConfiguration from "../../../tests/utilities/TestConfiguration"
 import nock from "nock"
-import { configs, features, setEnv } from "@budibase/backend-core"
-import { AIInnerConfig, ConfigType, ProviderConfig } from "@budibase/types"
+import { configs, env, features, setEnv } from "@budibase/backend-core"
+import {
+  AIInnerConfig,
+  ConfigType,
+  License,
+  PlanModel,
+  PlanType,
+  ProviderConfig,
+} from "@budibase/types"
 import { context } from "@budibase/backend-core"
 import { mocks } from "@budibase/backend-core/tests"
 import { MockLLMResponseFn } from "../../../tests/utilities/mocks/ai"
@@ -22,6 +29,7 @@ interface TestSetup {
   name: string
   setup: SetupFn
   mockLLMResponse: MockLLMResponseFn
+  selfHostOnly?: boolean
 }
 
 function budibaseAI(): SetupFn {
@@ -81,6 +89,7 @@ const providers: TestSetup[] = [
       })
     },
     mockLLMResponse: mockChatGPTResponse,
+    selfHostOnly: true,
   },
   {
     name: "OpenAI API key with custom config",
@@ -119,7 +128,7 @@ describe("AI", () => {
 
   describe.each(providers)(
     "provider: $name",
-    ({ setup, mockLLMResponse }: TestSetup) => {
+    ({ setup, mockLLMResponse, selfHostOnly }: TestSetup) => {
       let cleanup: () => Promise<void> | void
       beforeAll(async () => {
        cleanup = await setup(config)
@@ -199,6 +208,116 @@ describe("AI", () => {
           await config.api.ai.generateJs({ prompt: "test" }, { status: 500 })
         })
       })
+
+      describe("POST /api/ai/cron", () => {
+        it("handles correct cron response", async () => {
+          mockLLMResponse("0 0 * * *")
+
+          const { message } = await config.api.ai.generateCron({
+            prompt: "test",
+          })
+          expect(message).toBe("0 0 * * *")
+        })
+
+        it("handles expected LLM error", async () => {
+          mockLLMResponse("Error generating cron: skill issue")
+
+          await config.api.ai.generateCron(
+            {
+              prompt: "test",
+            },
+            { status: 400 }
+          )
+        })
+
+        it("handles unexpected LLM error", async () => {
+          mockLLMResponse(() => {
+            throw new Error("LLM error")
+          })
+
+          await config.api.ai.generateCron(
+            {
+              prompt: "test",
+            },
+            { status: 500 }
+          )
+        })
+      })
+
+      !selfHostOnly &&
+        describe("POST /api/ai/chat", () => {
+          let cleanup: () => void
+          beforeAll(() => {
+            cleanup = setEnv({ SELF_HOSTED: false })
+          })
+
+          afterAll(() => {
+            cleanup()
+          })
+
+          beforeEach(() => {
+            const license: License = {
+              plan: {
+                type: PlanType.FREE,
+                model: PlanModel.PER_USER,
+                usesInvoicing: false,
+              },
+              features: [],
+              quotas: {} as any,
+              tenantId: config.tenantId,
+            }
+            nock(env.ACCOUNT_PORTAL_URL).get("/api/license").reply(200, license)
+          })
+
+          it("handles correct chat response", async () => {
+            mockLLMResponse("Hi there!")
+            const { message } = await config.api.ai.chat({
+              messages: [{ role: "user", content: "Hello!" }],
+              licenseKey: "test-key",
+            })
+            expect(message).toBe("Hi there!")
+          })
+
+          it("handles chat response error", async () => {
+            mockLLMResponse(() => {
+              throw new Error("LLM error")
+            })
+            await config.api.ai.chat(
+              {
+                messages: [{ role: "user", content: "Hello!" }],
+                licenseKey: "test-key",
+              },
+              { status: 500 }
+            )
+          })
+
+          it("handles no license", async () => {
+            nock.cleanAll()
+            nock(env.ACCOUNT_PORTAL_URL).get("/api/license").reply(404)
+            await config.api.ai.chat(
+              {
+                messages: [{ role: "user", content: "Hello!" }],
+                licenseKey: "test-key",
+              },
+              {
+                status: 403,
+              }
+            )
+          })
+
+          it("handles no license key", async () => {
+            await config.api.ai.chat(
+              {
+                messages: [{ role: "user", content: "Hello!" }],
+                // @ts-expect-error - intentionally wrong
+                licenseKey: undefined,
+              },
+              {
+                status: 403,
+              }
+            )
+          })
+        })
     }
   )
 })
diff --git a/packages/server/src/tests/utilities/api/ai.ts b/packages/server/src/tests/utilities/api/ai.ts
index ed37350872..efaa321f09 100644
--- a/packages/server/src/tests/utilities/api/ai.ts
+++ b/packages/server/src/tests/utilities/api/ai.ts
@@ -7,6 +7,7 @@ import {
   GenerateJsResponse,
 } from "@budibase/types"
 import { Expectations, TestAPI } from "./base"
+import { constants } from "@budibase/backend-core"
 
 export class AIAPI extends TestAPI {
   generateJs = async (
@@ -30,11 +31,16 @@ export class AIAPI extends TestAPI {
   }
 
   chat = async (
-    req: ChatCompletionRequest,
+    req: ChatCompletionRequest & { licenseKey: string },
     expectations?: Expectations
   ): Promise<ChatCompletionResponse> => {
+    const headers: Record<string, string> = {}
+    if (req.licenseKey) {
+      headers[constants.Header.LICENSE_KEY] = req.licenseKey
+    }
     return await this._post<ChatCompletionResponse>(`/api/ai/chat`, {
       body: req,
+      headers,
       expectations,
     })
   }

From ec85267944a13edf44383d53a3eddfb56e7be60a Mon Sep 17 00:00:00 2001
From: Sam Rose
Date: Mon, 31 Mar 2025 16:10:09 +0100
Subject: [PATCH 06/11] Update pro reference

---
 packages/pro | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/packages/pro b/packages/pro
index 01d95a6808..c22e0044f9 160000
--- a/packages/pro
+++ b/packages/pro
@@ -1 +1 @@
-Subproject commit 01d95a6808d751e4bf59c7c1c8bf5100b00b91be
+Subproject commit c22e0044f91e44773e7ebc817bf53ec867921c12

From 1c35bae0866040644356b26737ce44081de32477 Mon Sep 17 00:00:00 2001
From: Sam Rose
Date: Mon, 31 Mar 2025 16:23:45 +0100
Subject: [PATCH 07/11] Update pro reference

---
 packages/pro | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/packages/pro b/packages/pro
index c22e0044f9..9a4d9cb5bd 160000
--- a/packages/pro
+++ b/packages/pro
@@ -1 +1 @@
-Subproject commit c22e0044f91e44773e7ebc817bf53ec867921c12
+Subproject commit 9a4d9cb5bd493963e5cbb8c474de415f7623ed33

From ec862fd13d389449d8a5e4c8c22a9bde38ca1727 Mon Sep 17 00:00:00 2001
From: Sam Rose
Date: Mon, 31 Mar 2025 16:32:09 +0100
Subject: [PATCH 08/11] Add anthropic-ai/sdk to packages/server for AI mocks.

---
 packages/server/package.json | 1 +
 1 file changed, 1 insertion(+)

diff --git a/packages/server/package.json b/packages/server/package.json
index e9bf4bbf15..18b13cba90 100644
--- a/packages/server/package.json
+++ b/packages/server/package.json
@@ -49,6 +49,7 @@
   "author": "Budibase",
   "license": "GPL-3.0",
   "dependencies": {
+    "@anthropic-ai/sdk": "^0.27.3",
     "@apidevtools/swagger-parser": "10.0.3",
     "@aws-sdk/client-dynamodb": "3.709.0",
     "@aws-sdk/client-s3": "3.709.0",

From f9e10898be90f9e2197c785926f878de4b2ca686 Mon Sep 17 00:00:00 2001
From: Sam Rose
Date: Mon, 31 Mar 2025 17:16:03 +0100
Subject: [PATCH 09/11] Fix tests.
--- packages/pro | 2 +- packages/server/src/api/routes/tests/ai.spec.ts | 11 ++++++++--- .../server/src/tests/utilities/mocks/ai/anthropic.ts | 8 +------- .../server/src/tests/utilities/mocks/ai/openai.ts | 8 +------- 4 files changed, 11 insertions(+), 18 deletions(-) diff --git a/packages/pro b/packages/pro index 9a4d9cb5bd..2fdb7ccaf6 160000 --- a/packages/pro +++ b/packages/pro @@ -1 +1 @@ -Subproject commit 9a4d9cb5bd493963e5cbb8c474de415f7623ed33 +Subproject commit 2fdb7ccaf64ac5a43c44042ccf2eb93423d3f269 diff --git a/packages/server/src/api/routes/tests/ai.spec.ts b/packages/server/src/api/routes/tests/ai.spec.ts index 01aacb2dc4..288ab888fd 100644 --- a/packages/server/src/api/routes/tests/ai.spec.ts +++ b/packages/server/src/api/routes/tests/ai.spec.ts @@ -246,13 +246,18 @@ describe("AI", () => { !selfHostOnly && describe("POST /api/ai/chat", () => { - let cleanup: () => void + let envCleanup: () => void + let featureCleanup: () => void beforeAll(() => { - cleanup = setEnv({ SELF_HOSTED: false }) + envCleanup = setEnv({ SELF_HOSTED: false }) + featureCleanup = features.testutils.setFeatureFlags("*", { + AI_JS_GENERATION: true, + }) }) afterAll(() => { - cleanup() + featureCleanup() + envCleanup() }) beforeEach(() => { diff --git a/packages/server/src/tests/utilities/mocks/ai/anthropic.ts b/packages/server/src/tests/utilities/mocks/ai/anthropic.ts index 20f1e3cc0a..ff0413aee1 100644 --- a/packages/server/src/tests/utilities/mocks/ai/anthropic.ts +++ b/packages/server/src/tests/utilities/mocks/ai/anthropic.ts @@ -23,13 +23,7 @@ export const mockAnthropicResponse: MockLLMResponseFn = ( try { content = answer(prompt) } catch (e) { - return [ - 500, - { - message: "Error in mock response function", - error: e, - }, - ] + return [500, "Internal Server Error"] } } else { content = answer diff --git a/packages/server/src/tests/utilities/mocks/ai/openai.ts b/packages/server/src/tests/utilities/mocks/ai/openai.ts index c0e07adcaa..827caad9be 100644 --- a/packages/server/src/tests/utilities/mocks/ai/openai.ts +++ b/packages/server/src/tests/utilities/mocks/ai/openai.ts @@ -60,13 +60,7 @@ export const mockChatGPTResponse: MockLLMResponseFn = ( try { content = answer(prompt) } catch (e) { - return [ - 500, - { - message: "Error in mock response function", - error: e, - }, - ] + return [500, "Internal Server Error"] } } else { content = answer From 1c51fb0ecfd4fe1a29ea518574be0ebc6c93abba Mon Sep 17 00:00:00 2001 From: Sam Rose Date: Mon, 31 Mar 2025 17:26:32 +0100 Subject: [PATCH 10/11] Fix tests (again). --- .../src/automations/tests/steps/openai.spec.ts | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/packages/server/src/automations/tests/steps/openai.spec.ts b/packages/server/src/automations/tests/steps/openai.spec.ts index 2536b083d7..3ad03eb1b2 100644 --- a/packages/server/src/automations/tests/steps/openai.spec.ts +++ b/packages/server/src/automations/tests/steps/openai.spec.ts @@ -1,5 +1,5 @@ import { createAutomationBuilder } from "../utilities/AutomationTestBuilder" -import { setEnv as setCoreEnv } from "@budibase/backend-core" +import { setEnv as setCoreEnv, withEnv } from "@budibase/backend-core" import { Model, MonthlyQuotaName, QuotaUsageType } from "@budibase/types" import TestConfiguration from "../../..//tests/utilities/TestConfiguration" import { mockChatGPTResponse } from "../../../tests/utilities/mocks/ai/openai" @@ -107,11 +107,13 @@ describe("test the openai action", () => { // path, because we've enabled Budibase AI. 
The exact value depends on a // calculation we use to approximate cost. This uses Budibase's OpenAI API // key, so we charge users for it. - const result = await expectAIUsage(14, () => - createAutomationBuilder(config) - .onAppAction() - .openai({ model: Model.GPT_4O_MINI, prompt: "Hello, world" }) - .test({ fields: {} }) + const result = await withEnv({ SELF_HOSTED: false }, () => + expectAIUsage(14, () => + createAutomationBuilder(config) + .onAppAction() + .openai({ model: Model.GPT_4O_MINI, prompt: "Hello, world" }) + .test({ fields: {} }) + ) ) expect(result.steps[0].outputs.response).toEqual("This is a test") From 9f2b25fcf1668bf4156135119d13790c22bd7748 Mon Sep 17 00:00:00 2001 From: Sam Rose Date: Tue, 1 Apr 2025 09:23:45 +0100 Subject: [PATCH 11/11] Update pro reference --- packages/pro | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/pro b/packages/pro index 2fdb7ccaf6..4417bceb24 160000 --- a/packages/pro +++ b/packages/pro @@ -1 +1 @@ -Subproject commit 2fdb7ccaf64ac5a43c44042ccf2eb93423d3f269 +Subproject commit 4417bceb24eabdd9a8c1615fb83c4e6fe8c0c914