From a09427675f4528ab70af2a069bff268e70e75677 Mon Sep 17 00:00:00 2001
From: Adria Navarro
Date: Wed, 16 Apr 2025 12:17:22 +0200
Subject: [PATCH] Add tests

---
 packages/pro                                   |  2 +-
 .../server/src/api/routes/tests/ai.spec.ts    | 43 ++++++++++++++++++-
 .../src/tests/utilities/mocks/ai/index.ts     |  2 +
 .../src/tests/utilities/mocks/ai/openai.ts    | 11 ++++-
 4 files changed, 54 insertions(+), 4 deletions(-)

diff --git a/packages/pro b/packages/pro
index 39c20eebc3..55ae1eab04 160000
--- a/packages/pro
+++ b/packages/pro
@@ -1 +1 @@
-Subproject commit 39c20eebc3baece4562abc0826779f20aac62267
+Subproject commit 55ae1eab04d7bd8f7557428e609064003da9a178
diff --git a/packages/server/src/api/routes/tests/ai.spec.ts b/packages/server/src/api/routes/tests/ai.spec.ts
index 9e041a619e..515a9b4f88 100644
--- a/packages/server/src/api/routes/tests/ai.spec.ts
+++ b/packages/server/src/api/routes/tests/ai.spec.ts
@@ -12,7 +12,7 @@ import {
   ProviderConfig,
 } from "@budibase/types"
 import { context } from "@budibase/backend-core"
-import { mocks } from "@budibase/backend-core/tests"
+import { generator, mocks } from "@budibase/backend-core/tests"
 import { MockLLMResponseFn } from "../../../tests/utilities/mocks/ai"
 import { mockAnthropicResponse } from "../../../tests/utilities/mocks/ai/anthropic"
 import { quotas } from "@budibase/pro"
@@ -285,7 +285,8 @@ describe("BudibaseAI", () => {
       envCleanup()
     })
 
-    beforeEach(() => {
+    beforeEach(async () => {
+      await config.newTenant()
       nock.cleanAll()
       const license: License = {
         plan: {
@@ -366,5 +367,43 @@ describe("BudibaseAI", () => {
         }
       )
     })
+
+    it("handles text format", async () => {
+      let usage = await getQuotaUsage()
+      expect(usage._id).toBe(`quota_usage_${config.getTenantId()}`)
+      expect(usage.monthly.current.budibaseAICredits).toBe(0)
+
+      const gptResponse = generator.word()
+      mockChatGPTResponse(gptResponse, { format: "text" })
+      const { message } = await config.api.ai.chat({
+        messages: [{ role: "user", content: "Hello!" }],
+        format: "text",
+        licenseKey: licenseKey,
+      })
+      expect(message).toBe(gptResponse)
+
+      usage = await getQuotaUsage()
+      expect(usage.monthly.current.budibaseAICredits).toBeGreaterThan(0)
+    })
+
+    it("handles json format", async () => {
+      let usage = await getQuotaUsage()
+      expect(usage._id).toBe(`quota_usage_${config.getTenantId()}`)
+      expect(usage.monthly.current.budibaseAICredits).toBe(0)
+
+      const gptResponse = JSON.stringify({
+        [generator.word()]: generator.word(),
+      })
+      mockChatGPTResponse(gptResponse, { format: "json" })
+      const { message } = await config.api.ai.chat({
+        messages: [{ role: "user", content: "Hello!" }],
+        format: "json",
+        licenseKey: licenseKey,
+      })
+      expect(message).toBe(gptResponse)
+
+      usage = await getQuotaUsage()
+      expect(usage.monthly.current.budibaseAICredits).toBeGreaterThan(0)
+    })
   })
 })
diff --git a/packages/server/src/tests/utilities/mocks/ai/index.ts b/packages/server/src/tests/utilities/mocks/ai/index.ts
index 87f8ce77be..d7df6be44f 100644
--- a/packages/server/src/tests/utilities/mocks/ai/index.ts
+++ b/packages/server/src/tests/utilities/mocks/ai/index.ts
@@ -1,7 +1,9 @@
+import { ResponseFormat } from "@budibase/types"
 import { Scope } from "nock"
 
 export interface MockLLMResponseOpts {
   host?: string
+  format?: ResponseFormat
 }
 
 export type MockLLMResponseFn = (
diff --git a/packages/server/src/tests/utilities/mocks/ai/openai.ts b/packages/server/src/tests/utilities/mocks/ai/openai.ts
index 827caad9be..3a9ac7f87a 100644
--- a/packages/server/src/tests/utilities/mocks/ai/openai.ts
+++ b/packages/server/src/tests/utilities/mocks/ai/openai.ts
@@ -1,5 +1,7 @@
 import nock from "nock"
 import { MockLLMResponseFn, MockLLMResponseOpts } from "."
+import _ from "lodash"
+import { ai } from "@budibase/pro"
 
 let chatID = 1
 const SPACE_REGEX = /\s+/g
@@ -48,8 +50,15 @@ export const mockChatGPTResponse: MockLLMResponseFn = (
   answer: string | ((prompt: string) => string),
   opts?: MockLLMResponseOpts
 ) => {
+  let body: any = undefined
+
+  if (opts?.format) {
+    body = _.matches({
+      response_format: ai.openai.parseResponseFormat(opts.format),
+    })
+  }
   return nock(opts?.host || "https://api.openai.com")
-    .post("/v1/chat/completions")
+    .post("/v1/chat/completions", body)
    .reply((uri: string, body: nock.Body) => {
      const req = body as ChatCompletionRequest
      const messages = req.messages
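
Note on the matcher pattern in openai.ts: nock accepts a function as the request-body argument to .post(), and lodash's _.matches(source) produces a predicate that performs a partial deep comparison. The interceptor therefore only fires for chat-completion requests whose body contains the expected response_format, while ignoring other fields such as model and messages. A minimal self-contained sketch of that pattern follows; the { type: "json_object" } value is an assumption standing in for whatever ai.openai.parseResponseFormat returns, which this patch does not show.

import nock from "nock"
import _ from "lodash"

// Intercept only requests whose body contains the expected response_format.
// _.matches does a partial deep comparison, so other fields in the request
// body (model, messages, temperature, ...) do not affect the match.
const scope = nock("https://api.openai.com")
  .post(
    "/v1/chat/completions",
    _.matches({ response_format: { type: "json_object" } }) // assumed shape
  )
  .reply(200, {
    id: "chatcmpl-1",
    choices: [{ message: { role: "assistant", content: "{}" } }],
  })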