Add tests

Adria Navarro 2025-04-16 12:17:22 +02:00
parent 083486f84b
commit a09427675f
4 changed files with 54 additions and 4 deletions

@@ -1 +1 @@
-Subproject commit 39c20eebc3baece4562abc0826779f20aac62267
+Subproject commit 55ae1eab04d7bd8f7557428e609064003da9a178

View File

@@ -12,7 +12,7 @@ import {
   ProviderConfig,
 } from "@budibase/types"
 import { context } from "@budibase/backend-core"
-import { mocks } from "@budibase/backend-core/tests"
+import { generator, mocks } from "@budibase/backend-core/tests"
 import { MockLLMResponseFn } from "../../../tests/utilities/mocks/ai"
 import { mockAnthropicResponse } from "../../../tests/utilities/mocks/ai/anthropic"
 import { quotas } from "@budibase/pro"
@@ -285,7 +285,8 @@ describe("BudibaseAI", () => {
       envCleanup()
     })
 
-    beforeEach(() => {
+    beforeEach(async () => {
+      await config.newTenant()
       nock.cleanAll()
       const license: License = {
         plan: {
@@ -366,5 +367,43 @@ describe("BudibaseAI", () => {
        }
      )
    })
+
+    it("handles text format", async () => {
+      let usage = await getQuotaUsage()
+      expect(usage._id).toBe(`quota_usage_${config.getTenantId()}`)
+      expect(usage.monthly.current.budibaseAICredits).toBe(0)
+
+      const gptResponse = generator.word()
+      mockChatGPTResponse(gptResponse, { format: "text" })
+      const { message } = await config.api.ai.chat({
+        messages: [{ role: "user", content: "Hello!" }],
+        format: "text",
+        licenseKey: licenseKey,
+      })
+      expect(message).toBe(gptResponse)
+
+      usage = await getQuotaUsage()
+      expect(usage.monthly.current.budibaseAICredits).toBeGreaterThan(0)
+    })
+
+    it("handles json format", async () => {
+      let usage = await getQuotaUsage()
+      expect(usage._id).toBe(`quota_usage_${config.getTenantId()}`)
+      expect(usage.monthly.current.budibaseAICredits).toBe(0)
+
+      const gptResponse = JSON.stringify({
+        [generator.word()]: generator.word(),
+      })
+      mockChatGPTResponse(gptResponse, { format: "json" })
+      const { message } = await config.api.ai.chat({
+        messages: [{ role: "user", content: "Hello!" }],
+        format: "json",
+        licenseKey: licenseKey,
+      })
+      expect(message).toBe(gptResponse)
+
+      usage = await getQuotaUsage()
+      expect(usage.monthly.current.budibaseAICredits).toBeGreaterThan(0)
+    })
   })
 })
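Taken together, the two new tests exercise the format option end to end: the chat endpoint forwards it to the provider request, the tightened mock below only replies when that request is shaped accordingly, and a successful reply consumes AI credits. A rough sketch of the provider-bound request in the "handles json format" case (the response_format value is an assumption about what parseResponseFormat produces, and the model name is a placeholder, not from the source):

// Hypothetical outgoing Chat Completions body in the json-format test.
const outgoingRequest = {
  model: "<configured model>", // placeholder
  messages: [{ role: "user", content: "Hello!" }],
  response_format: { type: "json_object" }, // assumed mapping of "json"
}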

View File

@@ -1,7 +1,9 @@
+import { ResponseFormat } from "@budibase/types"
 import { Scope } from "nock"
 
 export interface MockLLMResponseOpts {
   host?: string
+  format?: ResponseFormat
 }
 
 export type MockLLMResponseFn = (
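With the option in place, tests can opt into strict request-body matching when they mock the LLM; a minimal usage sketch of the two call shapes exercised above:

// Opt in: only intercept chat-completion requests whose body carries the
// response_format derived from "json"; anything else is left unmocked.
mockChatGPTResponse('{"answer":"ok"}', { format: "json" })

// Opt out (existing behaviour): match any request body.
mockChatGPTResponse("plain text reply")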

View File

@@ -1,5 +1,7 @@
 import nock from "nock"
 import { MockLLMResponseFn, MockLLMResponseOpts } from "."
+import _ from "lodash"
+import { ai } from "@budibase/pro"
 
 let chatID = 1
 const SPACE_REGEX = /\s+/g
@@ -48,8 +50,15 @@ export const mockChatGPTResponse: MockLLMResponseFn = (
   answer: string | ((prompt: string) => string),
   opts?: MockLLMResponseOpts
 ) => {
+  let body: any = undefined
+  if (opts?.format) {
+    body = _.matches({
+      response_format: ai.openai.parseResponseFormat(opts.format),
+    })
+  }
+
   return nock(opts?.host || "https://api.openai.com")
-    .post("/v1/chat/completions")
+    .post("/v1/chat/completions", body)
     .reply((uri: string, body: nock.Body) => {
       const req = body as ChatCompletionRequest
       const messages = req.messages
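For context, nock accepts a predicate as the second argument to .post(), and _.matches builds a partial deep-comparison predicate, so the interceptor only fires when the outgoing request really contains the expected response_format; a request without it is left unmatched and the test fails loudly instead of passing against the wrong body. A standalone sketch of that matching behaviour (the { type: "json_object" } value is illustrative, not necessarily what ai.openai.parseResponseFormat returns):

import _ from "lodash"

// _.matches does a partial deep comparison: extra keys on the request body
// (model, messages, temperature, ...) are ignored; the listed keys must match.
const matcher = _.matches({ response_format: { type: "json_object" } })

matcher({ model: "gpt-4", response_format: { type: "json_object" } }) // true  -> nock replies
matcher({ model: "gpt-4" })                                           // false -> request not intercepted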