Merge pull request #15968 from Budibase/BUDI-9238/budibaseai-structuredoutputs

Support LLM formats and structured outputs
This commit is contained in:
Adria Navarro 2025-04-17 12:31:54 +02:00 committed by GitHub
commit 266f4f0bed
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 84 additions and 4 deletions

View File

@@ -1,3 +1,4 @@
import { z } from "zod"
import { mockChatGPTResponse } from "../../../tests/utilities/mocks/ai/openai" import { mockChatGPTResponse } from "../../../tests/utilities/mocks/ai/openai"
import TestConfiguration from "../../../tests/utilities/TestConfiguration" import TestConfiguration from "../../../tests/utilities/TestConfiguration"
import nock from "nock" import nock from "nock"
@@ -10,12 +11,13 @@ import {
PlanModel, PlanModel,
PlanType, PlanType,
ProviderConfig, ProviderConfig,
StructuredOutput,
} from "@budibase/types" } from "@budibase/types"
import { context } from "@budibase/backend-core" import { context } from "@budibase/backend-core"
import { mocks } from "@budibase/backend-core/tests" import { generator, mocks } from "@budibase/backend-core/tests"
import { ai, quotas } from "@budibase/pro"
import { MockLLMResponseFn } from "../../../tests/utilities/mocks/ai" import { MockLLMResponseFn } from "../../../tests/utilities/mocks/ai"
import { mockAnthropicResponse } from "../../../tests/utilities/mocks/ai/anthropic" import { mockAnthropicResponse } from "../../../tests/utilities/mocks/ai/anthropic"
import { quotas } from "@budibase/pro"
function dedent(str: string) { function dedent(str: string) {
return str return str
@ -285,7 +287,8 @@ describe("BudibaseAI", () => {
envCleanup() envCleanup()
}) })
beforeEach(() => { beforeEach(async () => {
await config.newTenant()
nock.cleanAll() nock.cleanAll()
const license: License = { const license: License = {
plan: { plan: {
@ -366,5 +369,66 @@ describe("BudibaseAI", () => {
} }
) )
}) })
it("handles text format", async () => {
let usage = await getQuotaUsage()
expect(usage._id).toBe(`quota_usage_${config.getTenantId()}`)
expect(usage.monthly.current.budibaseAICredits).toBe(0)
const gptResponse = generator.word()
mockChatGPTResponse(gptResponse, { format: "text" })
const { message } = await config.api.ai.chat({
messages: [{ role: "user", content: "Hello!" }],
format: "text",
licenseKey: licenseKey,
})
expect(message).toBe(gptResponse)
usage = await getQuotaUsage()
expect(usage.monthly.current.budibaseAICredits).toBeGreaterThan(0)
})
it("handles json format", async () => {
let usage = await getQuotaUsage()
expect(usage._id).toBe(`quota_usage_${config.getTenantId()}`)
expect(usage.monthly.current.budibaseAICredits).toBe(0)
const gptResponse = JSON.stringify({
[generator.word()]: generator.word(),
})
mockChatGPTResponse(gptResponse, { format: "json" })
const { message } = await config.api.ai.chat({
messages: [{ role: "user", content: "Hello!" }],
format: "json",
licenseKey: licenseKey,
})
expect(message).toBe(gptResponse)
usage = await getQuotaUsage()
expect(usage.monthly.current.budibaseAICredits).toBeGreaterThan(0)
})
it("handles structured outputs", async () => {
let usage = await getQuotaUsage()
expect(usage._id).toBe(`quota_usage_${config.getTenantId()}`)
expect(usage.monthly.current.budibaseAICredits).toBe(0)
const gptResponse = generator.guid()
const structuredOutput = generator.word() as unknown as StructuredOutput
ai.structuredOutputs[structuredOutput] = {
key: generator.word(),
validator: z.object({ name: z.string() }),
}
mockChatGPTResponse(gptResponse, { format: structuredOutput })
const { message } = await config.api.ai.chat({
messages: [{ role: "user", content: "Hello!" }],
format: structuredOutput,
licenseKey: licenseKey,
})
expect(message).toBe(gptResponse)
usage = await getQuotaUsage()
expect(usage.monthly.current.budibaseAICredits).toBeGreaterThan(0)
})
}) })
}) })

View File

@@ -1,7 +1,9 @@
import { ResponseFormat } from "@budibase/types"
import { Scope } from "nock" import { Scope } from "nock"
export interface MockLLMResponseOpts { export interface MockLLMResponseOpts {
host?: string host?: string
format?: ResponseFormat
} }
export type MockLLMResponseFn = ( export type MockLLMResponseFn = (

View File

@@ -1,5 +1,7 @@
import nock from "nock" import nock from "nock"
import { MockLLMResponseFn, MockLLMResponseOpts } from "." import { MockLLMResponseFn, MockLLMResponseOpts } from "."
import _ from "lodash"
import { ai } from "@budibase/pro"
let chatID = 1 let chatID = 1
const SPACE_REGEX = /\s+/g const SPACE_REGEX = /\s+/g
@@ -48,8 +50,15 @@ export const mockChatGPTResponse: MockLLMResponseFn = ( export const mockChatGPTResponse: MockLLMResponseFn = (
answer: string | ((prompt: string) => string), answer: string | ((prompt: string) => string),
opts?: MockLLMResponseOpts opts?: MockLLMResponseOpts
) => { ) => {
let body: any = undefined
if (opts?.format) {
body = _.matches({
response_format: ai.openai.parseResponseFormat(opts.format),
})
}
return nock(opts?.host || "https://api.openai.com") return nock(opts?.host || "https://api.openai.com")
.post("/v1/chat/completions") .post("/v1/chat/completions", body)
.reply((uri: string, body: nock.Body) => { .reply((uri: string, body: nock.Body) => {
const req = body as ChatCompletionRequest const req = body as ChatCompletionRequest
const messages = req.messages const messages = req.messages

View File

@@ -5,8 +5,13 @@ export interface Message {
content: string content: string
} }
// Intentionally empty placeholder enum — presumably populated elsewhere
// (e.g. via declaration merging in @budibase/pro, which registers entries in
// ai.structuredOutputs); TODO confirm against the pro package.
export enum StructuredOutput {}
// A chat response is either plain text, JSON, or a named structured output.
export type ResponseFormat = "text" | "json" | StructuredOutput
export interface ChatCompletionRequest { export interface ChatCompletionRequest {
messages: Message[] messages: Message[]
format?: ResponseFormat
} }
export interface ChatCompletionResponse { export interface ChatCompletionResponse {