Merge remote-tracking branch 'origin/master' into BUDI-9238/ai-table-generation-ui
commit c3ed609704
@@ -1321,11 +1321,14 @@ const shouldReplaceBinding = (currentValue, from, convertTo, binding) => {
 // which are substrings of other words - e.g. a binding of `a` would turn
 // `hah` into `h[a]h` which is obviously wrong. To avoid this we can remove all
 // expanded versions of the binding to be replaced.
-const excludeExtensions = (string, binding) => {
+const excludeReadableExtensions = (string, binding) => {
+  // Escape any special chars in the binding so we can treat it as a literal
+  // string match in the regexes below
+  const escaped = binding.replace(/[.*+?^${}()|[\]\\]/g, "\\$&")
   // Regex to find prefixed bindings (e.g. exclude xfoo for foo)
-  const regex1 = new RegExp(`[a-zA-Z0-9-_]+${binding}[a-zA-Z0-9-_]*`, "g")
+  const regex1 = new RegExp(`[a-zA-Z0-9-_]+${escaped}[a-zA-Z0-9-_]*`, "g")
   // Regex to find suffixed bindings (e.g. exclude foox for foo)
-  const regex2 = new RegExp(`[a-zA-Z0-9-_]*${binding}[a-zA-Z0-9-_]+`, "g")
+  const regex2 = new RegExp(`[a-zA-Z0-9-_]*${escaped}[a-zA-Z0-9-_]+`, "g")
   const matches = [...string.matchAll(regex1), ...string.matchAll(regex2)]
   for (const match of matches) {
     string = string.replace(match[0], new Array(match[0].length + 1).join("*"))
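The new `escaped` constant exists because readable bindings such as `foo.baz` can contain regex metacharacters. A standalone sketch of the escaping idiom (illustrative, not part of the commit):

    // Standard escape idiom: prefix every regex metacharacter with a backslash.
    const escapeRegex = (s: string): string =>
      s.replace(/[.*+?^${}()|[\]\\]/g, "\\$&")

    // Unescaped, "." matches any character, so `foo.baz` wrongly matches "fooXbaz".
    new RegExp("foo.baz").test("fooXbaz") // true
    // Escaped, the binding only matches itself literally.
    new RegExp(escapeRegex("foo.baz")).test("fooXbaz") // false
    new RegExp(escapeRegex("foo.baz")).test("foo.baz") // true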
@@ -1377,9 +1380,10 @@ const bindingReplacement = (
   // in the search, working from longest to shortest so always use best match first
   let searchString = newBoundValue
   for (let from of convertFromProps) {
-    // Blank out all extensions of this string to avoid partial matches
+    // If converting readable > runtime, blank out all extensions of this
+    // string to avoid partial matches
     if (convertTo === "runtimeBinding") {
-      searchString = excludeExtensions(searchString, from)
+      searchString = excludeReadableExtensions(searchString, from)
     }
     const binding = bindableProperties.find(el => el[convertFrom] === from)
     if (
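The helper masks extended matches with `*` instead of deleting them, so the string keeps its length and later replacements still line up. A minimal standalone sketch of the masking trick; `"*".repeat(n)` is equivalent to the `new Array(n + 1).join("*")` spelling used above:

    // Mask one match in place, preserving overall string length.
    const mask = (text: string, match: string): string =>
      text.replace(match, "*".repeat(match.length))

    // "locationZ" is an extension of the binding "location" and must not be
    // treated as a hit for it, so it gets blanked out before the search.
    mask("location locationZ", "locationZ") // "location *********"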
@@ -79,6 +79,20 @@ describe("Builder dataBinding", () => {
       runtimeBinding: "[location]",
       type: "context",
     },
+    {
+      category: "Bindings",
+      icon: "Brackets",
+      readableBinding: "foo.[bar]",
+      runtimeBinding: "[foo].[qwe]",
+      type: "context",
+    },
+    {
+      category: "Bindings",
+      icon: "Brackets",
+      readableBinding: "foo.baz",
+      runtimeBinding: "[foo].[baz]",
+      type: "context",
+    },
   ]
   it("should convert a readable binding to a runtime one", () => {
     const textWithBindings = `Hello {{ Current User.firstName }}! The count is {{ Binding.count }}.`
@@ -102,6 +116,16 @@ describe("Builder dataBinding", () => {
         `location {{ _location Zlocation [location] locationZ _location_ }}`
       )
     })
+    it("should handle special characters in the readable binding", () => {
+      const textWithBindings = `{{ foo.baz }}`
+      expect(
+        readableToRuntimeBinding(
+          bindableProperties,
+          textWithBindings,
+          "runtimeBinding"
+        )
+      ).toEqual(`{{ [foo].[baz] }}`)
+    })
   })
 
   describe("updateReferencesInObject", () => {
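For reference, the call shape exercised by the new test, using the names from the diff above (a sketch, not additional test code):

    // readableToRuntimeBinding walks bindableProperties and swaps each
    // readableBinding for its runtimeBinding inside {{ ... }} expressions.
    readableToRuntimeBinding(bindableProperties, `{{ foo.baz }}`, "runtimeBinding")
    // -> `{{ [foo].[baz] }}`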
@@ -1 +1 @@
-Subproject commit f27612865cd5f689b75b8f4e148293dff3b77bc4
+Subproject commit 6c9ccbb8a5737733448f6b0e23696de1ed343015
@@ -1,3 +1,4 @@
+import { z } from "zod"
 import { mockChatGPTResponse } from "../../../tests/utilities/mocks/ai/openai"
 import TestConfiguration from "../../../tests/utilities/TestConfiguration"
 import nock from "nock"
@@ -10,10 +11,13 @@ import {
   PlanModel,
   PlanType,
   ProviderConfig,
+  StructuredOutput,
 } from "@budibase/types"
 import { context } from "@budibase/backend-core"
-import { mocks } from "@budibase/backend-core/tests"
-import { quotas } from "@budibase/pro"
+import { generator, mocks } from "@budibase/backend-core/tests"
+import { ai, quotas } from "@budibase/pro"
+import { MockLLMResponseFn } from "../../../tests/utilities/mocks/ai"
+import { mockAnthropicResponse } from "../../../tests/utilities/mocks/ai/anthropic"
 
 function dedent(str: string) {
   return str
@@ -28,6 +32,7 @@ type SetupFn = (
 interface TestSetup {
   name: string
   setup: SetupFn
+  mockLLMResponse: MockLLMResponseFn
 }
 
 function budibaseAI(): SetupFn {
@@ -86,14 +91,25 @@ const allProviders: TestSetup[] = [
         OPENAI_API_KEY: "test-key",
       })
     },
+    mockLLMResponse: mockChatGPTResponse,
   },
   {
     name: "OpenAI API key with custom config",
     setup: customAIConfig({ provider: "OpenAI", defaultModel: "gpt-4o-mini" }),
+    mockLLMResponse: mockChatGPTResponse,
   },
+  {
+    name: "Anthropic API key with custom config",
+    setup: customAIConfig({
+      provider: "Anthropic",
+      defaultModel: "claude-3-5-sonnet-20240620",
+    }),
+    mockLLMResponse: mockAnthropicResponse,
+  },
   {
     name: "BudibaseAI",
     setup: budibaseAI(),
+    mockLLMResponse: mockChatGPTResponse,
   },
 ]
 
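Jest's `describe.each`, given an array of objects, interpolates `$name` from each object into the suite title; that is how the new `mockLLMResponse` field reaches each provider's suite. A sketch of the consumption pattern (the full version appears in the next hunk):

    // One suite per entry; titles render as e.g. "provider: BudibaseAI".
    describe.each(allProviders)(
      "provider: $name",
      ({ setup, mockLLMResponse }: TestSetup) => {
        // setup() configures the provider, mockLLMResponse stubs its HTTP API.
      }
    )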
@@ -112,54 +128,56 @@ describe("AI", () => {
     nock.cleanAll()
   })
 
-  describe.each(allProviders)("provider: $name", ({ setup }: TestSetup) => {
-    let cleanup: () => Promise<void> | void
-    beforeAll(async () => {
-      cleanup = await setup(config)
-    })
+  describe.each(allProviders)(
+    "provider: $name",
+    ({ setup, mockLLMResponse }: TestSetup) => {
+      let cleanup: () => Promise<void> | void
+      beforeAll(async () => {
+        cleanup = await setup(config)
+      })
 
-    afterAll(async () => {
-      const maybePromise = cleanup()
-      if (maybePromise) {
-        await maybePromise
-      }
-    })
+      afterAll(async () => {
+        const maybePromise = cleanup()
+        if (maybePromise) {
+          await maybePromise
+        }
+      })
 
-    describe("POST /api/ai/js", () => {
-      let cleanup: () => void
-      beforeAll(() => {
-        cleanup = features.testutils.setFeatureFlags("*", {
-          AI_JS_GENERATION: true,
-        })
-      })
+      describe("POST /api/ai/js", () => {
+        let cleanup: () => void
+        beforeAll(() => {
+          cleanup = features.testutils.setFeatureFlags("*", {
+            AI_JS_GENERATION: true,
+          })
+        })
 
-      afterAll(() => {
-        cleanup()
-      })
+        afterAll(() => {
+          cleanup()
+        })
 
-      it("handles correct plain code response", async () => {
-        mockChatGPTResponse(`return 42`)
+        it("handles correct plain code response", async () => {
+          mockLLMResponse(`return 42`)
 
-        const { code } = await config.api.ai.generateJs({ prompt: "test" })
-        expect(code).toBe("return 42")
-      })
+          const { code } = await config.api.ai.generateJs({ prompt: "test" })
+          expect(code).toBe("return 42")
+        })
 
-      it("handles correct markdown code response", async () => {
-        mockChatGPTResponse(
-          dedent(`
+        it("handles correct markdown code response", async () => {
+          mockLLMResponse(
+            dedent(`
             \`\`\`js
             return 42
             \`\`\`
           `)
-        )
+          )
 
-        const { code } = await config.api.ai.generateJs({ prompt: "test" })
-        expect(code).toBe("return 42")
-      })
+          const { code } = await config.api.ai.generateJs({ prompt: "test" })
+          expect(code).toBe("return 42")
+        })
 
-      it("handles multiple markdown code blocks returned", async () => {
-        mockChatGPTResponse(
-          dedent(`
+        it("handles multiple markdown code blocks returned", async () => {
+          mockLLMResponse(
+            dedent(`
             This:
 
             \`\`\`js
@@ -172,62 +190,63 @@ describe("AI", () => {
             return 10
             \`\`\`
           `)
-        )
+          )
 
-        const { code } = await config.api.ai.generateJs({ prompt: "test" })
-        expect(code).toBe("return 42")
-      })
+          const { code } = await config.api.ai.generateJs({ prompt: "test" })
+          expect(code).toBe("return 42")
+        })
 
-      // TODO: handle when this happens
-      it.skip("handles no code response", async () => {
-        mockChatGPTResponse("I'm sorry, you're quite right, etc.")
-        const { code } = await config.api.ai.generateJs({ prompt: "test" })
-        expect(code).toBe("")
-      })
+        // TODO: handle when this happens
+        it.skip("handles no code response", async () => {
+          mockLLMResponse("I'm sorry, you're quite right, etc.")
+          const { code } = await config.api.ai.generateJs({ prompt: "test" })
+          expect(code).toBe("")
+        })
 
-      it("handles LLM errors", async () => {
-        mockChatGPTResponse(() => {
-          throw new Error("LLM error")
-        })
-        await config.api.ai.generateJs({ prompt: "test" }, { status: 500 })
-      })
-    })
+        it("handles LLM errors", async () => {
+          mockLLMResponse(() => {
+            throw new Error("LLM error")
+          })
+          await config.api.ai.generateJs({ prompt: "test" }, { status: 500 })
+        })
+      })
 
-    describe("POST /api/ai/cron", () => {
-      it("handles correct cron response", async () => {
-        mockChatGPTResponse("0 0 * * *")
+      describe("POST /api/ai/cron", () => {
+        it("handles correct cron response", async () => {
+          mockLLMResponse("0 0 * * *")
 
-        const { message } = await config.api.ai.generateCron({
-          prompt: "test",
-        })
-        expect(message).toBe("0 0 * * *")
-      })
+          const { message } = await config.api.ai.generateCron({
+            prompt: "test",
+          })
+          expect(message).toBe("0 0 * * *")
+        })
 
-      it("handles expected LLM error", async () => {
-        mockChatGPTResponse("Error generating cron: skill issue")
+        it("handles expected LLM error", async () => {
+          mockLLMResponse("Error generating cron: skill issue")
 
-        await config.api.ai.generateCron(
-          {
-            prompt: "test",
-          },
-          { status: 400 }
-        )
-      })
+          await config.api.ai.generateCron(
+            {
+              prompt: "test",
+            },
+            { status: 400 }
+          )
+        })
 
-      it("handles unexpected LLM error", async () => {
-        mockChatGPTResponse(() => {
-          throw new Error("LLM error")
-        })
+        it("handles unexpected LLM error", async () => {
+          mockLLMResponse(() => {
+            throw new Error("LLM error")
+          })
 
-        await config.api.ai.generateCron(
-          {
-            prompt: "test",
-          },
-          { status: 500 }
-        )
-      })
-    })
-  })
+          await config.api.ai.generateCron(
+            {
+              prompt: "test",
+            },
+            { status: 500 }
+          )
+        })
+      })
+    }
+  )
 })
 
 describe("BudibaseAI", () => {
@@ -268,7 +287,8 @@ describe("BudibaseAI", () => {
     envCleanup()
   })
 
-  beforeEach(() => {
+  beforeEach(async () => {
+    await config.newTenant()
     nock.cleanAll()
     const license: License = {
       plan: {
@@ -349,5 +369,66 @@ describe("BudibaseAI", () => {
         }
       )
     })
+
+    it("handles text format", async () => {
+      let usage = await getQuotaUsage()
+      expect(usage._id).toBe(`quota_usage_${config.getTenantId()}`)
+      expect(usage.monthly.current.budibaseAICredits).toBe(0)
+
+      const gptResponse = generator.word()
+      mockChatGPTResponse(gptResponse, { format: "text" })
+      const { message } = await config.api.ai.chat({
+        messages: [{ role: "user", content: "Hello!" }],
+        format: "text",
+        licenseKey: licenseKey,
+      })
+      expect(message).toBe(gptResponse)
+
+      usage = await getQuotaUsage()
+      expect(usage.monthly.current.budibaseAICredits).toBeGreaterThan(0)
+    })
+
+    it("handles json format", async () => {
+      let usage = await getQuotaUsage()
+      expect(usage._id).toBe(`quota_usage_${config.getTenantId()}`)
+      expect(usage.monthly.current.budibaseAICredits).toBe(0)
+
+      const gptResponse = JSON.stringify({
+        [generator.word()]: generator.word(),
+      })
+      mockChatGPTResponse(gptResponse, { format: "json" })
+      const { message } = await config.api.ai.chat({
+        messages: [{ role: "user", content: "Hello!" }],
+        format: "json",
+        licenseKey: licenseKey,
+      })
+      expect(message).toBe(gptResponse)
+
+      usage = await getQuotaUsage()
+      expect(usage.monthly.current.budibaseAICredits).toBeGreaterThan(0)
+    })
+
+    it("handles structured outputs", async () => {
+      let usage = await getQuotaUsage()
+      expect(usage._id).toBe(`quota_usage_${config.getTenantId()}`)
+      expect(usage.monthly.current.budibaseAICredits).toBe(0)
+
+      const gptResponse = generator.guid()
+      const structuredOutput = generator.word() as unknown as StructuredOutput
+      ai.structuredOutputs[structuredOutput] = {
+        key: generator.word(),
+        validator: z.object({ name: z.string() }),
+      }
+      mockChatGPTResponse(gptResponse, { format: structuredOutput })
+      const { message } = await config.api.ai.chat({
+        messages: [{ role: "user", content: "Hello!" }],
+        format: structuredOutput,
+        licenseKey: licenseKey,
+      })
+      expect(message).toBe(gptResponse)
+
+      usage = await getQuotaUsage()
+      expect(usage.monthly.current.budibaseAICredits).toBeGreaterThan(0)
+    })
   })
 })
@@ -0,0 +1,48 @@
+import AnthropicClient from "@anthropic-ai/sdk"
+import nock from "nock"
+import { MockLLMResponseFn, MockLLMResponseOpts } from "."
+
+let chatID = 1
+const SPACE_REGEX = /\s+/g
+
+export const mockAnthropicResponse: MockLLMResponseFn = (
+  answer: string | ((prompt: string) => string),
+  opts?: MockLLMResponseOpts
+) => {
+  return nock(opts?.host || "https://api.anthropic.com")
+    .post("/v1/messages")
+    .reply((uri: string, body: nock.Body) => {
+      const req = body as AnthropicClient.MessageCreateParamsNonStreaming
+      const prompt = req.messages[0].content
+      if (typeof prompt !== "string") {
+        throw new Error("Anthropic mock only supports string prompts")
+      }
+
+      let content
+      if (typeof answer === "function") {
+        try {
+          content = answer(prompt)
+        } catch (e) {
+          return [500, "Internal Server Error"]
+        }
+      } else {
+        content = answer
+      }
+
+      const resp: AnthropicClient.Messages.Message = {
+        id: `${chatID++}`,
+        type: "message",
+        role: "assistant",
+        model: req.model,
+        stop_reason: "end_turn",
+        usage: {
+          input_tokens: prompt.split(SPACE_REGEX).length,
+          output_tokens: content.split(SPACE_REGEX).length,
+        },
+        stop_sequence: null,
+        content: [{ type: "text", text: content }],
+      }
+      return [200, resp]
+    })
+    .persist()
+}
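The mock accepts either a fixed string or a function of the prompt, and a throwing function becomes a 500 reply. Usage sketches (the answers here are illustrative):

    mockAnthropicResponse("0 0 * * *") // every call answers with this string
    mockAnthropicResponse(prompt => `echo: ${prompt}`) // answer derived from prompt
    mockAnthropicResponse(() => {
      throw new Error("LLM error") // mock replies [500, "Internal Server Error"]
    })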
@@ -1,7 +1,9 @@
+import { ResponseFormat } from "@budibase/types"
 import { Scope } from "nock"
 
 export interface MockLLMResponseOpts {
   host?: string
+  format?: ResponseFormat
 }
 
 export type MockLLMResponseFn = (
@@ -1,5 +1,7 @@
 import nock from "nock"
 import { MockLLMResponseFn, MockLLMResponseOpts } from "."
+import _ from "lodash"
+import { ai } from "@budibase/pro"
 
 let chatID = 1
 const SPACE_REGEX = /\s+/g
@@ -48,8 +50,15 @@ export const mockChatGPTResponse: MockLLMResponseFn = (
   answer: string | ((prompt: string) => string),
   opts?: MockLLMResponseOpts
 ) => {
+  let body: any = undefined
+
+  if (opts?.format) {
+    body = _.matches({
+      response_format: ai.openai.parseResponseFormat(opts.format),
+    })
+  }
   return nock(opts?.host || "https://api.openai.com")
-    .post("/v1/chat/completions")
+    .post("/v1/chat/completions", body)
     .reply((uri: string, body: nock.Body) => {
       const req = body as ChatCompletionRequest
       const messages = req.messages
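nock's `.post(path, body)` accepts a predicate for `body`, and lodash's `_.matches(source)` builds a partial deep-equality predicate, so the intercept above only fires when the outgoing request carries the expected `response_format`. A standalone sketch; the `json_object` shape is an assumption about what `parseResponseFormat` produces for `"json"`, based on OpenAI's API:

    import _ from "lodash"

    const matchesJson = _.matches({ response_format: { type: "json_object" } })
    matchesJson({ model: "gpt-4o-mini", response_format: { type: "json_object" } }) // true
    matchesJson({ model: "gpt-4o-mini" }) // false: the intercept would not fire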
@@ -5,8 +5,13 @@ export interface Message {
   content: string
 }
 
+export enum StructuredOutput {}
+
+export type ResponseFormat = "text" | "json" | StructuredOutput
+
 export interface ChatCompletionRequest {
   messages: Message[]
+  format?: ResponseFormat
 }
 
 export interface ChatCompletionResponse {
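A `StructuredOutput` value names a registry entry pairing the format with a zod validator, as the tests register through `ai.structuredOutputs`. A sketch of the validation step itself (the registry shape is inferred from the test):

    import { z } from "zod"

    // The validator checks the model's JSON answer against the expected shape.
    const validator = z.object({ name: z.string() })
    const result = validator.safeParse(JSON.parse(`{"name": "Bob"}`))
    result.success // true; on success, result.data is typed as { name: string }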
@@ -111,7 +111,13 @@ export interface SCIMInnerConfig {
 
 export interface SCIMConfig extends Config<SCIMInnerConfig> {}
 
-export type AIProvider = "OpenAI" | "AzureOpenAI" | "BudibaseAI"
+export type AIProvider =
+  | "OpenAI"
+  | "Anthropic"
+  | "AzureOpenAI"
+  | "TogetherAI"
+  | "Custom"
+  | "BudibaseAI"
 
 export interface ProviderConfig {
   provider: AIProvider
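Widening a string union like `AIProvider` is safest when consumers switch over it exhaustively. A hedged sketch, not part of the commit; the model names reuse values from the test config above:

    function defaultModelFor(provider: AIProvider): string {
      switch (provider) {
        case "Anthropic":
          return "claude-3-5-sonnet-20240620"
        case "OpenAI":
        case "AzureOpenAI":
        case "TogetherAI":
        case "Custom":
        case "BudibaseAI":
          return "gpt-4o-mini"
        default: {
          // Compile-time guard: adding a new provider breaks this assignment.
          const exhausted: never = provider
          return exhausted
        }
      }
    }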