Merge remote-tracking branch 'origin/master' into BUDI-9238/budibaseai-structuredoutputs

This commit is contained in:
Adria Navarro 2025-04-16 12:26:03 +02:00
commit 59b09b9396
11 changed files with 190 additions and 192 deletions

View File

@ -1,4 +1,4 @@
import { FieldType } from "@budibase/types"
import { FieldType, FormulaType } from "@budibase/types"
import { FIELDS } from "@/constants/backend"
import { tables } from "@/stores/builder"
import { get as svelteGet } from "svelte/store"
@ -8,7 +8,6 @@ import { makeReadableKeyPropSafe } from "@/dataBinding"
const MAX_DEPTH = 1
const TYPES_TO_SKIP = [
FieldType.FORMULA,
FieldType.AI,
FieldType.LONGFORM,
FieldType.SIGNATURE_SINGLE,
@ -17,6 +16,18 @@ const TYPES_TO_SKIP = [
FieldType.INTERNAL,
]
const shouldSkipFieldSchema = fieldSchema => {
// Skip some types always
if (TYPES_TO_SKIP.includes(fieldSchema.type)) {
return true
}
// Skip dynamic formula fields
return (
fieldSchema.type === FieldType.FORMULA &&
fieldSchema.formulaType === FormulaType.DYNAMIC
)
}
export function getBindings({
table,
path = null,
@ -32,7 +43,7 @@ export function getBindings({
// skip relationships after a certain depth and types which
// can't be bound to
if (
TYPES_TO_SKIP.includes(schema.type) ||
shouldSkipFieldSchema(schema) ||
(isRelationship && depth >= MAX_DEPTH)
) {
continue

View File

@ -40,14 +40,27 @@
let switchOnAIModal: Modal
let addCreditsModal: Modal
const thresholdExpansionWidth = 350
$: accountPortalAccess = $auth?.user?.accountPortalAccess
$: accountPortal = $admin.accountPortalUrl
$: aiEnabled = $auth?.user?.llm
$: expanded = expandedOnly ? true : expanded
$: expanded =
expandedOnly ||
(parentWidth !== null && parentWidth > thresholdExpansionWidth)
? true
: expanded
$: creditsExceeded = $licensing.aiCreditsExceeded
$: disabled = suggestedCode !== null || !aiEnabled || creditsExceeded
$: if (expandedOnly) {
$: if (
expandedOnly ||
(expanded && parentWidth !== null && parentWidth > thresholdExpansionWidth)
) {
containerWidth = calculateExpandedWidth()
} else if (!expanded) {
containerWidth = "auto"
}
async function generateJs(prompt: string) {

View File

@ -443,7 +443,6 @@
const resizeObserver = new ResizeObserver(() => {
updateEditorWidth()
})
resizeObserver.observe(editorEle)
return () => {
resizeObserver.disconnect()
@ -469,7 +468,6 @@
{#if aiGenEnabled}
<AIGen
expandedOnly={true}
{bindings}
{value}
parentWidth={editorWidth}

View File

@ -1317,6 +1317,22 @@ const shouldReplaceBinding = (currentValue, from, convertTo, binding) => {
return !invalids.find(invalid => noSpaces?.includes(invalid))
}
// If converting readable to runtime we need to ensure we don't replace words
// which are substrings of other words - e.g. a binding of `a` would turn
// `hah` into `h[a]h` which is obviously wrong. To avoid this we can remove all
// expanded versions of the binding to be replaced.
const excludeExtensions = (string, binding) => {
// Regex to find prefixed bindings (e.g. exclude xfoo for foo)
const regex1 = new RegExp(`[a-zA-Z0-9-_]+${binding}[a-zA-Z0-9-_]*`, "g")
// Regex to find suffixed bindings (e.g. exclude foox for foo)
const regex2 = new RegExp(`[a-zA-Z0-9-_]*${binding}[a-zA-Z0-9-_]+`, "g")
const matches = [...string.matchAll(regex1), ...string.matchAll(regex2)]
for (const match of matches) {
string = string.replace(match[0], new Array(match[0].length + 1).join("*"))
}
return string
}
/**
* Utility function which replaces a string between given indices.
*/
@ -1361,6 +1377,10 @@ const bindingReplacement = (
// in the search, working from longest to shortest so always use best match first
let searchString = newBoundValue
for (let from of convertFromProps) {
// Blank out all extensions of this string to avoid partial matches
if (convertTo === "runtimeBinding") {
searchString = excludeExtensions(searchString, from)
}
const binding = bindableProperties.find(el => el[convertFrom] === from)
if (
isJS ||

View File

@ -72,6 +72,13 @@ describe("Builder dataBinding", () => {
runtimeBinding: "count",
type: "context",
},
{
category: "Bindings",
icon: "Brackets",
readableBinding: "location",
runtimeBinding: "[location]",
type: "context",
},
]
it("should convert a readable binding to a runtime one", () => {
const textWithBindings = `Hello {{ Current User.firstName }}! The count is {{ Binding.count }}.`
@ -83,6 +90,18 @@ describe("Builder dataBinding", () => {
)
).toEqual(`Hello {{ [user].[firstName] }}! The count is {{ count }}.`)
})
it("should not convert a partial match", () => {
const textWithBindings = `location {{ _location Zlocation location locationZ _location_ }}`
expect(
readableToRuntimeBinding(
bindableProperties,
textWithBindings,
"runtimeBinding"
)
).toEqual(
`location {{ _location Zlocation [location] locationZ _location_ }}`
)
})
})
describe("updateReferencesInObject", () => {

View File

@ -1,4 +1,4 @@
<script>
<script lang="ts">
import { Body, Label, Icon } from "@budibase/bbui"
import BudibaseLogo from "./logos/Budibase.svelte"
import OpenAILogo from "./logos/OpenAI.svelte"
@ -6,7 +6,7 @@
import TogetherAILogo from "./logos/TogetherAI.svelte"
import AzureOpenAILogo from "./logos/AzureOpenAI.svelte"
import { Providers } from "./constants"
import type { ProviderConfig } from "@budibase/types"
const logos = {
["Budibase AI"]: BudibaseLogo,
[Providers.OpenAI.name]: OpenAILogo,
@ -15,11 +15,11 @@
[Providers.AzureOpenAI.name]: AzureOpenAILogo,
}
export let config
export let disabled
export let config: ProviderConfig
export let disabled: boolean | null = null
export let editHandler
export let deleteHandler
export let editHandler: (() => void) | null
export let deleteHandler: (() => void) | null
</script>
<!-- svelte-ignore a11y-no-static-element-interactions -->

View File

@ -1,4 +1,4 @@
<script>
<script lang="ts">
import { onMount } from "svelte"
import {
Button,
@ -16,22 +16,23 @@
import { API } from "@/api"
import AIConfigModal from "./ConfigModal.svelte"
import AIConfigTile from "./AIConfigTile.svelte"
import {
type AIConfig,
ConfigType,
type ProviderConfig,
} from "@budibase/types"
const ConfigTypes = {
AI: "ai",
}
let modal
let fullAIConfig
let editingAIConfig = {}
let editingUuid
let modal: Modal
let fullAIConfig: AIConfig
let editingAIConfig: ProviderConfig | undefined
let editingUuid: string | undefined
$: isCloud = $admin.cloud
$: customAIConfigsEnabled = $licensing.customAIConfigsEnabled
async function fetchAIConfig() {
try {
fullAIConfig = await API.getConfig(ConfigTypes.AI)
fullAIConfig = (await API.getConfig(ConfigType.AI)) as AIConfig
} catch (error) {
notifications.error("Error fetching AI config")
}
@ -42,9 +43,9 @@
const id = editingUuid || Helpers.uuid()
// Creating first custom AI Config
if (!fullAIConfig) {
if (!fullAIConfig && editingAIConfig) {
fullAIConfig = {
type: ConfigTypes.AI,
type: ConfigType.AI,
config: {
[id]: editingAIConfig,
},
@ -54,7 +55,7 @@
delete fullAIConfig.config.budibase_ai
// unset the default value from other configs if default is set
if (editingAIConfig.isDefault) {
if (editingAIConfig?.isDefault) {
for (let key in fullAIConfig.config) {
if (key !== id) {
fullAIConfig.config[key].isDefault = false
@ -62,8 +63,10 @@
}
}
// Add new or update existing custom AI Config
fullAIConfig.config[id] = editingAIConfig
fullAIConfig.type = ConfigTypes.AI
if (editingAIConfig) {
fullAIConfig.config[id] = editingAIConfig
}
fullAIConfig.type = ConfigType.AI
}
try {
@ -72,7 +75,7 @@
} catch (error) {
notifications.error(
`Failed to save AI Configuration, reason: ${
error?.message || "Unknown"
error instanceof Error ? error.message : "Unknown"
}`
)
} finally {
@ -80,7 +83,7 @@
}
}
async function deleteConfig(key) {
async function deleteConfig(key: string) {
// We don't store the default BB AI config in the DB
delete fullAIConfig.config.budibase_ai
// Delete the configuration
@ -91,14 +94,16 @@
notifications.success(`Deleted config`)
} catch (error) {
notifications.error(
`Failed to delete config, reason: ${error?.message || "Unknown"}`
`Failed to delete config, reason: ${
error instanceof Error ? error.message : "Unknown"
}`
)
} finally {
await fetchAIConfig()
}
}
function editConfig(uuid) {
function editConfig(uuid: string) {
editingUuid = uuid
editingAIConfig = fullAIConfig?.config[editingUuid]
modal.show()
@ -136,7 +141,10 @@
</Tags>
{/if}
</div>
<Body>Configure your AI settings within this section:</Body>
<Body
>Connect an LLM to enable AI features. You can only enable one LLM at a
time.</Body
>
</Layout>
<Divider />
<div style={`opacity: ${customAIConfigsEnabled ? 1 : 0.5}`}>

@ -1 +1 @@
Subproject commit 55ae1eab04d7bd8f7557428e609064003da9a178
Subproject commit dc0cf7b47d4331332d7ead5580be0b834d74d91e

View File

@ -15,8 +15,6 @@ import {
} from "@budibase/types"
import { context } from "@budibase/backend-core"
import { generator, mocks } from "@budibase/backend-core/tests"
import { MockLLMResponseFn } from "../../../tests/utilities/mocks/ai"
import { mockAnthropicResponse } from "../../../tests/utilities/mocks/ai/anthropic"
import { ai, quotas } from "@budibase/pro"
function dedent(str: string) {
@ -32,7 +30,6 @@ type SetupFn = (
interface TestSetup {
name: string
setup: SetupFn
mockLLMResponse: MockLLMResponseFn
}
function budibaseAI(): SetupFn {
@ -91,25 +88,14 @@ const allProviders: TestSetup[] = [
OPENAI_API_KEY: "test-key",
})
},
mockLLMResponse: mockChatGPTResponse,
},
{
name: "OpenAI API key with custom config",
setup: customAIConfig({ provider: "OpenAI", defaultModel: "gpt-4o-mini" }),
mockLLMResponse: mockChatGPTResponse,
},
{
name: "Anthropic API key with custom config",
setup: customAIConfig({
provider: "Anthropic",
defaultModel: "claude-3-5-sonnet-20240620",
}),
mockLLMResponse: mockAnthropicResponse,
},
{
name: "BudibaseAI",
setup: budibaseAI(),
mockLLMResponse: mockChatGPTResponse,
},
]
@ -128,56 +114,54 @@ describe("AI", () => {
nock.cleanAll()
})
describe.each(allProviders)(
"provider: $name",
({ setup, mockLLMResponse }: TestSetup) => {
let cleanup: () => Promise<void> | void
beforeAll(async () => {
cleanup = await setup(config)
describe.each(allProviders)("provider: $name", ({ setup }: TestSetup) => {
let cleanup: () => Promise<void> | void
beforeAll(async () => {
cleanup = await setup(config)
})
afterAll(async () => {
const maybePromise = cleanup()
if (maybePromise) {
await maybePromise
}
})
describe("POST /api/ai/js", () => {
let cleanup: () => void
beforeAll(() => {
cleanup = features.testutils.setFeatureFlags("*", {
AI_JS_GENERATION: true,
})
})
afterAll(async () => {
const maybePromise = cleanup()
if (maybePromise) {
await maybePromise
}
afterAll(() => {
cleanup()
})
describe("POST /api/ai/js", () => {
let cleanup: () => void
beforeAll(() => {
cleanup = features.testutils.setFeatureFlags("*", {
AI_JS_GENERATION: true,
})
})
it("handles correct plain code response", async () => {
mockChatGPTResponse(`return 42`)
afterAll(() => {
cleanup()
})
const { code } = await config.api.ai.generateJs({ prompt: "test" })
expect(code).toBe("return 42")
})
it("handles correct plain code response", async () => {
mockLLMResponse(`return 42`)
const { code } = await config.api.ai.generateJs({ prompt: "test" })
expect(code).toBe("return 42")
})
it("handles correct markdown code response", async () => {
mockLLMResponse(
dedent(`
it("handles correct markdown code response", async () => {
mockChatGPTResponse(
dedent(`
\`\`\`js
return 42
\`\`\`
`)
)
)
const { code } = await config.api.ai.generateJs({ prompt: "test" })
expect(code).toBe("return 42")
})
const { code } = await config.api.ai.generateJs({ prompt: "test" })
expect(code).toBe("return 42")
})
it("handles multiple markdown code blocks returned", async () => {
mockLLMResponse(
dedent(`
it("handles multiple markdown code blocks returned", async () => {
mockChatGPTResponse(
dedent(`
This:
\`\`\`js
@ -190,63 +174,62 @@ describe("AI", () => {
return 10
\`\`\`
`)
)
)
const { code } = await config.api.ai.generateJs({ prompt: "test" })
expect(code).toBe("return 42")
})
// TODO: handle when this happens
it.skip("handles no code response", async () => {
mockLLMResponse("I'm sorry, you're quite right, etc.")
const { code } = await config.api.ai.generateJs({ prompt: "test" })
expect(code).toBe("")
})
it("handles LLM errors", async () => {
mockLLMResponse(() => {
throw new Error("LLM error")
})
await config.api.ai.generateJs({ prompt: "test" }, { status: 500 })
})
const { code } = await config.api.ai.generateJs({ prompt: "test" })
expect(code).toBe("return 42")
})
describe("POST /api/ai/cron", () => {
it("handles correct cron response", async () => {
mockLLMResponse("0 0 * * *")
// TODO: handle when this happens
it.skip("handles no code response", async () => {
mockChatGPTResponse("I'm sorry, you're quite right, etc.")
const { code } = await config.api.ai.generateJs({ prompt: "test" })
expect(code).toBe("")
})
const { message } = await config.api.ai.generateCron({
it("handles LLM errors", async () => {
mockChatGPTResponse(() => {
throw new Error("LLM error")
})
await config.api.ai.generateJs({ prompt: "test" }, { status: 500 })
})
})
describe("POST /api/ai/cron", () => {
it("handles correct cron response", async () => {
mockChatGPTResponse("0 0 * * *")
const { message } = await config.api.ai.generateCron({
prompt: "test",
})
expect(message).toBe("0 0 * * *")
})
it("handles expected LLM error", async () => {
mockChatGPTResponse("Error generating cron: skill issue")
await config.api.ai.generateCron(
{
prompt: "test",
})
expect(message).toBe("0 0 * * *")
})
it("handles expected LLM error", async () => {
mockLLMResponse("Error generating cron: skill issue")
await config.api.ai.generateCron(
{
prompt: "test",
},
{ status: 400 }
)
})
it("handles unexpected LLM error", async () => {
mockLLMResponse(() => {
throw new Error("LLM error")
})
await config.api.ai.generateCron(
{
prompt: "test",
},
{ status: 500 }
)
})
},
{ status: 400 }
)
})
}
)
it("handles unexpected LLM error", async () => {
mockChatGPTResponse(() => {
throw new Error("LLM error")
})
await config.api.ai.generateCron(
{
prompt: "test",
},
{ status: 500 }
)
})
})
})
})
describe("BudibaseAI", () => {

View File

@ -1,48 +0,0 @@
import AnthropicClient from "@anthropic-ai/sdk"
import nock from "nock"
import { MockLLMResponseFn, MockLLMResponseOpts } from "."
let chatID = 1
const SPACE_REGEX = /\s+/g
export const mockAnthropicResponse: MockLLMResponseFn = (
answer: string | ((prompt: string) => string),
opts?: MockLLMResponseOpts
) => {
return nock(opts?.host || "https://api.anthropic.com")
.post("/v1/messages")
.reply((uri: string, body: nock.Body) => {
const req = body as AnthropicClient.MessageCreateParamsNonStreaming
const prompt = req.messages[0].content
if (typeof prompt !== "string") {
throw new Error("Anthropic mock only supports string prompts")
}
let content
if (typeof answer === "function") {
try {
content = answer(prompt)
} catch (e) {
return [500, "Internal Server Error"]
}
} else {
content = answer
}
const resp: AnthropicClient.Messages.Message = {
id: `${chatID++}`,
type: "message",
role: "assistant",
model: req.model,
stop_reason: "end_turn",
usage: {
input_tokens: prompt.split(SPACE_REGEX).length,
output_tokens: content.split(SPACE_REGEX).length,
},
stop_sequence: null,
content: [{ type: "text", text: content }],
}
return [200, resp]
})
.persist()
}

View File

@ -111,13 +111,7 @@ export interface SCIMInnerConfig {
export interface SCIMConfig extends Config<SCIMInnerConfig> {}
export type AIProvider =
| "OpenAI"
| "Anthropic"
| "AzureOpenAI"
| "TogetherAI"
| "Custom"
| "BudibaseAI"
export type AIProvider = "OpenAI" | "AzureOpenAI" | "BudibaseAI"
export interface ProviderConfig {
provider: AIProvider