Merge pull request #15973 from Budibase/revert-15960-remove-anthropic
Revert "Remove LLM options that aren't OpenAI."
commit 438f471230
@@ -1 +1 @@
-Subproject commit f27612865cd5f689b75b8f4e148293dff3b77bc4
+Subproject commit 6c9ccbb8a5737733448f6b0e23696de1ed343015
@@ -13,6 +13,8 @@ import {
 } from "@budibase/types"
 import { context } from "@budibase/backend-core"
 import { mocks } from "@budibase/backend-core/tests"
+import { MockLLMResponseFn } from "../../../tests/utilities/mocks/ai"
+import { mockAnthropicResponse } from "../../../tests/utilities/mocks/ai/anthropic"
 import { quotas } from "@budibase/pro"
 
 function dedent(str: string) {
@@ -28,6 +30,7 @@ type SetupFn = (
 interface TestSetup {
   name: string
   setup: SetupFn
+  mockLLMResponse: MockLLMResponseFn
 }
 
 function budibaseAI(): SetupFn {
@@ -86,14 +89,25 @@ const allProviders: TestSetup[] = [
         OPENAI_API_KEY: "test-key",
       })
     },
+    mockLLMResponse: mockChatGPTResponse,
   },
   {
     name: "OpenAI API key with custom config",
     setup: customAIConfig({ provider: "OpenAI", defaultModel: "gpt-4o-mini" }),
+    mockLLMResponse: mockChatGPTResponse,
   },
+  {
+    name: "Anthropic API key with custom config",
+    setup: customAIConfig({
+      provider: "Anthropic",
+      defaultModel: "claude-3-5-sonnet-20240620",
+    }),
+    mockLLMResponse: mockAnthropicResponse,
+  },
   {
     name: "BudibaseAI",
     setup: budibaseAI(),
+    mockLLMResponse: mockChatGPTResponse,
   },
 ]
 
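Each entry above pairs a provider's setup with the nock mock for that provider's HTTP API; Jest's `describe.each` then interpolates `$name` into the suite title, as the hunks below show. A condensed view of the pattern (simplified from the test file; `config` stands in for the suite's test configuration):

```ts
describe.each(allProviders)(
  "provider: $name",
  ({ setup, mockLLMResponse }: TestSetup) => {
    // setup() configures the provider (env vars or a custom AI config) and
    // returns a cleanup function; mockLLMResponse stubs that provider's API.
    it("answers with the mocked completion", async () => {
      mockLLMResponse("return 42")
      // ...call the endpoint under test via config.api and assert on the result
    })
  }
)
```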
@@ -112,54 +126,56 @@ describe("AI", () => {
     nock.cleanAll()
   })
 
-  describe.each(allProviders)("provider: $name", ({ setup }: TestSetup) => {
-    let cleanup: () => Promise<void> | void
-    beforeAll(async () => {
-      cleanup = await setup(config)
-    })
+  describe.each(allProviders)(
+    "provider: $name",
+    ({ setup, mockLLMResponse }: TestSetup) => {
+      let cleanup: () => Promise<void> | void
+      beforeAll(async () => {
+        cleanup = await setup(config)
+      })
 
-    afterAll(async () => {
-      const maybePromise = cleanup()
-      if (maybePromise) {
-        await maybePromise
-      }
-    })
+      afterAll(async () => {
+        const maybePromise = cleanup()
+        if (maybePromise) {
+          await maybePromise
+        }
+      })
 
-    describe("POST /api/ai/js", () => {
-      let cleanup: () => void
-      beforeAll(() => {
-        cleanup = features.testutils.setFeatureFlags("*", {
-          AI_JS_GENERATION: true,
-        })
-      })
+      describe("POST /api/ai/js", () => {
+        let cleanup: () => void
+        beforeAll(() => {
+          cleanup = features.testutils.setFeatureFlags("*", {
+            AI_JS_GENERATION: true,
+          })
+        })
 
-      afterAll(() => {
-        cleanup()
-      })
+        afterAll(() => {
+          cleanup()
+        })
 
-      it("handles correct plain code response", async () => {
-        mockChatGPTResponse(`return 42`)
+        it("handles correct plain code response", async () => {
+          mockLLMResponse(`return 42`)
 
-        const { code } = await config.api.ai.generateJs({ prompt: "test" })
-        expect(code).toBe("return 42")
-      })
+          const { code } = await config.api.ai.generateJs({ prompt: "test" })
+          expect(code).toBe("return 42")
+        })
 
-      it("handles correct markdown code response", async () => {
-        mockChatGPTResponse(
-          dedent(`
+        it("handles correct markdown code response", async () => {
+          mockLLMResponse(
+            dedent(`
             \`\`\`js
             return 42
             \`\`\`
           `)
-        )
+          )
 
-        const { code } = await config.api.ai.generateJs({ prompt: "test" })
-        expect(code).toBe("return 42")
-      })
+          const { code } = await config.api.ai.generateJs({ prompt: "test" })
+          expect(code).toBe("return 42")
+        })
 
-      it("handles multiple markdown code blocks returned", async () => {
-        mockChatGPTResponse(
-          dedent(`
+        it("handles multiple markdown code blocks returned", async () => {
+          mockLLMResponse(
+            dedent(`
             This:
 
             \`\`\`js
@@ -172,62 +188,63 @@ describe("AI", () => {
             return 10
             \`\`\`
           `)
-        )
+          )
 
-        const { code } = await config.api.ai.generateJs({ prompt: "test" })
-        expect(code).toBe("return 42")
-      })
+          const { code } = await config.api.ai.generateJs({ prompt: "test" })
+          expect(code).toBe("return 42")
+        })
 
-      // TODO: handle when this happens
-      it.skip("handles no code response", async () => {
-        mockChatGPTResponse("I'm sorry, you're quite right, etc.")
-        const { code } = await config.api.ai.generateJs({ prompt: "test" })
-        expect(code).toBe("")
-      })
+        // TODO: handle when this happens
+        it.skip("handles no code response", async () => {
+          mockLLMResponse("I'm sorry, you're quite right, etc.")
+          const { code } = await config.api.ai.generateJs({ prompt: "test" })
+          expect(code).toBe("")
+        })
 
-      it("handles LLM errors", async () => {
-        mockChatGPTResponse(() => {
-          throw new Error("LLM error")
-        })
-        await config.api.ai.generateJs({ prompt: "test" }, { status: 500 })
-      })
-    })
+        it("handles LLM errors", async () => {
+          mockLLMResponse(() => {
+            throw new Error("LLM error")
+          })
+          await config.api.ai.generateJs({ prompt: "test" }, { status: 500 })
+        })
+      })
 
-    describe("POST /api/ai/cron", () => {
-      it("handles correct cron response", async () => {
-        mockChatGPTResponse("0 0 * * *")
+      describe("POST /api/ai/cron", () => {
+        it("handles correct cron response", async () => {
+          mockLLMResponse("0 0 * * *")
 
-        const { message } = await config.api.ai.generateCron({
-          prompt: "test",
-        })
-        expect(message).toBe("0 0 * * *")
-      })
+          const { message } = await config.api.ai.generateCron({
+            prompt: "test",
+          })
+          expect(message).toBe("0 0 * * *")
+        })
 
-      it("handles expected LLM error", async () => {
-        mockChatGPTResponse("Error generating cron: skill issue")
+        it("handles expected LLM error", async () => {
+          mockLLMResponse("Error generating cron: skill issue")
 
-        await config.api.ai.generateCron(
-          {
-            prompt: "test",
-          },
-          { status: 400 }
-        )
-      })
+          await config.api.ai.generateCron(
+            {
+              prompt: "test",
+            },
+            { status: 400 }
+          )
+        })
 
-      it("handles unexpected LLM error", async () => {
-        mockChatGPTResponse(() => {
-          throw new Error("LLM error")
-        })
+        it("handles unexpected LLM error", async () => {
+          mockLLMResponse(() => {
+            throw new Error("LLM error")
+          })
 
-        await config.api.ai.generateCron(
-          {
-            prompt: "test",
-          },
-          { status: 500 }
-        )
-      })
-    })
-  })
+          await config.api.ai.generateCron(
+            {
+              prompt: "test",
+            },
+            { status: 500 }
+          )
+        })
+      })
+    }
+  )
 })
 
 describe("BudibaseAI", () => {
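The three `/api/ai/js` cases above pin down the extraction behaviour: a bare snippet passes through, a fenced response is unwrapped, and only the first of several fenced blocks is kept. A minimal sketch of logic that would satisfy those assertions (illustrative only; the real helper lives in the server code and is not part of this diff):

```ts
// Returns the body of the first fenced code block, or the trimmed raw text
// when no fence is present.
function extractCode(llmOutput: string): string {
  const fenced = llmOutput.match(/```(?:js)?\n([\s\S]*?)```/)
  return (fenced ? fenced[1] : llmOutput).trim()
}

extractCode("return 42") // "return 42"
extractCode("```js\nreturn 42\n```") // "return 42"
```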
@@ -0,0 +1,48 @@
+import AnthropicClient from "@anthropic-ai/sdk"
+import nock from "nock"
+import { MockLLMResponseFn, MockLLMResponseOpts } from "."
+
+let chatID = 1
+const SPACE_REGEX = /\s+/g
+
+export const mockAnthropicResponse: MockLLMResponseFn = (
+  answer: string | ((prompt: string) => string),
+  opts?: MockLLMResponseOpts
+) => {
+  return nock(opts?.host || "https://api.anthropic.com")
+    .post("/v1/messages")
+    .reply((uri: string, body: nock.Body) => {
+      const req = body as AnthropicClient.MessageCreateParamsNonStreaming
+      const prompt = req.messages[0].content
+      if (typeof prompt !== "string") {
+        throw new Error("Anthropic mock only supports string prompts")
+      }
+
+      let content
+      if (typeof answer === "function") {
+        try {
+          content = answer(prompt)
+        } catch (e) {
+          return [500, "Internal Server Error"]
+        }
+      } else {
+        content = answer
+      }
+
+      const resp: AnthropicClient.Messages.Message = {
+        id: `${chatID++}`,
+        type: "message",
+        role: "assistant",
+        model: req.model,
+        stop_reason: "end_turn",
+        usage: {
+          input_tokens: prompt.split(SPACE_REGEX).length,
+          output_tokens: content.split(SPACE_REGEX).length,
+        },
+        stop_sequence: null,
+        content: [{ type: "text", text: content }],
+      }
+      return [200, resp]
+    })
+    .persist()
+}
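The new mock intercepts `POST https://api.anthropic.com/v1/messages` with nock and replies with a canned `Message`; `.persist()` keeps the interceptor alive across repeated requests in a suite. A hedged usage sketch (standalone, not part of the diff; the API key is a dummy because nothing reaches the network):

```ts
import AnthropicClient from "@anthropic-ai/sdk"

import { mockAnthropicResponse } from "../../../tests/utilities/mocks/ai/anthropic"

async function demo() {
  mockAnthropicResponse("0 0 * * *")

  const client = new AnthropicClient({ apiKey: "test-key" })
  const message = await client.messages.create({
    model: "claude-3-5-sonnet-20240620",
    max_tokens: 1024,
    messages: [{ role: "user", content: "Generate a daily cron expression" }],
  })
  // message.content[0] is { type: "text", text: "0 0 * * *" }
}
```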
@ -111,7 +111,13 @@ export interface SCIMInnerConfig {
|
|||
|
||||
export interface SCIMConfig extends Config<SCIMInnerConfig> {}
|
||||
|
||||
export type AIProvider = "OpenAI" | "AzureOpenAI" | "BudibaseAI"
|
||||
export type AIProvider =
|
||||
| "OpenAI"
|
||||
| "Anthropic"
|
||||
| "AzureOpenAI"
|
||||
| "TogetherAI"
|
||||
| "Custom"
|
||||
| "BudibaseAI"
|
||||
|
||||
export interface ProviderConfig {
|
||||
provider: AIProvider
|
||||
|
|
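Restoring the six-member union means any consumer that switches on `provider` must handle the re-added values again. An illustrative exhaustiveness check (not from this diff) that makes the compiler flag a missed member:

```ts
function providerLabel(provider: AIProvider): string {
  switch (provider) {
    case "OpenAI":
    case "AzureOpenAI":
      return "OpenAI-compatible"
    case "Anthropic":
      return "Anthropic"
    case "TogetherAI":
      return "TogetherAI"
    case "Custom":
      return "Custom endpoint"
    case "BudibaseAI":
      return "Budibase-hosted"
    default: {
      // Adding a new AIProvider member makes this assignment a compile error.
      const unhandled: never = provider
      throw new Error(`Unhandled provider: ${unhandled}`)
    }
  }
}
```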