Merge branch 'master' into feature/view-calculations-static-formulas
Commit 089ed603bd

@@ -1,6 +1,6 @@
 {
   "$schema": "node_modules/lerna/schemas/lerna-schema.json",
-  "version": "3.8.0",
+  "version": "3.8.1",
   "npmClient": "yarn",
   "concurrency": 20,
   "command": {

@@ -47,6 +47,9 @@ export async function getConfig<T extends Config>(
 export async function save(
   config: Config
 ): Promise<{ id: string; rev: string }> {
+  if (!config._id) {
+    config._id = generateConfigID(config.type)
+  }
   const db = context.getGlobalDB()
   return db.put(config)
 }

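Note: with this change, callers no longer need to pre-generate a config ID; save() derives one from the config type when _id is absent. A minimal sketch of the new call site (values illustrative), matching how the tests in this commit invoke it:

    // _id omitted; save() fills it in via generateConfigID(config.type)
    const { id, rev } = await configs.save({
      type: ConfigType.SETTINGS,
      config: { platformUrl: "https://example.com" }, // placeholder URL
    })
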
@@ -12,7 +12,6 @@ describe("configs", () => {
 
   const setDbPlatformUrl = async (dbUrl: string) => {
     const settingsConfig = {
-      _id: configs.generateConfigID(ConfigType.SETTINGS),
       type: ConfigType.SETTINGS,
       config: {
         platformUrl: dbUrl,

@@ -20,6 +20,7 @@
   import { API } from "@/api"
   import { onMount } from "svelte"
   import { sdk } from "@budibase/shared-core"
+  import { getFormattedPlanName } from "@/helpers/planTitle"
 
   $: license = $auth.user.license
   $: upgradeUrl = `${$admin.accountPortalUrl}/portal/upgrade`

@@ -260,7 +261,11 @@
 <Layout gap="XS" noPadding>
   <Heading size="XS">Plan</Heading>
   <Layout noPadding gap="S">
-    <Body size="S">You are currently on the {license.plan.type} plan</Body>
+    <Body size="S"
+      >You are currently on the <b
+        >{getFormattedPlanName(license.plan.type)}</b
+      ></Body
+    >
     <div>
       <Body size="S"
         >If you purchase or update your plan on the account</Body

@@ -1 +1 @@
-Subproject commit 40c36f86584568d31abd6dd5b6b00dd3a458093f
+Subproject commit 4417bceb24eabdd9a8c1615fb83c4e6fe8c0c914

@@ -49,6 +49,7 @@
   "author": "Budibase",
   "license": "GPL-3.0",
   "dependencies": {
+    "@anthropic-ai/sdk": "^0.27.3",
     "@apidevtools/swagger-parser": "10.0.3",
     "@aws-sdk/client-dynamodb": "3.709.0",
     "@aws-sdk/client-s3": "3.709.0",

@@ -47,6 +47,7 @@ async function init() {
     VERSION: "0.0.0+local",
     PASSWORD_MIN_LENGTH: "1",
+    OPENAI_API_KEY: "sk-abcdefghijklmnopqrstuvwxyz1234567890abcd",
     BUDICLOUD_URL: "https://budibaseqa.app",
   }
 
   config = { ...config, ...existingConfig }

@@ -0,0 +1,328 @@
import { mockChatGPTResponse } from "../../../tests/utilities/mocks/ai/openai"
import TestConfiguration from "../../../tests/utilities/TestConfiguration"
import nock from "nock"
import { configs, env, features, setEnv } from "@budibase/backend-core"
import {
  AIInnerConfig,
  ConfigType,
  License,
  PlanModel,
  PlanType,
  ProviderConfig,
} from "@budibase/types"
import { context } from "@budibase/backend-core"
import { mocks } from "@budibase/backend-core/tests"
import { MockLLMResponseFn } from "../../../tests/utilities/mocks/ai"
import { mockAnthropicResponse } from "../../../tests/utilities/mocks/ai/anthropic"

function dedent(str: string) {
  return str
    .split("\n")
    .map(line => line.trim())
    .join("\n")
}

type SetupFn = (
  config: TestConfiguration
) => Promise<() => Promise<void> | void>
interface TestSetup {
  name: string
  setup: SetupFn
  mockLLMResponse: MockLLMResponseFn
  selfHostOnly?: boolean
}

function budibaseAI(): SetupFn {
  return async () => {
    const cleanup = setEnv({
      OPENAI_API_KEY: "test-key",
    })
    mocks.licenses.useBudibaseAI()
    return async () => {
      mocks.licenses.useCloudFree()
      cleanup()
    }
  }
}

function customAIConfig(providerConfig: Partial<ProviderConfig>): SetupFn {
  return async (config: TestConfiguration) => {
    mocks.licenses.useAICustomConfigs()

    const innerConfig: AIInnerConfig = {
      myaiconfig: {
        provider: "OpenAI",
        name: "OpenAI",
        apiKey: "test-key",
        defaultModel: "gpt-4o-mini",
        active: true,
        isDefault: true,
        ...providerConfig,
      },
    }

    const { id, rev } = await config.doInTenant(
      async () =>
        await configs.save({
          type: ConfigType.AI,
          config: innerConfig,
        })
    )

    return async () => {
      mocks.licenses.useCloudFree()

      await config.doInTenant(async () => {
        const db = context.getGlobalDB()
        await db.remove(id, rev)
      })
    }
  }
}

const providers: TestSetup[] = [
  {
    name: "OpenAI API key",
    setup: async () => {
      return setEnv({
        OPENAI_API_KEY: "test-key",
      })
    },
    mockLLMResponse: mockChatGPTResponse,
    selfHostOnly: true,
  },
  {
    name: "OpenAI API key with custom config",
    setup: customAIConfig({ provider: "OpenAI", defaultModel: "gpt-4o-mini" }),
    mockLLMResponse: mockChatGPTResponse,
  },
  {
    name: "Anthropic API key with custom config",
    setup: customAIConfig({
      provider: "Anthropic",
      defaultModel: "claude-3-5-sonnet-20240620",
    }),
    mockLLMResponse: mockAnthropicResponse,
  },
  {
    name: "BudibaseAI",
    setup: budibaseAI(),
    mockLLMResponse: mockChatGPTResponse,
  },
]

describe("AI", () => {
  const config = new TestConfiguration()

  beforeAll(async () => {
    await config.init()
  })

  afterAll(() => {
    config.end()
  })

  beforeEach(() => {
    nock.cleanAll()
  })

  describe.each(providers)(
    "provider: $name",
    ({ setup, mockLLMResponse, selfHostOnly }: TestSetup) => {
      let cleanup: () => Promise<void> | void
      beforeAll(async () => {
        cleanup = await setup(config)
      })

      afterAll(async () => {
        const maybePromise = cleanup()
        if (maybePromise) {
          await maybePromise
        }
      })

      describe("POST /api/ai/js", () => {
        let cleanup: () => void
        beforeAll(() => {
          cleanup = features.testutils.setFeatureFlags("*", {
            AI_JS_GENERATION: true,
          })
        })

        afterAll(() => {
          cleanup()
        })

        it("handles correct plain code response", async () => {
          mockLLMResponse(`return 42`)

          const { code } = await config.api.ai.generateJs({ prompt: "test" })
          expect(code).toBe("return 42")
        })

        it("handles correct markdown code response", async () => {
          mockLLMResponse(
            dedent(`
              \`\`\`js
              return 42
              \`\`\`
            `)
          )

          const { code } = await config.api.ai.generateJs({ prompt: "test" })
          expect(code).toBe("return 42")
        })

        it("handles multiple markdown code blocks returned", async () => {
          mockLLMResponse(
            dedent(`
              This:

              \`\`\`js
              return 42
              \`\`\`

              Or this:

              \`\`\`js
              return 10
              \`\`\`
            `)
          )

          const { code } = await config.api.ai.generateJs({ prompt: "test" })
          expect(code).toBe("return 42")
        })

        // TODO: handle when this happens
        it.skip("handles no code response", async () => {
          mockLLMResponse("I'm sorry, you're quite right, etc.")
          const { code } = await config.api.ai.generateJs({ prompt: "test" })
          expect(code).toBe("")
        })

        it("handles LLM errors", async () => {
          mockLLMResponse(() => {
            throw new Error("LLM error")
          })
          await config.api.ai.generateJs({ prompt: "test" }, { status: 500 })
        })
      })

      describe("POST /api/ai/cron", () => {
        it("handles correct cron response", async () => {
          mockLLMResponse("0 0 * * *")

          const { message } = await config.api.ai.generateCron({
            prompt: "test",
          })
          expect(message).toBe("0 0 * * *")
        })

        it("handles expected LLM error", async () => {
          mockLLMResponse("Error generating cron: skill issue")

          await config.api.ai.generateCron(
            {
              prompt: "test",
            },
            { status: 400 }
          )
        })

        it("handles unexpected LLM error", async () => {
          mockLLMResponse(() => {
            throw new Error("LLM error")
          })

          await config.api.ai.generateCron(
            {
              prompt: "test",
            },
            { status: 500 }
          )
        })
      })

      !selfHostOnly &&
        describe("POST /api/ai/chat", () => {
          let envCleanup: () => void
          let featureCleanup: () => void
          beforeAll(() => {
            envCleanup = setEnv({ SELF_HOSTED: false })
            featureCleanup = features.testutils.setFeatureFlags("*", {
              AI_JS_GENERATION: true,
            })
          })

          afterAll(() => {
            featureCleanup()
            envCleanup()
          })

          beforeEach(() => {
            const license: License = {
              plan: {
                type: PlanType.FREE,
                model: PlanModel.PER_USER,
                usesInvoicing: false,
              },
              features: [],
              quotas: {} as any,
              tenantId: config.tenantId,
            }
            nock(env.ACCOUNT_PORTAL_URL).get("/api/license").reply(200, license)
          })

          it("handles correct chat response", async () => {
            mockLLMResponse("Hi there!")
            const { message } = await config.api.ai.chat({
              messages: [{ role: "user", content: "Hello!" }],
              licenseKey: "test-key",
            })
            expect(message).toBe("Hi there!")
          })

          it("handles chat response error", async () => {
            mockLLMResponse(() => {
              throw new Error("LLM error")
            })
            await config.api.ai.chat(
              {
                messages: [{ role: "user", content: "Hello!" }],
                licenseKey: "test-key",
              },
              { status: 500 }
            )
          })

          it("handles no license", async () => {
            nock.cleanAll()
            nock(env.ACCOUNT_PORTAL_URL).get("/api/license").reply(404)
            await config.api.ai.chat(
              {
                messages: [{ role: "user", content: "Hello!" }],
                licenseKey: "test-key",
              },
              {
                status: 403,
              }
            )
          })

          it("handles no license key", async () => {
            await config.api.ai.chat(
              {
                messages: [{ role: "user", content: "Hello!" }],
                // @ts-expect-error - intentionally wrong
                licenseKey: undefined,
              },
              {
                status: 403,
              }
            )
          })
        })
    }
  )
})

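Note: each providers entry supplies a setup function that returns its own teardown, which is how describe.each runs the same suite against every backend. A hedged sketch of adding a further entry (the entry name and model are hypothetical, and it assumes the provider speaks the OpenAI wire format so mockChatGPTResponse applies):

    {
      name: "AzureOpenAI with custom config", // hypothetical entry
      setup: customAIConfig({
        provider: "AzureOpenAI",
        defaultModel: "some-model", // placeholder model name
      }),
      mockLLMResponse: mockChatGPTResponse,
    },
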
@@ -46,7 +46,7 @@ import { withEnv } from "../../../environment"
 import { JsTimeoutError } from "@budibase/string-templates"
 import { isDate } from "../../../utilities"
 import nock from "nock"
-import { mockChatGPTResponse } from "../../../tests/utilities/mocks/openai"
+import { mockChatGPTResponse } from "../../../tests/utilities/mocks/ai/openai"
 
 const timestamp = new Date("2023-01-26T11:48:57.597Z").toISOString()
 tk.freeze(timestamp)

@@ -44,7 +44,7 @@ import { generator, structures, mocks } from "@budibase/backend-core/tests"
 import { DEFAULT_EMPLOYEE_TABLE_SCHEMA } from "../../../db/defaultData/datasource_bb_default"
 import { generateRowIdField } from "../../../integrations/utils"
 import { cloneDeep } from "lodash/fp"
-import { mockChatGPTResponse } from "../../../tests/utilities/mocks/openai"
+import { mockChatGPTResponse } from "../../../tests/utilities/mocks/ai/openai"
 
 const descriptions = datasourceDescribe({ plus: true })
 

@@ -42,7 +42,7 @@ import { datasourceDescribe } from "../../../integrations/tests/utils"
 import merge from "lodash/merge"
 import { quotas } from "@budibase/pro"
 import { context, db, events, roles, setEnv } from "@budibase/backend-core"
-import { mockChatGPTResponse } from "../../../tests/utilities/mocks/openai"
+import { mockChatGPTResponse } from "../../../tests/utilities/mocks/ai/openai"
 import nock from "nock"
 
 const descriptions = datasourceDescribe({ plus: true })

@@ -1,11 +1,8 @@
 import { createAutomationBuilder } from "../utilities/AutomationTestBuilder"
-import { setEnv as setCoreEnv } from "@budibase/backend-core"
+import { setEnv as setCoreEnv, withEnv } from "@budibase/backend-core"
 import { Model, MonthlyQuotaName, QuotaUsageType } from "@budibase/types"
 import TestConfiguration from "../../..//tests/utilities/TestConfiguration"
-import {
-  mockChatGPTError,
-  mockChatGPTResponse,
-} from "../../../tests/utilities/mocks/openai"
+import { mockChatGPTResponse } from "../../../tests/utilities/mocks/ai/openai"
 import nock from "nock"
 import { mocks } from "@budibase/backend-core/tests"
 import { quotas } from "@budibase/pro"

@@ -83,7 +80,9 @@ describe("test the openai action", () => {
   })
 
   it("should present the correct error message when an error is thrown from the createChatCompletion call", async () => {
-    mockChatGPTError()
+    mockChatGPTResponse(() => {
+      throw new Error("oh no")
+    })
 
     const result = await expectAIUsage(0, () =>
       createAutomationBuilder(config)

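Note: the dedicated mockChatGPTError helper is retired by this commit; the callback form of mockChatGPTResponse covers the same case, because a throw inside the callback is mapped to a 500 reply (see the mock changes further down):

    // previously:
    mockChatGPTError()
    // now equivalent to:
    mockChatGPTResponse(() => {
      throw new Error("oh no")
    })
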
@@ -108,11 +107,13 @@ describe("test the openai action", () => {
     // path, because we've enabled Budibase AI. The exact value depends on a
     // calculation we use to approximate cost. This uses Budibase's OpenAI API
     // key, so we charge users for it.
-    const result = await expectAIUsage(14, () =>
-      createAutomationBuilder(config)
-        .onAppAction()
-        .openai({ model: Model.GPT_4O_MINI, prompt: "Hello, world" })
-        .test({ fields: {} })
+    const result = await withEnv({ SELF_HOSTED: false }, () =>
+      expectAIUsage(14, () =>
+        createAutomationBuilder(config)
+          .onAppAction()
+          .openai({ model: Model.GPT_4O_MINI, prompt: "Hello, world" })
+          .test({ fields: {} })
+      )
     )
 
     expect(result.steps[0].outputs.response).toEqual("This is a test")

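Note: withEnv (from @budibase/backend-core) scopes environment overrides to the callback, here forcing SELF_HOSTED: false so the Budibase AI quota path is exercised. A rough sketch of the shape this usage assumes, not the actual implementation:

    // apply overrides, run the callback, restore previous values afterwards
    async function withEnv<T>(
      overrides: Record<string, any>,
      fn: () => Promise<T>
    ): Promise<T> {
      const cleanup = setEnv(overrides)
      try {
        return await fn()
      } finally {
        cleanup()
      }
    }
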
@@ -365,7 +365,11 @@ export function createSampleDataTableScreen(): Screen {
         _component: "@budibase/standard-components/textv2",
         _styles: {
           normal: {
+            "--grid-desktop-col-start": 1,
+            "--grid-desktop-col-end": 3,
+            "--grid-desktop-row-start": 1,
+            "--grid-desktop-row-end": 3,
             "--grid-mobile-col-end": 7,
           },
           hover: {},
           active: {},

@@ -384,6 +388,7 @@ export function createSampleDataTableScreen(): Screen {
             "--grid-desktop-row-start": 1,
             "--grid-desktop-row-end": 3,
             "--grid-desktop-h-align": "end",
+            "--grid-mobile-col-start": 7,
           },
           hover: {},
           active: {},

@@ -0,0 +1,47 @@
import {
  ChatCompletionRequest,
  ChatCompletionResponse,
  GenerateCronRequest,
  GenerateCronResponse,
  GenerateJsRequest,
  GenerateJsResponse,
} from "@budibase/types"
import { Expectations, TestAPI } from "./base"
import { constants } from "@budibase/backend-core"

export class AIAPI extends TestAPI {
  generateJs = async (
    req: GenerateJsRequest,
    expectations?: Expectations
  ): Promise<GenerateJsResponse> => {
    return await this._post<GenerateJsResponse>(`/api/ai/js`, {
      body: req,
      expectations,
    })
  }

  generateCron = async (
    req: GenerateCronRequest,
    expectations?: Expectations
  ): Promise<GenerateCronResponse> => {
    return await this._post<GenerateCronResponse>(`/api/ai/cron`, {
      body: req,
      expectations,
    })
  }

  chat = async (
    req: ChatCompletionRequest & { licenseKey: string },
    expectations?: Expectations
  ): Promise<ChatCompletionResponse> => {
    const headers: Record<string, string> = {}
    if (req.licenseKey) {
      headers[constants.Header.LICENSE_KEY] = req.licenseKey
    }
    return await this._post<ChatCompletionResponse>(`/api/ai/chat`, {
      body: req,
      headers,
      expectations,
    })
  }
}

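Note: AIAPI follows the existing TestAPI pattern, so specs reach the new endpoints through config.api.ai. Usage as exercised by the test file above:

    const { code } = await config.api.ai.generateJs({ prompt: "test" })
    const { message } = await config.api.ai.generateCron({ prompt: "test" })
    // expectations let a test assert a failure status instead of a body:
    await config.api.ai.generateJs({ prompt: "test" }, { status: 500 })
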
@@ -22,8 +22,10 @@ import { UserPublicAPI } from "./public/user"
 import { MiscAPI } from "./misc"
 import { OAuth2API } from "./oauth2"
 import { AssetsAPI } from "./assets"
+import { AIAPI } from "./ai"
 
 export default class API {
+  ai: AIAPI
   application: ApplicationAPI
   attachment: AttachmentAPI
   automation: AutomationAPI

@@ -52,6 +54,7 @@ export default class API {
   }
 
   constructor(config: TestConfiguration) {
+    this.ai = new AIAPI(config)
     this.application = new ApplicationAPI(config)
     this.attachment = new AttachmentAPI(config)
     this.automation = new AutomationAPI(config)

@@ -0,0 +1,48 @@
import AnthropicClient from "@anthropic-ai/sdk"
import nock from "nock"
import { MockLLMResponseFn, MockLLMResponseOpts } from "."

let chatID = 1
const SPACE_REGEX = /\s+/g

export const mockAnthropicResponse: MockLLMResponseFn = (
  answer: string | ((prompt: string) => string),
  opts?: MockLLMResponseOpts
) => {
  return nock(opts?.host || "https://api.anthropic.com")
    .post("/v1/messages")
    .reply((uri: string, body: nock.Body) => {
      const req = body as AnthropicClient.MessageCreateParamsNonStreaming
      const prompt = req.messages[0].content
      if (typeof prompt !== "string") {
        throw new Error("Anthropic mock only supports string prompts")
      }

      let content
      if (typeof answer === "function") {
        try {
          content = answer(prompt)
        } catch (e) {
          return [500, "Internal Server Error"]
        }
      } else {
        content = answer
      }

      const resp: AnthropicClient.Messages.Message = {
        id: `${chatID++}`,
        type: "message",
        role: "assistant",
        model: req.model,
        stop_reason: "end_turn",
        usage: {
          input_tokens: prompt.split(SPACE_REGEX).length,
          output_tokens: content.split(SPACE_REGEX).length,
        },
        stop_sequence: null,
        content: [{ type: "text", text: content }],
      }
      return [200, resp]
    })
    .persist()
}

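Note: because the scope is persisted, a single mockAnthropicResponse call serves any number of requests in a test, and the nock.cleanAll() in beforeEach resets it. Typical use, mirroring the OpenAI mock:

    // static answer
    mockAnthropicResponse("0 0 * * *")
    // dynamic answer; throwing inside the callback simulates a 500 from the API
    mockAnthropicResponse(prompt => `echo: ${prompt}`)
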
@@ -0,0 +1,10 @@
import { Scope } from "nock"

export interface MockLLMResponseOpts {
  host?: string
}

export type MockLLMResponseFn = (
  answer: string | ((prompt: string) => string),
  opts?: MockLLMResponseOpts
) => Scope

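Note: MockLLMResponseFn is the contract the two mocks now share: take a canned answer (or a function of the prompt) and return the nock Scope. A minimal conforming stub for some other provider might look like this (host and path are hypothetical):

    import nock from "nock"
    import { MockLLMResponseFn } from "."

    const mockOtherLLM: MockLLMResponseFn = (answer, opts) =>
      nock(opts?.host || "https://llm.example.com") // hypothetical host
        .post("/v1/generate") // hypothetical path
        .reply(200, {
          text: typeof answer === "function" ? answer("") : answer,
        })
        .persist()
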
@@ -1,12 +1,9 @@
 import nock from "nock"
+import { MockLLMResponseFn, MockLLMResponseOpts } from "."
 
 let chatID = 1
 const SPACE_REGEX = /\s+/g
 
-interface MockChatGPTResponseOpts {
-  host?: string
-}
-
 interface Message {
   role: string
   content: string

@@ -47,19 +44,24 @@ interface ChatCompletionResponse {
   usage: Usage
 }
 
-export function mockChatGPTResponse(
+export const mockChatGPTResponse: MockLLMResponseFn = (
   answer: string | ((prompt: string) => string),
-  opts?: MockChatGPTResponseOpts
-) {
+  opts?: MockLLMResponseOpts
+) => {
   return nock(opts?.host || "https://api.openai.com")
     .post("/v1/chat/completions")
-    .reply(200, (uri: string, requestBody: ChatCompletionRequest) => {
-      const messages = requestBody.messages
+    .reply((uri: string, body: nock.Body) => {
+      const req = body as ChatCompletionRequest
+      const messages = req.messages
       const prompt = messages[0].content
 
       let content
       if (typeof answer === "function") {
-        content = answer(prompt)
+        try {
+          content = answer(prompt)
+        } catch (e) {
+          return [500, "Internal Server Error"]
+        }
       } else {
         content = answer
       }

@@ -76,7 +78,7 @@
         id: `chatcmpl-${chatID}`,
         object: "chat.completion",
         created: Math.floor(Date.now() / 1000),
-        model: requestBody.model,
+        model: req.model,
         system_fingerprint: `fp_${chatID}`,
         choices: [
           {

@@ -97,14 +99,7 @@
           },
         },
       }
-      return response
+      return [200, response]
     })
     .persist()
 }
-
-export function mockChatGPTError() {
-  return nock("https://api.openai.com")
-    .post("/v1/chat/completions")
-    .reply(500, "Internal Server Error")
-    .persist()
-}

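Note: moving from .reply(200, handler) to .reply(handler) is what allows a per-request status: nock lets a reply function return a [status, body] tuple instead of just a body. Shape of that call (the names here are hypothetical):

    .reply(() => {
      if (shouldFail) {
        return [500, "Internal Server Error"] // dynamic status
      }
      return [200, responseBody]
    })
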
@@ -1,5 +1,18 @@
 import { EnrichedBinding } from "../../ui"
 
+export interface Message {
+  role: "system" | "user"
+  content: string
+}
+
+export interface ChatCompletionRequest {
+  messages: Message[]
+}
+
+export interface ChatCompletionResponse {
+  message?: string
+}
+
 export interface GenerateJsRequest {
   prompt: string
   bindings?: EnrichedBinding[]

@@ -8,3 +21,11 @@ export interface GenerateJsRequest {
 export interface GenerateJsResponse {
   code: string
 }
+
+export interface GenerateCronRequest {
+  prompt: string
+}
+
+export interface GenerateCronResponse {
+  message?: string
+}

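Note: the cron contract stays deliberately thin; an illustrative request/response pair, matching how the endpoint tests above use it:

    const req: GenerateCronRequest = { prompt: "every day at midnight" }
    // successful response:
    const res: GenerateCronResponse = { message: "0 0 * * *" }
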
@@ -117,6 +117,7 @@ export type AIProvider =
   | "AzureOpenAI"
   | "TogetherAI"
   | "Custom"
+  | "BudibaseAI"
 
 export interface ProviderConfig {
   provider: AIProvider

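Note: "BudibaseAI" joins the AIProvider union so the hosted Budibase LLM can be configured like any other provider. An illustrative ProviderConfig literal mirroring the fields the new tests set for OpenAI (the interface is truncated in this diff, so treat the shape as a sketch):

    const budibaseProvider: ProviderConfig = {
      provider: "BudibaseAI",
      name: "Budibase AI", // illustrative display name
      apiKey: "test-key",
      active: true,
      isDefault: true,
    }
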