diff --git a/.changeset/config.json b/.changeset/config.json
index aa37f6e..feb2e4d 100644
--- a/.changeset/config.json
+++ b/.changeset/config.json
@@ -2,7 +2,7 @@
 "$schema": "https://unpkg.com/@changesets/config@1.6.3/schema.json",
 "changelog": [
 "@changesets/changelog-github",
- { "repo": "propology/hopfield" }
+ { "repo": "EnjoinHQ/hopfield" }
 ],
 "commit": false,
 "access": "public",
diff --git a/.changeset/wise-clocks-greet.md b/.changeset/wise-clocks-greet.md
new file mode 100644
index 0000000..397f987
--- /dev/null
+++ b/.changeset/wise-clocks-greet.md
@@ -0,0 +1,5 @@
+---
+"hopfield": patch
+---
+
+**Feature:** updated the model names for OpenAI to be more complete and up to date.
diff --git a/.env.example b/.env.example
index abd38a3..ff036db 100644
--- a/.env.example
+++ b/.env.example
@@ -1,2 +1 @@
 VITE_OPENAI_API_KEY=""
-VITE_REPLICATE_API_KEY=""
diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 8780f1c..7ef9683 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -2,11 +2,11 @@
 Thanks for your interest in contributing to Hopfield! Please take a moment to review this document **before submitting a pull request.**
 
-If you want to contribute, but aren't sure where to start, you can create a [new discussion](https://github.com/propology/hopfield/discussions).
+If you want to contribute, but aren't sure where to start, you can create a [new discussion](https://github.com/EnjoinHQ/hopfield/discussions).
 
 > **Note** **Please ask first before starting work on any significant new features.**
 >
-> It's never a fun experience to have your pull request declined after investing time and effort into a new feature. To avoid this from happening, we request that contributors create a [feature request](https://github.com/propology/hopfield/discussions/new?category=ideas) to first discuss any API changes or significant new ideas.
+> It's never a fun experience to have your pull request declined after investing time and effort into a new feature. To avoid this from happening, we request that contributors create a [feature request](https://github.com/EnjoinHQ/hopfield/discussions/new?category=ideas) to first discuss any API changes or significant new ideas.
@@ -38,13 +38,13 @@ This guide covers more advanced topics. Pick the topics based on your needs. To start contributing to the project, clone it to your local machine using git: ```bash -git clone https://github.com/propology/hopfield.git +git clone https://github.com/EnjoinHQ/hopfield.git ``` Or the [GitHub CLI](https://cli.github.com): ```bash -gh repo clone propology/hopfield +gh repo clone EnjoinHQ/hopfield ```
@@ -154,7 +154,7 @@ The first time a PR with a changeset is merged after a release, a new PR will au ### Creating a snapshot release -If a PR has changesets, you can create a [snapshot release](https://github.com/changesets/changesets/blob/main/docs/snapshot-releases.md) by [manually dispatching](https://github.com/propology/hopfield/actions/workflows/snapshot.yml) the Snapshot workflow. This publishes a tagged version to npm with the PR branch name and timestamp. +If a PR has changesets, you can create a [snapshot release](https://github.com/changesets/changesets/blob/main/docs/snapshot-releases.md) by [manually dispatching](https://github.com/EnjoinHQ/hopfield/actions/workflows/snapshot.yml) the Snapshot workflow. This publishes a tagged version to npm with the PR branch name and timestamp.
↑ back to top diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 63067f0..5d515c4 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -58,7 +58,7 @@ body: label: Validations description: Before submitting this issue, please make sure you do the following. options: - - label: Checked there isn't [already an issue](https://github.com/propology/hopfield/issues) that exists for the bug you encountered. + - label: Checked there isn't [already an issue](https://github.com/EnjoinHQ/hopfield/issues) that exists for the bug you encountered. required: true - - label: Read the [Contributing Guide](https://github.com/propology/hopfield/blob/main/.github/CONTRIBUTING.md). + - label: Read the [Contributing Guide](https://github.com/EnjoinHQ/hopfield/blob/main/.github/CONTRIBUTING.md). required: true diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 7a4bfcc..c7b56c4 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,8 +1,8 @@ blank_issues_enabled: false contact_links: - name: Question - url: https://github.com/propology/hopfield/discussions/new?category=q-a + url: https://github.com/EnjoinHQ/hopfield/discussions/new?category=q-a about: Ask questions and discuss with other community members. - name: Feature Request - url: https://github.com/propology/hopfield/discussions/new?category=ideas + url: https://github.com/EnjoinHQ/hopfield/discussions/new?category=ideas about: Requests features or brainstorm ideas for new functionality. diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 7a83c82..59d8aa9 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -4,6 +4,6 @@ What changes are made in this PR? Is it a feature or a bug fix? ## Additional Information -- [ ] I read the [contributing guide](https://github.com/propology/hopfield/blob/main/.github/CONTRIBUTING.md) +- [ ] I read the [contributing guide](https://github.com/EnjoinHQ/hopfield/blob/main/.github/CONTRIBUTING.md) - [ ] I added documentation related to the changes made. - [ ] I added or updated tests related to the changes made. diff --git a/.vscode/settings.json b/.vscode/settings.json index b627d0a..3b5adb0 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -4,8 +4,8 @@ "typescript.tsdk": "node_modules/typescript/lib", "typescript.enablePromptUseWorkspaceTsdk": true, "editor.codeActionsOnSave": { - "quickfix.biome": true, - "source.organizeImports.biome": true + "quickfix.biome": "explicit", + "source.organizeImports.biome": "explicit" }, "[json]": { "editor.defaultFormatter": "biomejs.biome" diff --git a/LICENSE b/LICENSE index acdc39c..81540bf 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2023-present Propology +Copyright (c) 2023-present Enjoin Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/README.md b/README.md index 1fe2e6f..9aac83b 100644 --- a/README.md +++ b/README.md @@ -3,8 +3,8 @@

- - Hopfield logo + + Hopfield logo

@@ -22,7 +22,7 @@ Downloads per month - + MIT License @@ -125,8 +125,7 @@ For full documentation, visit [hopfield.ai](https://hopfield.ai). ## Community -If you have questions or need help, reach out to the community at the [Hopfield GitHub Discussions](https://github.com/propology/hopfield/discussions) -or join the [Propology Discord](https://discord.gg/2hag5fc6) and check out the `πŸ‡-hopfield` channel. +If you have questions or need help, reach out to the community in the [Hopfield GitHub Discussions](https://github.com/EnjoinHQ/hopfield/discussions). ## Inspiration @@ -144,4 +143,4 @@ If you like Hopfield, go star them on Github too. ## Contributing -If you're interested in contributing to Hopfield, please read our [contributing docs](https://github.com/propology/hopfield/blob/main/.github/CONTRIBUTING.md) **before submitting a pull request**. +If you're interested in contributing to Hopfield, please read our [contributing docs](https://github.com/EnjoinHQ/hopfield/blob/main/.github/CONTRIBUTING.md) **before submitting a pull request**. diff --git a/biome.json b/biome.json index 15baaf4..8b4e34a 100644 --- a/biome.json +++ b/biome.json @@ -1,5 +1,5 @@ { - "$schema": "https://biomejs.dev/schemas/1.0.0/schema.json", + "$schema": "https://biomejs.dev/schemas/1.3.3/schema.json", "files": { "ignore": [ "_cjs", @@ -29,9 +29,6 @@ "correctness": { "noUnusedVariables": "error" }, - "nursery": { - "useGroupedTypeImport": "off" - }, "performance": { "noDelete": "off" }, @@ -50,7 +47,6 @@ "javascript": { "formatter": { "quoteStyle": "single", - "trailingComma": "all", "semicolons": "always" } }, diff --git a/bun.lockb b/bun.lockb index 3c7ef3a..8284d4b 100755 Binary files a/bun.lockb and b/bun.lockb differ diff --git a/docs/.vitepress/config.ts b/docs/.vitepress/config.ts index 9301462..1fd9783 100644 --- a/docs/.vitepress/config.ts +++ b/docs/.vitepress/config.ts @@ -68,7 +68,7 @@ export default withTwoslash( provider: 'local', }, editLink: { - pattern: 'https://github.com/propology/hopfield/edit/main/docs/:path', + pattern: 'https://github.com/EnjoinHQ/hopfield/edit/main/docs/:path', text: 'Suggest changes to this page', }, footer: { @@ -89,11 +89,11 @@ export default withTwoslash( items: [ { text: 'Release Notes ', - link: 'https://github.com/propology/hopfield/releases', + link: 'https://github.com/EnjoinHQ/hopfield/releases', }, { text: 'Contributing ', - link: 'https://github.com/propology/hopfield/blob/main/.github/CONTRIBUTING.md', + link: 'https://github.com/EnjoinHQ/hopfield/blob/main/.github/CONTRIBUTING.md', }, ], }, @@ -173,9 +173,8 @@ export default withTwoslash( }, siteTitle: false, socialLinks: [ - { icon: 'twitter', link: 'https://twitter.com/propology_' }, - { icon: 'discord', link: 'https://discord.gg/2hag5fc6' }, - { icon: 'github', link: 'https://github.com/propology/hopfield' }, + { icon: 'twitter', link: 'https://twitter.com/EnjoinHQ' }, + { icon: 'github', link: 'https://github.com/EnjoinHQ/hopfield' }, ], }, title: diff --git a/docs/chat/functions-streaming.md b/docs/chat/functions-streaming.md index 99fd6fe..1786e39 100644 --- a/docs/chat/functions-streaming.md +++ b/docs/chat/functions-streaming.md @@ -79,8 +79,8 @@ const response = await chat.get( ::: info Feedback -To influence these features, reach out on [Discord](https://discord.gg/2hag5fc6) or -[Github Discussions](https://github.com/propology/hopfield/discussions). +To influence these features, reach out on +[Github Discussions](https://github.com/EnjoinHQ/hopfield/discussions). We want your feedback! 
::: diff --git a/docs/chat/functions.md b/docs/chat/functions.md index 0bfe6e8..1e5b59b 100644 --- a/docs/chat/functions.md +++ b/docs/chat/functions.md @@ -156,25 +156,3 @@ if (response.choices[0].__type === "function_call") { // ^? } ``` - -### Options - -The `function` also allows an optional `options` parameter, which lets you override the runtime checks for the -schema. This includes the checks for requiring descriptions on Zod schema parameters, as well as overriding the -list of "disabled types", which are Zod types which typically produce unreliable results from an LLM. - -```ts -type HopfieldFunctionOptions = { - /** - * Allows descriptions to not be checked on the function parameters. This defaults to `true`. - */ - requireDescriptions?: boolean; - /** - * Allows you override or disable "unstable" types, which are types that do not typically - * produce good results with a given model. These are defined on a per-model basis. - * - * Set to false to allow all "unstable" types. - */ - disabledTypes?: ZodFirstPartyTypeKind[] | false; -}; -``` diff --git a/docs/chat/overview.md b/docs/chat/overview.md index b89858d..a0cc338 100644 --- a/docs/chat/overview.md +++ b/docs/chat/overview.md @@ -11,8 +11,8 @@ guarantees with Zod. ::: info API Providers We currently only support OpenAI, but are -working on adding further providers. Reach out on [Discord](https://discord.gg/2hag5fc6) or -[Github Discussions](https://github.com/propology/hopfield/discussions) if you have any suggestions! +working on adding further providers. Reach out on +[Github Discussions](https://github.com/EnjoinHQ/hopfield/discussions) if you have any suggestions! ::: diff --git a/docs/embeddings/overview.md b/docs/embeddings/overview.md index 07e60d9..07ba9b9 100644 --- a/docs/embeddings/overview.md +++ b/docs/embeddings/overview.md @@ -11,8 +11,8 @@ guarantees with Zod, and composability across providers. ::: info API Providers We currently only support OpenAI, but are -working on adding further providers. Reach out on [Discord](https://discord.gg/2hag5fc6) or -[Github Discussions](https://github.com/propology/hopfield/discussions) if you have any suggestions! +working on adding further providers. Reach out on +[Github Discussions](https://github.com/EnjoinHQ/hopfield/discussions) if you have any suggestions! ::: diff --git a/docs/frameworks/next-13.md b/docs/frameworks/next-13.md index 4743434..897a9b6 100644 --- a/docs/frameworks/next-13.md +++ b/docs/frameworks/next-13.md @@ -23,7 +23,7 @@ include any custom code for streaming token by token. ### Backpressure -The readable stream handles backpressure with a pull-based approach. See our [tests](https://github.com/propology/hopfield/blob/main/src/utils.test.ts) for how Hopfield handles backpressure. For a more detailed explanation on "backpressure" and how it factors into streaming LLM responses, please see the +The readable stream handles backpressure with a pull-based approach. See our [tests](https://github.com/EnjoinHQ/hopfield/blob/main/src/utils.test.ts) for how Hopfield handles backpressure. For a more detailed explanation on "backpressure" and how it factors into streaming LLM responses, please see the [`vercel/ai` docs](https://sdk.vercel.ai/docs/concepts/backpressure-and-cancellation). 
## Usage diff --git a/docs/index.md b/docs/index.md index 65cd049..49bee76 100644 --- a/docs/index.md +++ b/docs/index.md @@ -42,16 +42,16 @@ const members = [ Downloads per month - + MIT License MIT License - + - GitHub Repo stars - GitHub Repo stars + GitHub Repo stars + GitHub Repo stars
@@ -262,8 +262,7 @@ Oh, and liking Typescript is a nice-to-have. ## Community -If you have questions or need help, reach out to the community at the [Hopfield GitHub Discussions](https://github.com/propology/hopfield/discussions) -or join the [Propology Discord](https://discord.gg/2hag5fc6) and check out the `πŸ‡-hopfield` channel. +If you have questions or need help, reach out to the community in the [Hopfield GitHub Discussions](https://github.com/EnjoinHQ/hopfield/discussions).
diff --git a/examples/cloudflare-worker/src/index.test.ts b/examples/cloudflare-worker/src/index.test.ts index 68ccb57..fda99e6 100644 --- a/examples/cloudflare-worker/src/index.test.ts +++ b/examples/cloudflare-worker/src/index.test.ts @@ -1,4 +1,4 @@ -import { UnstableDevWorker, unstable_dev } from 'wrangler'; +import { type UnstableDevWorker, unstable_dev } from 'wrangler'; import { describe, beforeAll, afterAll, it, expect } from 'vitest'; describe('Worker', () => { diff --git a/examples/next-13/src/app/docs.ts b/examples/next-13/src/app/docs.ts index f53dd57..1511925 100644 --- a/examples/next-13/src/app/docs.ts +++ b/examples/next-13/src/app/docs.ts @@ -1,202 +1,4 @@ export const docs = `--- -description: "A detailed guide on seamlessly fetching and streaming data directly into React components." -title: "Next.js App Router with Hopfield" ---- - -# Next.js App Router - -Hopfield empowers developers to seamlessly fetch and stream data directly into Next.js React Server Components. - -## Overview - -Hopfield streaming chat provides a readableStream() which can be used to build recursive React Server Components. - -The readableStream() from Hopfield's streaming chat provider returns a [ReadableStream](https://developer.mozilla.org/en-US/docs/Web/API/ReadableStream) (available in Node 18+, or it can be polyfilled with a library like [web-streams-polyfill](https://www.npmjs.com/package/web-streams-polyfill).). - -::: info Non-streaming - -If you are not interested in using streaming, you can use the non-streaming chat provider easily with a simple RSC -that awaits the full response from chat.get(). This is not shown below, but is a much simpler integration that does not -include any custom code for streaming token by token. - -::: - -### Backpressure - -The readable stream handles backpressure with a pull-based approach. See our [tests](https://github.com/propology/hopfield/blob/main/src/utils.test.ts) for how Hopfield handles backpressure. For a more detailed explanation on "backpressure" and how it factors into streaming LLM responses, please see the -[vercel/ai docs](https://sdk.vercel.ai/docs/concepts/backpressure-and-cancellation). - -## Usage - -Here's how to use Hopfield with a recursive React Server Component using Suspense: - -tsx -import { Suspense } from "react"; -import hop from "hopfield"; -import openai from "hopfield/openai"; -import OpenAI from "openai"; - -// Set up the OpenAI client -const openaiClient = new OpenAI({ apiKey: "OPENAI_API_KEY" }); -// Pass the OpenAI client into Hopfield -const hopfield = hop.client(openai).provider(openaiClient); -// Create a streaming chat provider -const chat = hopfield.chat("gpt-3.5-turbo-16k-0613").streaming(); - -export type ChatResponseProps = { - prompt: string; -}; - -export async function ChatResponse({ prompt }: ChatResponseProps) { - // construct messages with hop.inferMessageInput - const messages: hop.inferMessageInput[] = [ - { - role: "system", - content: "You are a helpful AI assistant.", - }, - { - role: "user", - content: prompt, - }, - ]; - - const response = await chat.get( - { messages: messages }, - { - onChunk: async (value) => { - console.log(Received chunk type: \${value.choices[0].__type}); - // do something on the server with each individual chunk as it is - // streamed in - }, - onDone: async (chunks) => { - console.log(Total chunks received: \${chunks.length}); - // do something on the server when the chat completion is done - // this can be caching the response, storing in a database, etc. 
- // - // chunks is an array of all the streamed responses, so you - // can access the raw content and combine how you'd like - }, - // if you are using function calling, you can also add a onFunctionCall - // here with zod-parsed arguments - } - ); - - // pass the readableStream to the RSC - return ; -} - -type Props = { - /** - * A ReadableStream produced by Hopfield. - */ - stream: ReadableStream>; -}; - -/** - * A React Server Component that recursively renders a stream of tokens. - */ -async function Tokens(props: Props) { - const { stream } = props; - const reader = stream.getReader(); - - return ( - - - - ); -} - -type RecursiveTokensProps = { - reader: ReadableStreamDefaultReader>; -}; - -async function RecursiveTokens({ reader }: RecursiveTokensProps) { - const { done, value } = await reader.read(); - - if (done) { - return null; - } - - return ( - <> - {value.choices[0].__type === "content" ? ( - value.choices[0].delta.content - ) : ( - <> - )} - }> - - - - ); -} - -// This can be any loading indicator you want, which gets appended to the end -// of the tokens while waiting for the next token to be streamed -const LoadingDots = () => ...; - - -We create a recursive React Server Component which uses Suspense boundaries to await each token, -and show a fallback loading indicator where the next token will be rendered. - -See our [Next 13 RSC example](https://next-13.hopfield.ai) for a real-world integration -using Vercel, similar to this quick example. - -### Dive Deeper - -To deepen your understanding of how Streaming works, and how it can be further utilized within your application, -refer to the [Streaming Chat](/chat/streaming) section. -' - -'--- -description: "Comparisons between Hopfield's features and features from similar libraries." -title: "Comparisons" ---- - -# Comparisons - -No other library does what Hopfield does (inferring static LLM TypeScript types from Zod schemas), but there are some similarities with other libraries. This page compares Hopfield to other libraries. - -Comparisons strive to be as accurate and as unbiased as possible. If you use any of these libraries and feel the information could be improved, feel free to suggest changes. - -## vercel/ai - -[**ai**](https://github.com/vercel/ai) is a framework for AI-powered applications with React, Svelte, Vue, and Solid. They provide hooks to easily integrate -with a streaming text response (StreamingTextResponse) and allow a callback for function calling, as well as simple, drop-in components for React and other -frameworks. - -**Hopfield** provides a subset of these features, and focuses solely on the API interactions, and **not** on providing React components. - -Below is a comparison of the library features: - -| | **ai** | **Hopfield** | -| ---------------------- | ----------------------------------------- | ---------------------------------------------- | -| **React Components** | Easy, inflexible UI components & hooks | No UI components or hooks | -| **Typed Functions** | Streaming function calls with loose types | Strict function call types with Zod validation | -| **Framework Examples** | Multiple | Multiple | -| **Chat Providers** | Multiple | OpenAI, with support for others coming | - -## Langchain.js - -[**Langchain.js**](https://github.com/hwchase17/langchainjs) is a framework for developing applications powered by language models -with Javascript. 
Developers usually use Langchain to develop apps which connect to internal tools (like internal knowledge bases, -LLM demos, and generally in trusted environments). - -**Hopfield** is a TypeScript library that provides a subset of Langchain's features, -prioritizing inferring static types from LLM input, alongside runtime response validation and static typing. - -Below is a comparison of the library features: - -| | **Langchain.js** | **Hopfield** | -| -------------------- | ------------------------------------------- | ------------------------------------------------------------------------ | -| **Prompt Templates** | Opaque | Use string template types for type inference | -| **Type Generation** | Loose types with some Typescript helpers | Static types with Zod validation | -| **Function Calling** | Starter templates, with some Zod validation | Validation-driven, composable functions | -| **Connectors/Tools** | Many, with various integrations | Only a select few, with examples (actively being developed) | -| **Dependencies** | Many, with non-optional peer dependencies | Few, with strict bundle splitting to avoid unnecessary peer dependencies | -' - -'--- description: "Quickly add Hopfield to your TypeScript project." title: "Getting Started" --- @@ -322,293 +124,51 @@ Now that you're all set up, you are ready to dive in to the docs further! ' '--- -description: "Typescript-first LLM framework with static type inference, testability, and composability." -head: - - - meta - - name: keywords - content: ai, openai, zod, gpt, llm, ai-tools -title: "Hopfield: Typescript-first LLM framework with static type inference, testability, and composability." -titleTemplate: false +description: "Comparisons between Hopfield's features and features from similar libraries." +title: "Comparisons" --- - +No other library does what Hopfield does (inferring static LLM TypeScript types from Zod schemas), but there are some similarities with other libraries. This page compares Hopfield to other libraries. -

- Hopfield - Hopfield -

+Comparisons strive to be as accurate and as unbiased as possible. If you use any of these libraries and feel the information could be improved, feel free to suggest changes. - +## vercel/ai -Hopfield is a Typescript-first large language model framework with static type inference, testability, and composability. -Easily validate LLM responses and inputs with strong types. Flexible abstractions -with best practices baked in. +[**ai**](https://github.com/vercel/ai) is a framework for AI-powered applications with React, Svelte, Vue, and Solid. They provide hooks to easily integrate +with a streaming text response (StreamingTextResponse) and allow a callback for function calling, as well as simple, drop-in components for React and other +frameworks. -Add it to your project, along with any peer dependencies: - -::: code-group - -bash [bun] -bun i hopfield - - -bash [pnpm] -pnpm add hopfield - - -bash [npm] -npm i hopfield - - -::: - -### ready, set, hop - -See how easy it is to add composable, type-safe LLM features with Hopfield: - -::: code-group - -ts twoslash [main.ts] -// @filename: openai.ts -export const SupportCategoryEnum = z.enum([ - "ACCOUNT_ISSUES", - "BILLING_AND_PAYMENTS", - "TECHNICAL_SUPPORT", - "FEATURE_REQUESTS", - "BUG_REPORTS", - "PRODUCT_INQUIRIES", - "PASSWORD_RESET", - "SECURITY_ISSUES", - "SERVICE_OUTAGES", - "SETUP_AND_INSTALLATION", - "TROUBLESHOOTING", - "USER_GUIDES_AND_MANUALS", - "WARRANTY_AND_REPAIRS", - "ORDER_TRACKING", - "DELIVERY_ISSUES", - "RETURN_AND_REFUND", - "ACCOUNT_DELETION", - "PRIVACY_CONCERNS", - "COMPLIANCE_QUERY", - "TRAINING_AND_CERTIFICATIONS", - "PARTNER_SUPPORT", - "DEVELOPER_TOOLS", - "API_SUPPORT", - "PERFORMANCE_ISSUES", - "DATA_ISSUES", - "UPGRADE_ISSUES", - "MIGRATION_ASSISTANCE", - "SYSTEM_COMPATIBILITY", - "PAYMENT_GATEWAY_SUPPORT", - "SYSTEM_MAINTENANCE", - "RELEASE_NOTES", - "OTHERS", -]); - -import hop from "hopfield"; -import openai from "hopfield/openai"; -import OpenAI from "openai"; -import z from "zod"; - -const hopfield = hop.client(openai).provider(new OpenAI()); - -const categoryDescription = hopfield - .template() - .enum("The category of the message."); - -const classifyMessage = hopfield.function({ - name: "classifyMessage", - description: "Triage an incoming support message.", - parameters: z.object({ - summary: z.string().describe("The summary of the message."), - category: SupportCategoryEnum.describe(categoryDescription), - }), -}); - -export const chat = hopfield.chat().functions([classifyMessage]); - -// @filename: main.ts -import z from "zod"; -import { SupportCategoryEnum } from "./openai"; -const handleMessageWithCategory = async ( - category: z.infer, - message: string -) => {}; - -// ---cut--- -import hop from "hopfield"; -import { chat } from "./openai"; - -const incomingUserMessage = "How do I reset my password?"; - -const messages: hop.inferMessageInput[] = [ - { - content: incomingUserMessage, - role: "user", - }, -]; - -const parsed = await chat.get({ - messages, -}); - -if (parsed.choices[0].__type === "function_call") { - // ^? - const category = parsed.choices[0].message.function_call.arguments.category; - await handleMessageWithCategory(category, incomingUserMessage); - // ^? 
-} - - -ts twoslash [openai.ts] -export const SupportCategoryEnum = z.enum([ - "ACCOUNT_ISSUES", - "BILLING_AND_PAYMENTS", - "TECHNICAL_SUPPORT", - "FEATURE_REQUESTS", - "BUG_REPORTS", - "PRODUCT_INQUIRIES", - "PASSWORD_RESET", - "SECURITY_ISSUES", - "SERVICE_OUTAGES", - "SETUP_AND_INSTALLATION", - "TROUBLESHOOTING", - "USER_GUIDES_AND_MANUALS", - "WARRANTY_AND_REPAIRS", - "ORDER_TRACKING", - "DELIVERY_ISSUES", - "RETURN_AND_REFUND", - "ACCOUNT_DELETION", - "PRIVACY_CONCERNS", - "COMPLIANCE_QUERY", - "TRAINING_AND_CERTIFICATIONS", - "PARTNER_SUPPORT", - "DEVELOPER_TOOLS", - "API_SUPPORT", - "PERFORMANCE_ISSUES", - "DATA_ISSUES", - "UPGRADE_ISSUES", - "MIGRATION_ASSISTANCE", - "SYSTEM_COMPATIBILITY", - "PAYMENT_GATEWAY_SUPPORT", - "SYSTEM_MAINTENANCE", - "RELEASE_NOTES", - "OTHERS", -]); - -// ---cut--- -import hop from "hopfield"; -import openai from "hopfield/openai"; -import OpenAI from "openai"; -import z from "zod"; - -const hopfield = hop.client(openai).provider(new OpenAI()); - -const categoryDescription = hopfield - .template() - .enum("The category of the message."); - -const classifyMessage = hopfield.function({ - name: "classifyMessage", - description: "Triage an incoming support message.", - parameters: z.object({ - summary: z.string().describe("The summary of the message."), - category: SupportCategoryEnum.describe(categoryDescription), - // ^? - }), -}); - -export const chat = hopfield.chat().functions([classifyMessage]); - - -::: - -## TL;DR - -Hopfield might be a good fit for your project if: - -- πŸ—οΈ You build with Typescript/Javascript, and have your database schemas in these languages (e.g. [Prisma](https://www.prisma.io/) and/or [Next.js](https://nextjs.org/)). -- πŸͺ¨ You don't need a heavyweight LLM orchestration framework that ships with a ton of dependencies you'll never use. -- πŸ€™ You're using OpenAI function calling and/or custom tools, and want Typescript-native features for them (e.g. validations w/ [Zod](https://github.com/colinhacks/zod)). -- πŸ’¬ You're building complex LLM interactions which use memory & [RAG](https://www.promptingguide.ai/techniques/rag), evaluation, and orchestration (_coming soonβ„’_). -- πŸ“ You want best-practice, extensible templates, which use [string literal types](https://www.typescriptlang.org/docs/handbook/2/template-literal-types.html) - under the hood for transparency. - -Oh, and liking Typescript is a nice-to-have. - -## Guiding principles - -- πŸŒ€ We are Typescript-first, and only support TS (or JS) - with services like [Replicate](https://replicate.com/) or [OpenAI](https://platform.openai.com/docs/introduction), why do you need Python? -- 🀏 We provide a simple, ejectable interface with common LLM use-cases. This is aligned 1-1 with LLM provider abstractions, like OpenAI's. -- πŸͺ’ We explicitly _don't_ provide a ton of custom tools (please don't ask for too many πŸ˜…) outside of the building blocks and simple examples provided. Other frameworks provide these, but when you use them, you soon realize the tool you want is very use-case specific. -- πŸ§ͺ We (will) provide evaluation frameworks which let you simulate user scenarios and backend interactions with the LLM, including multi-turn conversations and function calling. -- 🐢 We support Node.js, Vercel Edge Functions, Cloudflare Workers, and more (oh and even web, if you like giving away API keys). 
- -## Community - -If you have questions or need help, reach out to the community at the [Hopfield GitHub Discussions](https://github.com/propology/hopfield/discussions) -or join the [Propology Discord](https://discord.gg/2hag5fc6) and check out the πŸ‡-hopfield channel. +**Hopfield** provides a subset of these features, and focuses solely on the API interactions, and **not** on providing React components. -
- -
+Below is a comparison of the library features: -## Learn more +| | **ai** | **Hopfield** | +| ---------------------- | ----------------------------------------- | ---------------------------------------------- | +| **React Components** | Easy, inflexible UI components & hooks | No UI components or hooks | +| **Typed Functions** | Streaming function calls with loose types | Strict function call types with Zod validation | +| **Framework Examples** | Multiple | Multiple | +| **Chat Providers** | Multiple | OpenAI, with support for others coming | -Read the [Getting Started](/guide/getting-started) guide to learn more how to use Hopfield. +## Langchain.js -### Inspiration +[**Langchain.js**](https://github.com/hwchase17/langchainjs) is a framework for developing applications powered by language models +with Javascript. Developers usually use Langchain to develop apps which connect to internal tools (like internal knowledge bases, +LLM demos, and generally in trusted environments). -Shoutout to these projects which inspired us: +**Hopfield** is a TypeScript library that provides a subset of Langchain's features, +prioritizing inferring static types from LLM input, alongside runtime response validation and static typing. -- [Zod](https://github.com/colinhacks/zod) -- [zod-to-json-schema](https://github.com/StefanTerdell/zod-to-json-schema) -- [Autochain](https://github.com/Forethought-Technologies/AutoChain) -- [Langchain.js](https://github.com/hwchase17/langchainjs) -- [simpleaichat](https://github.com/minimaxir/simpleaichat) -- [Auto-GPT](https://github.com/Significant-Gravitas/Auto-GPT) -- [abitype](https://github.com/wagmi-dev/abitype) +Below is a comparison of the library features: -If you like Hopfield, go star them on Github too. +| | **Langchain.js** | **Hopfield** | +| -------------------- | ------------------------------------------- | ------------------------------------------------------------------------ | +| **Prompt Templates** | Opaque | Use string template types for type inference | +| **Type Generation** | Loose types with some Typescript helpers | Static types with Zod validation | +| **Function Calling** | Starter templates, with some Zod validation | Validation-driven, composable functions | +| **Connectors/Tools** | Many, with various integrations | Only a select few, with examples (actively being developed) | +| **Dependencies** | Many, with non-optional peer dependencies | Few, with strict bundle splitting to avoid unnecessary peer dependencies | ' '--- @@ -624,8 +184,8 @@ guarantees with Zod, and composability across providers. ::: info API Providers We currently only support OpenAI, but are -working on adding further providers. Reach out on [Discord](https://discord.gg/2hag5fc6) or -[Github Discussions](https://github.com/propology/hopfield/discussions) if you have any suggestions! +working on adding further providers. Reach out on +[Github Discussions](https://github.com/EnjoinHQ/hopfield/discussions) if you have any suggestions! ::: @@ -787,114 +347,26 @@ const thirdEmbeddingLength = response.data[2].embedding.length; ' '--- -description: "Hopfield makes streaming with LLM function calling seamless." -title: "Chat - Functions with Streaming" +description: "An overview of working with chat models in Hopfield." +title: "Overview of Chat Models" --- -# Functions with Streaming +# Chat -Hopfield makes it easy to use streaming with function calling. -You define validation-driven functions which get passed to the LLM. 
+Hopfield also provides simple APIs for interacting with chat models. It has different API providers with type +guarantees with Zod. + +::: info API Providers + +We currently only support OpenAI, but are +working on adding further providers. Reach out on +[Github Discussions](https://github.com/EnjoinHQ/hopfield/discussions) if you have any suggestions! + +::: ## Usage -Use streaming function calling like: - -ts twoslash -const takeAction = async ( - name: string, - args: { - location: string; - unit: "celsius" | "fahrenheit"; - } -) => {}; -// ---cut--- -import z from "zod"; -import hop from "hopfield"; -import openai from "hopfield/openai"; -import OpenAI from "openai"; - -const hopfield = hop.client(openai).provider(new OpenAI()); - -const weatherFunction = hopfield.function({ - name: "getCurrentWeather", - description: "Get the current weather in a given location", - parameters: z.object({ - location: z.string().describe("The city and state, e.g. San Francisco, CA"), - unit: z - .enum(["celsius", "fahrenheit"]) - .describe(hopfield.template().enum("The unit for the temperature.")), - }), -}); - -const chat = hopfield.chat().streaming().functions([weatherFunction]); - -const messages: hop.inferMessageInput[] = [ - { - role: "user", - content: "What's the weather in San Jose?", - }, -]; - -const response = await chat.get( - { - messages, - }, - { - onChunk(chunk) { - console.log(Received chunk type: \${chunk.choices[0].__type}); - // do something on the server with each individual chunk as it is - // streamed in - }, - onDone(chunks) { - console.log(Total chunks received: \${chunks.length}); - // do something on the server when the chat completion is done - // this can be caching the response, storing in a database, etc. - // - // chunks is an array of all the streamed responses, so you - // can access the raw content and combine how you'd like - }, - async onFunctionCall(fn) { - // do something based on the function call result - this - // is parsed by your function definition with zod, and - // the arguments are coerced into the object shape you expect - await takeAction(fn.name, fn.arguments); - // ^? - }, - } -); - - -::: info Feedback - -To influence these features, reach out on [Discord](https://discord.gg/2hag5fc6) or -[Github Discussions](https://github.com/propology/hopfield/discussions). -We want your feedback! - -::: -' - -'--- -description: "An overview of working with chat models in Hopfield." -title: "Overview of Chat Models" ---- - -# Chat - -Hopfield also provides simple APIs for interacting with chat models. It has different API providers with type -guarantees with Zod. - -::: info API Providers - -We currently only support OpenAI, but are -working on adding further providers. Reach out on [Discord](https://discord.gg/2hag5fc6) or -[Github Discussions](https://github.com/propology/hopfield/discussions) if you have any suggestions! - -::: - -## Usage - -Check out how we type responses: +Check out how we type responses: ts twoslash import hop from "hopfield"; @@ -985,6 +457,94 @@ in influencing this! ::: ' +'--- +description: "Hopfield makes streaming with LLM function calling seamless." +title: "Chat - Functions with Streaming" +--- + +# Functions with Streaming + +Hopfield makes it easy to use streaming with function calling. +You define validation-driven functions which get passed to the LLM. 
+ +## Usage + +Use streaming function calling like: + +ts twoslash +const takeAction = async ( + name: string, + args: { + location: string; + unit: "celsius" | "fahrenheit"; + } +) => {}; +// ---cut--- +import z from "zod"; +import hop from "hopfield"; +import openai from "hopfield/openai"; +import OpenAI from "openai"; + +const hopfield = hop.client(openai).provider(new OpenAI()); + +const weatherFunction = hopfield.function({ + name: "getCurrentWeather", + description: "Get the current weather in a given location", + parameters: z.object({ + location: z.string().describe("The city and state, e.g. San Francisco, CA"), + unit: z + .enum(["celsius", "fahrenheit"]) + .describe(hopfield.template().enum("The unit for the temperature.")), + }), +}); + +const chat = hopfield.chat().streaming().functions([weatherFunction]); + +const messages: hop.inferMessageInput[] = [ + { + role: "user", + content: "What's the weather in San Jose?", + }, +]; + +const response = await chat.get( + { + messages, + }, + { + onChunk(chunk) { + console.log(Received chunk type: \${chunk.choices[0].__type}); + // do something on the server with each individual chunk as it is + // streamed in + }, + onDone(chunks) { + console.log(Total chunks received: \${chunks.length}); + // do something on the server when the chat completion is done + // this can be caching the response, storing in a database, etc. + // + // chunks is an array of all the streamed responses, so you + // can access the raw content and combine how you'd like + }, + async onFunctionCall(fn) { + // do something based on the function call result - this + // is parsed by your function definition with zod, and + // the arguments are coerced into the object shape you expect + await takeAction(fn.name, fn.arguments); + // ^? + }, + } +); + + +::: info Feedback + +To influence these features, reach out on +[Github Discussions](https://github.com/EnjoinHQ/hopfield/discussions). +We want your feedback! + +::: +' + '--- description: "Deep dive into how to get streaming chat responses with Hopfield." title: "Chat - Non-streaming" @@ -1061,14 +621,16 @@ See how to use streaming results combined with type-driven prompt templates in t ' '--- -description: "Deep dive into how to get non-streaming chat responses with Hopfield." -title: "Chat - Non-streaming" +description: "Hopfield makes LLM function calling seamless." +title: "Chat - Functions" --- -# Non-streaming chat +# Functions -Hopfield provides a simple way to interact with chat models. You can use different -API providers with type guarantees with Zod. +Hopfield lets you define validation-driven functions which can be passed to the LLM. +This lets you clearly build functions, which get transformed to JSON schema with +[zod-to-json-schema](https://github.com/StefanTerdell/zod-to-json-schema), so +the LLM can use these as tools. ## Usage @@ -1078,179 +640,65 @@ ts twoslash import hop from "hopfield"; import openai from "hopfield/openai"; import OpenAI from "openai"; +import z from "zod"; const hopfield = hop.client(openai).provider(new OpenAI()); -const chat = hopfield.chat(); +const weatherFunction = hopfield.function({ + name: "getCurrentWeather", + description: "Get the current weather in a given location", + parameters: z.object({ + location: z.string().describe("The city and state, e.g. 
San Francisco, CA"), + unit: z + .enum(["celsius", "fahrenheit"]) + .describe(hopfield.template().enum("The unit for the temperature.")), + }), +}); + +const chat = hopfield.chat().functions([weatherFunction]); const messages: hop.inferMessageInput[] = [ { role: "user", - content: "How do you count to ten?", + content: "What's the weather in Phoenix, AZ?", }, ]; const response = await chat.get({ messages, + temperature: 0, }); -const responseType = response.choices[0].__type; -// ^? -if (responseType === "stop") { - const message = response.choices[0].message; - // ^? -} - - -## Parameters - -### Model Name - -The model name to use for the embedding. +const choice = response.choices[0]; -ts -const hopfield = hop.client(openai).provider(new OpenAI()); +if (choice.__type === "function_call") { + // ^? + const functionParams = choice.message.function_call; + // ^? +} -const chat = hopfield.chat("gpt-4-0613"); // [!code focus] +The input function definition will be validated to make sure that: -#### OpenAI +1. Descriptions are provided for every argument. +2. No error-prone types are used in parameters (for OpenAI, this includes ZodTuple, ZodBigInt, and ZodAny). +3. If a type in the JSON schema performs better with a templated description (like enum), it is checked against the template. -The default model name is shown below. To override this, you must use -a model which is enabled on your OpenAI account. +All of these checks are entirely customizable and can be overridden/disabled. -ts twoslash -import type { DefaultOpenAIChatModelName } from "hopfield/openai"; -// ^? +## Parameters +### Function Definition -All possible model names are shown below (reach out if we are missing one!) +The function takes a name, description, and a Zod schema +for the parameters which can be passed into it. These are all required fields to define a function, +and are used to construct the JSON schema definition for the function, to be passed to the LLM. ts twoslash -import type { OpenAIChatModelName } from "hopfield/openai"; -// ^? - - ---- - -### Response Count - -The number of chat responses to be returned (this is usually referred to as n). -For all providers, this defaults to 1. -This is capped at 20. - -ts -const hopfield = hop.client(openai).provider(new OpenAI()); - -const chat = hopfield.chat("gpt-4-0613", 10); // [!code focus] - - -The response can then be safely used: - -ts twoslash -import hop from "hopfield"; -import openai from "hopfield/openai"; -import OpenAI from "openai"; - -const hopfield = hop.client(openai).provider(new OpenAI()); - -const chat = hopfield.chat("gpt-4-0613", 10); -// ---cut--- -const messages: hop.inferMessageInput[] = [ - { - role: "user", - content: "What's the best way to get a bunch of chat responses?", - }, -]; - -const response = await chat.get({ - messages, -}); - -const chatCount = response.choices.length; -// ^? - -' - -'--- -description: "Hopfield makes LLM function calling seamless." -title: "Chat - Functions" ---- - -# Functions - -Hopfield lets you define validation-driven functions which can be passed to the LLM. -This lets you clearly build functions, which get transformed to JSON schema with -[zod-to-json-schema](https://github.com/StefanTerdell/zod-to-json-schema), so -the LLM can use these as tools. 
- -## Usage - -Use chat models from OpenAI: - -ts twoslash -import hop from "hopfield"; -import openai from "hopfield/openai"; -import OpenAI from "openai"; -import z from "zod"; - -const hopfield = hop.client(openai).provider(new OpenAI()); - -const weatherFunction = hopfield.function({ - name: "getCurrentWeather", - description: "Get the current weather in a given location", - parameters: z.object({ - location: z.string().describe("The city and state, e.g. San Francisco, CA"), - unit: z - .enum(["celsius", "fahrenheit"]) - .describe(hopfield.template().enum("The unit for the temperature.")), - }), -}); - -const chat = hopfield.chat().functions([weatherFunction]); - -const messages: hop.inferMessageInput[] = [ - { - role: "user", - content: "What's the weather in Phoenix, AZ?", - }, -]; - -const response = await chat.get({ - messages, - temperature: 0, -}); - -const choice = response.choices[0]; - -if (choice.__type === "function_call") { - // ^? - const functionParams = choice.message.function_call; - // ^? -} - - -The input function definition will be validated to make sure that: - -1. Descriptions are provided for every argument. -2. No error-prone types are used in parameters (for OpenAI, this includes ZodTuple, ZodBigInt, and ZodAny). -3. If a type in the JSON schema performs better with a templated description (like enum), it is checked against the template. - -All of these checks are entirely customizable and can be overridden/disabled. - -## Parameters - -### Function Definition - -The function takes a name, description, and a Zod schema -for the parameters which can be passed into it. These are all required fields to define a function, -and are used to construct the JSON schema definition for the function, to be passed to the LLM. - -ts twoslash -import hop from "hopfield"; -import openai from "hopfield/openai"; -import OpenAI from "openai"; -import z from "zod"; +import hop from "hopfield"; +import openai from "hopfield/openai"; +import OpenAI from "openai"; +import z from "zod"; export const SupportCategoryEnum = z.enum([ "ACCOUNT_ISSUES", @@ -1330,28 +778,557 @@ if (response.choices[0].__type === "function_call") { // ^? } +' + +'--- +description: "Deep dive into how to get non-streaming chat responses with Hopfield." +title: "Chat - Non-streaming" +--- + +# Non-streaming chat + +Hopfield provides a simple way to interact with chat models. You can use different +API providers with type guarantees with Zod. + +## Usage + +Use chat models from OpenAI: + +ts twoslash +import hop from "hopfield"; +import openai from "hopfield/openai"; +import OpenAI from "openai"; + +const hopfield = hop.client(openai).provider(new OpenAI()); + +const chat = hopfield.chat(); + +const messages: hop.inferMessageInput[] = [ + { + role: "user", + content: "How do you count to ten?", + }, +]; + +const response = await chat.get({ + messages, +}); + +const responseType = response.choices[0].__type; +// ^? +if (responseType === "stop") { + const message = response.choices[0].message; + // ^? +} + + +## Parameters -### Options +### Model Name -The function also allows an optional options parameter, which lets you override the runtime checks for the -schema. This includes the checks for requiring descriptions on Zod schema parameters, as well as overriding the -list of "disabled types", which are Zod types which typically produce unreliable results from an LLM. +The model name to use for the embedding. 
ts -type HopfieldFunctionOptions = { - /** - * Allows descriptions to not be checked on the function parameters. This defaults to true. - */ - requireDescriptions?: boolean; - /** - * Allows you override or disable "unstable" types, which are types that do not typically - * produce good results with a given model. These are defined on a per-model basis. - * - * Set to false to allow all "unstable" types. - */ - disabledTypes?: ZodFirstPartyTypeKind[] | false; -}; +const hopfield = hop.client(openai).provider(new OpenAI()); + +const chat = hopfield.chat("gpt-4-0613"); // [!code focus] + + +#### OpenAI + +The default model name is shown below. To override this, you must use +a model which is enabled on your OpenAI account. + +ts twoslash +import type { DefaultOpenAIChatModelName } from "hopfield/openai"; +// ^? + + +All possible model names are shown below (reach out if we are missing one!) + +ts twoslash +import type { OpenAIChatModelName } from "hopfield/openai"; +// ^? + + +--- + +### Response Count + +The number of chat responses to be returned (this is usually referred to as n). +For all providers, this defaults to 1. +This is capped at 20. + +ts +const hopfield = hop.client(openai).provider(new OpenAI()); + +const chat = hopfield.chat("gpt-4-0613", 10); // [!code focus] + + +The response can then be safely used: + +ts twoslash +import hop from "hopfield"; +import openai from "hopfield/openai"; +import OpenAI from "openai"; + +const hopfield = hop.client(openai).provider(new OpenAI()); + +const chat = hopfield.chat("gpt-4-0613", 10); +// ---cut--- +const messages: hop.inferMessageInput[] = [ + { + role: "user", + content: "What's the best way to get a bunch of chat responses?", + }, +]; + +const response = await chat.get({ + messages, +}); + +const chatCount = response.choices.length; +// ^? + +' + +'--- +description: "Typescript-first LLM framework with static type inference, testability, and composability." +head: + - - meta + - name: keywords + content: ai, openai, zod, gpt, llm, ai-tools +title: "Hopfield: Typescript-first LLM framework with static type inference, testability, and composability." +titleTemplate: false +--- + + + +

+ Hopfield + Hopfield +

+ + + +Hopfield is a Typescript-first large language model framework with static type inference, testability, and composability. +Easily validate LLM responses and inputs with strong types. Flexible abstractions +with best practices baked in. + +Add it to your project, along with any peer dependencies: + +::: code-group + +bash [bun] +bun i hopfield + + +bash [pnpm] +pnpm add hopfield + + +bash [npm] +npm i hopfield + + +::: + +### ready, set, hop + +See how easy it is to add composable, type-safe LLM features with Hopfield: + +::: code-group + +ts twoslash [main.ts] +// @filename: openai.ts +export const SupportCategoryEnum = z.enum([ + "ACCOUNT_ISSUES", + "BILLING_AND_PAYMENTS", + "TECHNICAL_SUPPORT", + "FEATURE_REQUESTS", + "BUG_REPORTS", + "PRODUCT_INQUIRIES", + "PASSWORD_RESET", + "SECURITY_ISSUES", + "SERVICE_OUTAGES", + "SETUP_AND_INSTALLATION", + "TROUBLESHOOTING", + "USER_GUIDES_AND_MANUALS", + "WARRANTY_AND_REPAIRS", + "ORDER_TRACKING", + "DELIVERY_ISSUES", + "RETURN_AND_REFUND", + "ACCOUNT_DELETION", + "PRIVACY_CONCERNS", + "COMPLIANCE_QUERY", + "TRAINING_AND_CERTIFICATIONS", + "PARTNER_SUPPORT", + "DEVELOPER_TOOLS", + "API_SUPPORT", + "PERFORMANCE_ISSUES", + "DATA_ISSUES", + "UPGRADE_ISSUES", + "MIGRATION_ASSISTANCE", + "SYSTEM_COMPATIBILITY", + "PAYMENT_GATEWAY_SUPPORT", + "SYSTEM_MAINTENANCE", + "RELEASE_NOTES", + "OTHERS", +]); + +import hop from "hopfield"; +import openai from "hopfield/openai"; +import OpenAI from "openai"; +import z from "zod"; + +const hopfield = hop.client(openai).provider(new OpenAI()); + +const categoryDescription = hopfield + .template() + .enum("The category of the message."); + +const classifyMessage = hopfield.function({ + name: "classifyMessage", + description: "Triage an incoming support message.", + parameters: z.object({ + summary: z.string().describe("The summary of the message."), + category: SupportCategoryEnum.describe(categoryDescription), + }), +}); + +export const chat = hopfield.chat().functions([classifyMessage]); + +// @filename: main.ts +import z from "zod"; +import { SupportCategoryEnum } from "./openai"; +const handleMessageWithCategory = async ( + category: z.infer, + message: string +) => {}; + +// ---cut--- +import hop from "hopfield"; +import { chat } from "./openai"; + +const incomingUserMessage = "How do I reset my password?"; + +const messages: hop.inferMessageInput[] = [ + { + content: incomingUserMessage, + role: "user", + }, +]; + +const parsed = await chat.get({ + messages, +}); + +if (parsed.choices[0].__type === "function_call") { + // ^? + const category = parsed.choices[0].message.function_call.arguments.category; + await handleMessageWithCategory(category, incomingUserMessage); + // ^? 
+} + + +ts twoslash [openai.ts] +export const SupportCategoryEnum = z.enum([ + "ACCOUNT_ISSUES", + "BILLING_AND_PAYMENTS", + "TECHNICAL_SUPPORT", + "FEATURE_REQUESTS", + "BUG_REPORTS", + "PRODUCT_INQUIRIES", + "PASSWORD_RESET", + "SECURITY_ISSUES", + "SERVICE_OUTAGES", + "SETUP_AND_INSTALLATION", + "TROUBLESHOOTING", + "USER_GUIDES_AND_MANUALS", + "WARRANTY_AND_REPAIRS", + "ORDER_TRACKING", + "DELIVERY_ISSUES", + "RETURN_AND_REFUND", + "ACCOUNT_DELETION", + "PRIVACY_CONCERNS", + "COMPLIANCE_QUERY", + "TRAINING_AND_CERTIFICATIONS", + "PARTNER_SUPPORT", + "DEVELOPER_TOOLS", + "API_SUPPORT", + "PERFORMANCE_ISSUES", + "DATA_ISSUES", + "UPGRADE_ISSUES", + "MIGRATION_ASSISTANCE", + "SYSTEM_COMPATIBILITY", + "PAYMENT_GATEWAY_SUPPORT", + "SYSTEM_MAINTENANCE", + "RELEASE_NOTES", + "OTHERS", +]); + +// ---cut--- +import hop from "hopfield"; +import openai from "hopfield/openai"; +import OpenAI from "openai"; +import z from "zod"; + +const hopfield = hop.client(openai).provider(new OpenAI()); + +const categoryDescription = hopfield + .template() + .enum("The category of the message."); + +const classifyMessage = hopfield.function({ + name: "classifyMessage", + description: "Triage an incoming support message.", + parameters: z.object({ + summary: z.string().describe("The summary of the message."), + category: SupportCategoryEnum.describe(categoryDescription), + // ^? + }), +}); + +export const chat = hopfield.chat().functions([classifyMessage]); + + +::: + +## TL;DR + +Hopfield might be a good fit for your project if: + +- πŸ—οΈ You build with Typescript/Javascript, and have your database schemas in these languages (e.g. [Prisma](https://www.prisma.io/) and/or [Next.js](https://nextjs.org/)). +- πŸͺ¨ You don't need a heavyweight LLM orchestration framework that ships with a ton of dependencies you'll never use. +- πŸ€™ You're using OpenAI function calling and/or custom tools, and want Typescript-native features for them (e.g. validations w/ [Zod](https://github.com/colinhacks/zod)). +- πŸ’¬ You're building complex LLM interactions which use memory & [RAG](https://www.promptingguide.ai/techniques/rag), evaluation, and orchestration (_coming soonβ„’_). +- πŸ“ You want best-practice, extensible templates, which use [string literal types](https://www.typescriptlang.org/docs/handbook/2/template-literal-types.html) + under the hood for transparency. + +Oh, and liking Typescript is a nice-to-have. + +## Guiding principles + +- πŸŒ€ We are Typescript-first, and only support TS (or JS) - with services like [Replicate](https://replicate.com/) or [OpenAI](https://platform.openai.com/docs/introduction), why do you need Python? +- 🀏 We provide a simple, ejectable interface with common LLM use-cases. This is aligned 1-1 with LLM provider abstractions, like OpenAI's. +- πŸͺ’ We explicitly _don't_ provide a ton of custom tools (please don't ask for too many πŸ˜…) outside of the building blocks and simple examples provided. Other frameworks provide these, but when you use them, you soon realize the tool you want is very use-case specific. +- πŸ§ͺ We (will) provide evaluation frameworks which let you simulate user scenarios and backend interactions with the LLM, including multi-turn conversations and function calling. +- 🐢 We support Node.js, Vercel Edge Functions, Cloudflare Workers, and more (oh and even web, if you like giving away API keys). + +## Community + +If you have questions or need help, reach out to the community in the [Hopfield GitHub Discussions](https://github.com/EnjoinHQ/hopfield/discussions). + +
+ +
+ +## Learn more + +Read the [Getting Started](/guide/getting-started) guide to learn more how to use Hopfield. + +### Inspiration + +Shoutout to these projects which inspired us: + +- [Zod](https://github.com/colinhacks/zod) +- [zod-to-json-schema](https://github.com/StefanTerdell/zod-to-json-schema) +- [Autochain](https://github.com/Forethought-Technologies/AutoChain) +- [Langchain.js](https://github.com/hwchase17/langchainjs) +- [simpleaichat](https://github.com/minimaxir/simpleaichat) +- [Auto-GPT](https://github.com/Significant-Gravitas/Auto-GPT) +- [abitype](https://github.com/wagmi-dev/abitype) + +If you like Hopfield, go star them on Github too. +' + +'--- +description: "A detailed guide on seamlessly fetching and streaming data directly into React components." +title: "Next.js App Router with Hopfield" +--- + +# Next.js App Router + +Hopfield empowers developers to seamlessly fetch and stream data directly into Next.js React Server Components. + +## Overview + +Hopfield streaming chat provides a readableStream() which can be used to build recursive React Server Components. + +The readableStream() from Hopfield's streaming chat provider returns a [ReadableStream](https://developer.mozilla.org/en-US/docs/Web/API/ReadableStream) (available in Node 18+, or it can be polyfilled with a library like [web-streams-polyfill](https://www.npmjs.com/package/web-streams-polyfill).). + +::: info Non-streaming + +If you are not interested in using streaming, you can use the non-streaming chat provider easily with a simple RSC +that awaits the full response from chat.get(). This is not shown below, but is a much simpler integration that does not +include any custom code for streaming token by token. + +::: + +### Backpressure + +The readable stream handles backpressure with a pull-based approach. See our [tests](https://github.com/EnjoinHQ/hopfield/blob/main/src/utils.test.ts) for how Hopfield handles backpressure. For a more detailed explanation on "backpressure" and how it factors into streaming LLM responses, please see the +[vercel/ai docs](https://sdk.vercel.ai/docs/concepts/backpressure-and-cancellation). + +## Usage + +Here's how to use Hopfield with a recursive React Server Component using Suspense: + +tsx +import { Suspense } from "react"; +import hop from "hopfield"; +import openai from "hopfield/openai"; +import OpenAI from "openai"; + +// Set up the OpenAI client +const openaiClient = new OpenAI({ apiKey: "OPENAI_API_KEY" }); +// Pass the OpenAI client into Hopfield +const hopfield = hop.client(openai).provider(openaiClient); +// Create a streaming chat provider +const chat = hopfield.chat("gpt-3.5-turbo-16k-1106").streaming(); + +export type ChatResponseProps = { + prompt: string; +}; + +export async function ChatResponse({ prompt }: ChatResponseProps) { + // construct messages with hop.inferMessageInput + const messages: hop.inferMessageInput[] = [ + { + role: "system", + content: "You are a helpful AI assistant.", + }, + { + role: "user", + content: prompt, + }, + ]; + + const response = await chat.get( + { messages: messages }, + { + onChunk: async (value) => { + console.log(Received chunk type: \${value.choices[0].__type}); + // do something on the server with each individual chunk as it is + // streamed in + }, + onDone: async (chunks) => { + console.log(Total chunks received: \${chunks.length}); + // do something on the server when the chat completion is done + // this can be caching the response, storing in a database, etc. 
        //
        // chunks is an array of all the streamed responses, so you
        // can access the raw content and combine how you'd like
      },
      // if you are using function calling, you can also add an onFunctionCall
      // here with zod-parsed arguments
    }
  );

  // pass the readableStream to the RSC
  return <Tokens stream={response.readableStream()} />;
}

type Props = {
  /**
   * A ReadableStream produced by Hopfield.
   */
  stream: ReadableStream<hop.inferResult<typeof chat>>;
};

/**
 * A React Server Component that recursively renders a stream of tokens.
 */
async function Tokens(props: Props) {
  const { stream } = props;
  const reader = stream.getReader();

  return (
    <Suspense>
      <RecursiveTokens reader={reader} />
    </Suspense>
  );
}

type RecursiveTokensProps = {
  reader: ReadableStreamDefaultReader<hop.inferResult<typeof chat>>;
};

async function RecursiveTokens({ reader }: RecursiveTokensProps) {
  const { done, value } = await reader.read();

  if (done) {
    return null;
  }

  return (
    <>
      {value.choices[0].__type === "content" ? (
        value.choices[0].delta.content
      ) : (
        <></>
      )}
      <Suspense fallback={<LoadingDots />}>
        <RecursiveTokens reader={reader} />
      </Suspense>
    </>
  );
}

// This can be any loading indicator you want, which gets appended to the end
// of the tokens while waiting for the next token to be streamed
const LoadingDots = () => <span>...</span>;
```

We create a recursive React Server Component which uses Suspense boundaries to await each token,
and shows a fallback loading indicator where the next token will be rendered.

See our [Next 13 RSC example](https://next-13.hopfield.ai) for a real-world integration
using Vercel, similar to this quick example.

### Dive Deeper

To deepen your understanding of how Streaming works, and how it can be further utilized within your application,
refer to the [Streaming Chat](/chat/streaming) section.
'
'`;
diff --git a/examples/next-13/src/app/footer.tsx b/examples/next-13/src/app/footer.tsx
index 2d609d2..5eea338 100644
--- a/examples/next-13/src/app/footer.tsx
+++ b/examples/next-13/src/app/footer.tsx
@@ -17,7 +17,7 @@ export function Footer() {
 github logo
diff --git a/examples/next-13/src/app/layout.tsx b/examples/next-13/src/app/layout.tsx
index 7fab504..71b4eba 100644
--- a/examples/next-13/src/app/layout.tsx
+++ b/examples/next-13/src/app/layout.tsx
@@ -12,7 +12,7 @@ export const metadata = {
 card: 'summary_large_image',
 title: 'Hopfield with Server Components',
 description: 'Hopfield with React Server Components streaming on the Edge',
- creator: '@propology_',
+ creator: '@EnjoinHQ',
 openGraph: {
 type: 'website',
diff --git a/package.json b/package.json
index 5db3351..26363ca 100644
--- a/package.json
+++ b/package.json
@@ -15,7 +15,7 @@
 "format": "biome format . 
--write", "generate": "bun scripts/concatDocs.ts", "lint": "biome check .", - "lint:fix": "bun run lint --apply", + "lint:fix": "bun run lint --write", "postinstall": "bun run generate && bun x patch-package", "prepare": "bun x simple-git-hooks", "prepublishOnly": "bun scripts/prepublishOnly.ts", @@ -28,27 +28,27 @@ "typecheck": "tsc --noEmit" }, "devDependencies": { - "@biomejs/biome": "^1.2.2", + "@biomejs/biome": "^1.8.1", "@changesets/changelog-github": "^0.4.8", - "@changesets/cli": "^2.26.2", + "@changesets/cli": "^2.27.5", "@size-limit/preset-small-lib": "^9.0.0", - "@types/fs-extra": "^11.0.2", - "@types/json-schema": "^7.0.13", + "@types/fs-extra": "^11.0.4", + "@types/json-schema": "^7.0.15", "@types/node": "20.8.1", "@vitest/coverage-v8": "^0.34.6", - "ajv": "^8.12.0", + "ajv": "^8.16.0", "ajv-formats": "^2.1.1", - "bun": "^1.0.3", - "fs-extra": "^11.1.1", + "bun": "^1.1.12", + "fs-extra": "^11.2.0", "json-schema": "^0.4.0", "json-schema-deref-sync": "^0.14.0", - "openai": "4.16.1", - "rimraf": "^5.0.5", - "simple-git-hooks": "^2.9.0", + "openai": "4.50.0", + "rimraf": "^5.0.7", + "simple-git-hooks": "^2.11.1", "size-limit": "^9.0.0", - "typescript": "^5.2.2", + "typescript": "^5.4.5", "which-pm-runs": "1.1.0", - "zod": "^3.22.2" + "zod": "^3.23.8" }, "simple-git-hooks": { "pre-commit": "bun run format && bun run lint:fix" diff --git a/patches/zod-to-json-schema+3.21.4.patch b/patches/zod-to-json-schema+3.21.4.patch deleted file mode 100644 index 7c4fbe4..0000000 --- a/patches/zod-to-json-schema+3.21.4.patch +++ /dev/null @@ -1,44 +0,0 @@ -diff --git a/node_modules/zod-to-json-schema/src/Options.d.ts b/node_modules/zod-to-json-schema/src/Options.d.ts -index abfb7a2..ca546fc 100644 ---- a/node_modules/zod-to-json-schema/src/Options.d.ts -+++ b/node_modules/zod-to-json-schema/src/Options.d.ts -@@ -1,4 +1,6 @@ --import { ZodSchema } from "zod"; -+import { ZodSchema, type ZodTypeDef } from "zod"; -+import type { Refs } from "./Refs"; -+import type { JsonSchema7Type } from "./parseDef"; - export type Targets = "jsonSchema7" | "jsonSchema2019-09" | "openApi3"; - export type Options = { - name: string | undefined; -@@ -14,6 +16,7 @@ export type Options = { - errorMessages: boolean; - markdownDescription: boolean; - emailStrategy: "format:email" | "format:idn-email" | "pattern:zod"; -+ onParseDef: ((def: ZodTypeDef, refs: Refs, schema: JsonSchema7Type) => void) | undefined; - }; - export declare const defaultOptions: Options; - export declare const getDefaultOptions: (options: string | Partial> | undefined) => Options; -diff --git a/node_modules/zod-to-json-schema/src/Options.js b/node_modules/zod-to-json-schema/src/Options.js -index 8b680c4..ed84e25 100644 ---- a/node_modules/zod-to-json-schema/src/Options.js -+++ b/node_modules/zod-to-json-schema/src/Options.js -@@ -15,6 +15,7 @@ exports.defaultOptions = { - errorMessages: false, - markdownDescription: false, - emailStrategy: "format:email", -+ onParseDef: undefined, - }; - const getDefaultOptions = (options) => (typeof options === "string" - ? 
Object.assign(Object.assign({}, exports.defaultOptions), { name: options }) : Object.assign(Object.assign({}, exports.defaultOptions), options)); -diff --git a/node_modules/zod-to-json-schema/src/parseDef.js b/node_modules/zod-to-json-schema/src/parseDef.js -index 4f33785..26748a8 100644 ---- a/node_modules/zod-to-json-schema/src/parseDef.js -+++ b/node_modules/zod-to-json-schema/src/parseDef.js -@@ -45,6 +45,7 @@ function parseDef(def, refs, forceResolution = false // Forces a new schema to b - const jsonSchema = selectParser(def, def.typeName, refs); - if (jsonSchema) { - addMeta(def, refs, jsonSchema); -+ if (refs.onParseDef) refs.onParseDef(def, refs, jsonSchema); - } - newItem.jsonSchema = jsonSchema; - return jsonSchema; diff --git a/scripts/concatDocs.ts b/scripts/concatDocs.ts index 4d18e4b..237117f 100644 --- a/scripts/concatDocs.ts +++ b/scripts/concatDocs.ts @@ -1,5 +1,5 @@ -import fs from 'fs'; -import path from 'path'; +import fs from 'node:fs'; +import path from 'node:path'; const concatenateMarkdownFiles = async ( inputFolder: string, diff --git a/src/CHANGELOG.md b/src/CHANGELOG.md index f62c62f..f329553 100644 --- a/src/CHANGELOG.md +++ b/src/CHANGELOG.md @@ -4,101 +4,101 @@ ### Patch Changes -- [`8a70728`](https://github.com/propology/hopfield/commit/8a70728c879c0180e526fa5a2f10a36b55102102) Thanks [@0xcadams](https://github.com/0xcadams)! - **Fix:** attempt to fix types. +- [`8a70728`](https://github.com/EnjoinHQ/hopfield/commit/8a70728c879c0180e526fa5a2f10a36b55102102) Thanks [@0xcadams](https://github.com/0xcadams)! - **Fix:** attempt to fix types. ## 0.3.4 ### Patch Changes -- [`16516f8`](https://github.com/propology/hopfield/commit/16516f8d2c40c705ea0d77a5027e4d535fc04f2b) Thanks [@0xcadams](https://github.com/0xcadams)! - **Fix:** fix types with removing lazy. +- [`16516f8`](https://github.com/EnjoinHQ/hopfield/commit/16516f8d2c40c705ea0d77a5027e4d535fc04f2b) Thanks [@0xcadams](https://github.com/0xcadams)! - **Fix:** fix types with removing lazy. ## 0.3.3 ### Patch Changes -- [`d3c6ecf`](https://github.com/propology/hopfield/commit/d3c6ecfebe4e0e7760be418a48b42cc4f93730a5) Thanks [@0xcadams](https://github.com/0xcadams)! - **Fix:** fix types to use `ZodType`. +- [`d3c6ecf`](https://github.com/EnjoinHQ/hopfield/commit/d3c6ecfebe4e0e7760be418a48b42cc4f93730a5) Thanks [@0xcadams](https://github.com/0xcadams)! - **Fix:** fix types to use `ZodType`. ## 0.3.2 ### Patch Changes -- [`9b975f8`](https://github.com/propology/hopfield/commit/9b975f8d06cbf51af8c431446a8e7dfaf121a79d) Thanks [@0xcadams](https://github.com/0xcadams)! - **Fix**: attempt to fix slow types. +- [`9b975f8`](https://github.com/EnjoinHQ/hopfield/commit/9b975f8d06cbf51af8c431446a8e7dfaf121a79d) Thanks [@0xcadams](https://github.com/0xcadams)! - **Fix**: attempt to fix slow types. ## 0.3.1 ### Patch Changes -- [`2d1cb18`](https://github.com/propology/hopfield/commit/2d1cb183e81bb03df791b357802b5df0457a5ccb) Thanks [@0xcadams](https://github.com/0xcadams)! - **Feature:** added new model versions from latest OpenAI release. +- [`2d1cb18`](https://github.com/EnjoinHQ/hopfield/commit/2d1cb183e81bb03df791b357802b5df0457a5ccb) Thanks [@0xcadams](https://github.com/0xcadams)! - **Feature:** added new model versions from latest OpenAI release. ## 0.3.0 ### Minor Changes -- [`43ea622`](https://github.com/propology/hopfield/commit/43ea6223b94bffce70a2d9400a000bb880825aeb) Thanks [@0xcadams](https://github.com/0xcadams)! - **Fix:** fixed output types by bundling Zod. 
+- [`43ea622`](https://github.com/EnjoinHQ/hopfield/commit/43ea6223b94bffce70a2d9400a000bb880825aeb) Thanks [@0xcadams](https://github.com/0xcadams)! - **Fix:** fixed output types by bundling Zod. ## 0.2.4 ### Patch Changes -- [`36fd9c0`](https://github.com/propology/hopfield/commit/36fd9c0e8ece2f2dcf524b6752bd09b82203d130) Thanks [@0xcadams](https://github.com/0xcadams)! - **Fix:** moved project to Bun. +- [`36fd9c0`](https://github.com/EnjoinHQ/hopfield/commit/36fd9c0e8ece2f2dcf524b6752bd09b82203d130) Thanks [@0xcadams](https://github.com/0xcadams)! - **Fix:** moved project to Bun. ## 0.2.3 ### Patch Changes -- [#20](https://github.com/propology/hopfield/pull/20) [`872e6c7`](https://github.com/propology/hopfield/commit/872e6c73a2a892f947d00d46d9beab2d166c4b29) Thanks [@0xcadams](https://github.com/0xcadams)! - **Fix:** moved project to Bun. +- [#20](https://github.com/EnjoinHQ/hopfield/pull/20) [`872e6c7`](https://github.com/EnjoinHQ/hopfield/commit/872e6c73a2a892f947d00d46d9beab2d166c4b29) Thanks [@0xcadams](https://github.com/0xcadams)! - **Fix:** moved project to Bun. ## 0.2.3 ### Patch Changes -- [`ebab940`](https://github.com/propology/hopfield/commit/ebab9405d231677ce8d0a8d0dbf8f7e92e8bbaed) Thanks [@0xcadams](https://github.com/0xcadams)! - **Fix:** fixing issue with latest changeset release. +- [`ebab940`](https://github.com/EnjoinHQ/hopfield/commit/ebab9405d231677ce8d0a8d0dbf8f7e92e8bbaed) Thanks [@0xcadams](https://github.com/0xcadams)! - **Fix:** fixing issue with latest changeset release. ## 0.2.2 ### Patch Changes -- [#16](https://github.com/propology/hopfield/pull/16) [`5e27e92`](https://github.com/propology/hopfield/commit/5e27e9236bd12860da6f5e9824c13ae0e12daebe): Thanks [@0xcadams](https://github.com/0xcadams)! - **Feature:** added `onFunctionCall` to streaming function provider to enable workflows that validate final function calls while also streaming responses. +- [#16](https://github.com/EnjoinHQ/hopfield/pull/16) [`5e27e92`](https://github.com/EnjoinHQ/hopfield/commit/5e27e9236bd12860da6f5e9824c13ae0e12daebe): Thanks [@0xcadams](https://github.com/0xcadams)! - **Feature:** added `onFunctionCall` to streaming function provider to enable workflows that validate final function calls while also streaming responses. ## 0.2.1 ### Patch Changes -- [#14](https://github.com/propology/hopfield/pull/14) [`5c29dec`](https://github.com/propology/hopfield/commit/5c29dec3ae8f0866513a9648b8f6563df5c48118) Thanks [@0xcadams](https://github.com/0xcadams)! - **Feature:** added a `ReadableStream` to the response from the Streaming Chat provider and removed `readableFromAsyncIterable` +- [#14](https://github.com/EnjoinHQ/hopfield/pull/14) [`5c29dec`](https://github.com/EnjoinHQ/hopfield/commit/5c29dec3ae8f0866513a9648b8f6563df5c48118) Thanks [@0xcadams](https://github.com/0xcadams)! - **Feature:** added a `ReadableStream` to the response from the Streaming Chat provider and removed `readableFromAsyncIterable` from the exports, to simplify integration. ## 0.2.0 ### Minor Changes -- [`9720598`](https://github.com/propology/hopfield/commit/9720598b115a91203e6674710fa534f834611c16) Thanks [@0xcadams](https://github.com/0xcadams)! - **Feature:** added async iterator to readable stream for node integration. +- [`9720598`](https://github.com/EnjoinHQ/hopfield/commit/9720598b115a91203e6674710fa534f834611c16) Thanks [@0xcadams](https://github.com/0xcadams)! - **Feature:** added async iterator to readable stream for node integration. 
## 0.1.4 ### Patch Changes -- [#8](https://github.com/propology/hopfield/pull/8) [`24b5bdc`](https://github.com/propology/hopfield/commit/24b5bdcb8f9b3faaceaf99f58fe1e171e4422764) Thanks [@0xcadams](https://github.com/0xcadams)! - **Fix:** fixed some missing types to be explicit. +- [#8](https://github.com/EnjoinHQ/hopfield/pull/8) [`24b5bdc`](https://github.com/EnjoinHQ/hopfield/commit/24b5bdcb8f9b3faaceaf99f58fe1e171e4422764) Thanks [@0xcadams](https://github.com/0xcadams)! - **Fix:** fixed some missing types to be explicit. ## 0.1.3 ### Patch Changes -- [#6](https://github.com/propology/hopfield/pull/6) [`b2c5b0f`](https://github.com/propology/hopfield/commit/b2c5b0f28d3668dc468d61e15313c5d6a0a889aa) Thanks [@0xcadams](https://github.com/0xcadams)! - **Fix:** update to support latest `openai` version. +- [#6](https://github.com/EnjoinHQ/hopfield/pull/6) [`b2c5b0f`](https://github.com/EnjoinHQ/hopfield/commit/b2c5b0f28d3668dc468d61e15313c5d6a0a889aa) Thanks [@0xcadams](https://github.com/0xcadams)! - **Fix:** update to support latest `openai` version. ## 0.1.2 ### Patch Changes -- [#4](https://github.com/propology/hopfield/pull/4) [`d2a02c9`](https://github.com/propology/hopfield/commit/d2a02c977678826557c01f25245d824ae53c249e) Thanks [@0xcadams](https://github.com/0xcadams)! - **Fix:** added further cleanup based on integration tests. +- [#4](https://github.com/EnjoinHQ/hopfield/pull/4) [`d2a02c9`](https://github.com/EnjoinHQ/hopfield/commit/d2a02c977678826557c01f25245d824ae53c249e) Thanks [@0xcadams](https://github.com/0xcadams)! - **Fix:** added further cleanup based on integration tests. ## 0.1.1 ### Patch Changes -- [#2](https://github.com/propology/hopfield/pull/2) [`c0ff697`](https://github.com/propology/hopfield/commit/c0ff6971828591f61e29a7997a324834810e828e) Thanks [@0xcadams](https://github.com/0xcadams)! - **Fix:** fixed error paths to correspond with docs. +- [#2](https://github.com/EnjoinHQ/hopfield/pull/2) [`c0ff697`](https://github.com/EnjoinHQ/hopfield/commit/c0ff6971828591f61e29a7997a324834810e828e) Thanks [@0xcadams](https://github.com/0xcadams)! - **Fix:** fixed error paths to correspond with docs. ## 0.1.0 ### Minor Changes -- [`e77072a`](https://github.com/propology/hopfield/commit/e77072a076dfb4096e83c732ec631a9cfa6a29e0) Thanks [@0xcadams](https://github.com/0xcadams)! - **Feature:** added the initial OpenAI validator to Hopfield, with Streaming/Non-Streaming Chat, Function Calling, Templates, and Embeddings. +- [`e77072a`](https://github.com/EnjoinHQ/hopfield/commit/e77072a076dfb4096e83c732ec631a9cfa6a29e0) Thanks [@0xcadams](https://github.com/0xcadams)! - **Feature:** added the initial OpenAI validator to Hopfield, with Streaming/Non-Streaming Chat, Function Calling, Templates, and Embeddings. diff --git a/src/errors.ts b/src/errors.ts index 8e76f3d..7d13b02 100644 --- a/src/errors.ts +++ b/src/errors.ts @@ -26,8 +26,8 @@ export class BaseError extends Error { args.cause instanceof BaseError ? args.cause.details : args.cause?.message - ? args.cause.message - : args.details!; + ? args.cause.message + : args.details!; const docsPath = args.cause instanceof BaseError ? 
args.cause.docsPath || args.docsPath diff --git a/src/external.ts b/src/external.ts index 2f1b4a1..d533215 100644 --- a/src/external.ts +++ b/src/external.ts @@ -1,10 +1,10 @@ -import { - type InferInput, - type InferInputMessage, - type InferResult, - type InferStreamingResult, - type StreamingOptions, - type StreamingWithFunctionsOptions, +import type { + InferInput, + InferInputMessage, + InferResult, + InferStreamingResult, + StreamingOptions, + StreamingWithFunctionsOptions, } from './chat.js'; import type { BaseHopfield } from './provider.js'; diff --git a/src/function.ts b/src/function.ts index f7f09c3..921f9b2 100644 --- a/src/function.ts +++ b/src/function.ts @@ -1,19 +1,16 @@ import { BaseHopfieldSchema } from './base.js'; import { BaseError } from './errors.js'; -import { zodToJsonSchema } from 'zod-to-json-schema'; +import { zodToJsonSchema, type JsonSchema7Type } from 'zod-to-json-schema'; import type { BaseHopfieldChatTemplate, TypeTemplates } from './template.js'; import type { IsEmptyArray } from './type-utils.js'; -import { ZodFirstPartyTypeKind, type ZodTypeDef, z, type ZodType } from 'zod'; -import type { Refs } from 'zod-to-json-schema/src/Refs.js'; -import type { JsonSchema7Type } from 'zod-to-json-schema/src/parseDef.js'; +import { z, type ZodType } from 'zod'; export type AnyBaseHopfieldFunction = BaseHopfieldFunction< any, any, any, any, - any, any >; @@ -29,11 +26,6 @@ export type FunctionPropertyOrNever< ? never : [FunctionProperty, ...FunctionProperty[]]; -export type DisabledTypes = - | ZodFirstPartyTypeKind[] - | readonly ZodFirstPartyTypeKind[] - | false; - export interface JsonSchemaFunction< Name extends string, Description extends string = string, @@ -60,31 +52,6 @@ export interface JsonSchemaFunction< parameters: JsonSchema7Type; } -const requiredDescriptionTypes: ZodFirstPartyTypeKind[] = [ - ZodFirstPartyTypeKind.ZodBigInt, - ZodFirstPartyTypeKind.ZodBoolean, - ZodFirstPartyTypeKind.ZodDate, - ZodFirstPartyTypeKind.ZodEnum, - ZodFirstPartyTypeKind.ZodFunction, - ZodFirstPartyTypeKind.ZodNativeEnum, - ZodFirstPartyTypeKind.ZodNumber, - ZodFirstPartyTypeKind.ZodString, -]; - -export type HopfieldFunctionOptions = { - /** - * Allows descriptions to not be checked on the function parameters. This defaults to `true`. - */ - requireDescriptions?: boolean; - /** - * Allows you override or disable "unstable" types, which are types that do not typically - * produce good results with a given model. These are defined on a per-model basis. - * - * Set to false to allow all "unstable" types. 
- */ - disabledTypes?: D; -}; - const stringToJSONSchema = z.string().transform((str, ctx): object => { try { return JSON.parse(str); @@ -114,7 +81,6 @@ export type BaseHopfieldFunctionProps< FName extends string, FDescription extends string, FParams extends ZodType, - DTypes extends DisabledTypes, TTemplates extends TypeTemplates, Template extends BaseHopfieldChatTemplate, > = { @@ -122,14 +88,12 @@ export type BaseHopfieldFunctionProps< description: FDescription; parameters: FParams; template: Template; - options?: HopfieldFunctionOptions; }; export abstract class BaseHopfieldFunction< FName extends string, FDescription extends string, FParams extends ZodType, - DTypes extends DisabledTypes, TTemplates extends TypeTemplates, Template extends BaseHopfieldChatTemplate, > extends BaseHopfieldSchema { @@ -138,19 +102,16 @@ export abstract class BaseHopfieldFunction< parameters: FParams; protected _template: Template; - protected _options: HopfieldFunctionOptions; constructor({ name, description, parameters, template, - options = {}, }: BaseHopfieldFunctionProps< FName, FDescription, FParams, - DTypes, TTemplates, Template >) { @@ -161,11 +122,9 @@ export abstract class BaseHopfieldFunction< this.parameters = parameters; this._template = template; - this._options = options; } protected abstract get _defaultTypeTemplates(): TypeTemplates; - protected abstract get _defaultDisabledTypes(): DisabledTypes; /** * Returns a formatted JSON schema function definition for LLM function calling. @@ -191,77 +150,11 @@ export abstract class BaseHopfieldFunction< }); } - const onParseDef = ( - def: ZodTypeDef, - _refs: Refs, - schema: JsonSchema7Type, - ) => { - const typeName: ZodFirstPartyTypeKind = (def as any).typeName; - - const templates = - this._template._templates === false - ? false - : { - ...this._defaultTypeTemplates, - ...this._template._templates, - }; - const requireDescriptions = this._options.requireDescriptions ?? true; - const disabledTypes = !this._options.disabledTypes - ? false - : { - ...this._defaultDisabledTypes, - ...this._options.disabledTypes, - }; - - // check here for typeName and description being defined - if ( - requireDescriptions && - requiredDescriptionTypes.includes(typeName) && - !schema.description - ) { - throw new BaseError( - `You must define a description for the type: ${typeName}`, - { - docsPath: '/chat/functions', - details: `There must be a description provided for ${typeName}, to describe what the function does for the LLM to infer a value.`, - }, - ); - } - - const descriptionEnding = - typeof templates === 'object' - ? templates?.[typeName]?.('' as any) ?? 
null - : null; - - if ( - descriptionEnding && - schema.description && - !schema.description?.endsWith(descriptionEnding) - ) { - throw new BaseError('You should template your descriptions.', { - docsPath: '/chat/functions', - details: `It's recommended to template your descriptions - we recommend ending the type ${typeName} with "${descriptionEnding}".`, - }); - } - - // check here for disabled types - if ( - typeof disabledTypes !== 'boolean' && - disabledTypes.includes(typeName) - ) { - throw new BaseError(`You should not use ${typeName}.`, { - docsPath: '/chat/functions', - details: `You should not use ${typeName} yet - it provides unreliable results from LLMs.`, - }); - } - }; - return { name: this.name, description: this.description, parameters: zodToJsonSchema(this.parameters as any, { $refStrategy: 'none', - onParseDef, }) as object, } as const; } diff --git a/src/openai/chat/non-streaming-with-functions.integration.ts b/src/openai/chat/non-streaming-with-functions.integration.ts index 8a066e8..3b47055 100644 --- a/src/openai/chat/non-streaming-with-functions.integration.ts +++ b/src/openai/chat/non-streaming-with-functions.integration.ts @@ -318,9 +318,6 @@ describe.concurrent('non-streaming with functions', () => { summary: z.string().describe('The summary of the message.'), category: SupportCategoryEnum, }), - options: { - requireDescriptions: false, - }, }); const chat = hopfieldChat.functions([classifyMessage]); @@ -420,9 +417,6 @@ describe.concurrent('non-streaming with functions', () => { summary: z.string().describe('The summary of the message.'), category: SupportCategoryEnum, }), - options: { - requireDescriptions: false, - }, }); const chat = hopfieldChat.functions([classifyMessage]); diff --git a/src/openai/chat/non-streaming-with-functions.ts b/src/openai/chat/non-streaming-with-functions.ts index 9ca628d..c0fa204 100644 --- a/src/openai/chat/non-streaming-with-functions.ts +++ b/src/openai/chat/non-streaming-with-functions.ts @@ -25,8 +25,8 @@ import { OpenAIChatWithFunctionsStreamingSchema, } from './streaming-with-functions.js'; import { AssistantRole } from './streaming.js'; -import OpenAI from 'openai'; -import { ZodUnion, z } from 'zod'; +import type OpenAI from 'openai'; +import { type ZodUnion, z } from 'zod'; export type OpenAIChatWithFunctionsSchemaProps< ModelName extends OpenAIChatModelName, @@ -156,7 +156,7 @@ export class OpenAIChatWithFunctionsSchema< }); } - functions( + functions( functions: NewFunctions, ) { return new OpenAIChatWithFunctionsSchema({ diff --git a/src/openai/chat/non-streaming.ts b/src/openai/chat/non-streaming.ts index f8cc1ce..745881c 100644 --- a/src/openai/chat/non-streaming.ts +++ b/src/openai/chat/non-streaming.ts @@ -11,8 +11,8 @@ import { } from './non-streaming-with-functions.js'; import { MessageAssistant, OpenAIChatBaseInput } from './shared.js'; import { OpenAIChatStreamingSchema, OpenAIStreamingChat } from './streaming.js'; -import OpenAI from 'openai'; -import { ZodUnion, z } from 'zod'; +import type OpenAI from 'openai'; +import { type ZodUnion, z } from 'zod'; /** * Omitted content due to a flag from our content filters. 
@@ -145,7 +145,7 @@ export class OpenAIChatSchema< }); } - functions( + functions( functions: NewFunctions, ): OpenAIChatWithFunctionsSchema { return new OpenAIChatWithFunctionsSchema({ @@ -201,7 +201,7 @@ export class OpenAIChat< }); } - override functions( + override functions( functions: NewFunctions, ): OpenAIChatWithFunctions { return new OpenAIChatWithFunctions({ diff --git a/src/openai/chat/shared.ts b/src/openai/chat/shared.ts index d472939..83544ff 100644 --- a/src/openai/chat/shared.ts +++ b/src/openai/chat/shared.ts @@ -1,6 +1,6 @@ import type { FunctionProperties, OpenAIFunctionsTuple } from '../function.js'; import { openAIChatModelNames } from '../models.js'; -import { ZodDiscriminatedUnion, z } from 'zod'; +import { type ZodDiscriminatedUnion, z } from 'zod'; export type FunctionReturnTypesUnion = ZodDiscriminatedUnion< diff --git a/src/openai/chat/streaming-with-functions.ts b/src/openai/chat/streaming-with-functions.ts index 335b3eb..214f0b1 100644 --- a/src/openai/chat/streaming-with-functions.ts +++ b/src/openai/chat/streaming-with-functions.ts @@ -7,8 +7,8 @@ import { } from '../../chat.js'; import type { LimitedTupleWithUnion } from '../../type-utils.js'; -import OpenAI from 'openai'; -import { ZodUnion, z } from 'zod'; +import type OpenAI from 'openai'; +import { type ZodUnion, z } from 'zod'; import { readableFromAsyncIterable } from '../../utils.js'; import type { FunctionConfigsUnion, diff --git a/src/openai/chat/streaming.ts b/src/openai/chat/streaming.ts index 6c70c90..5bdf7c8 100644 --- a/src/openai/chat/streaming.ts +++ b/src/openai/chat/streaming.ts @@ -1,5 +1,5 @@ -import OpenAI from 'openai'; -import { ZodUnion, z } from 'zod'; +import type OpenAI from 'openai'; +import { type ZodUnion, z } from 'zod'; import { BaseHopfieldChat, type InferResult, @@ -178,7 +178,7 @@ export class OpenAIChatStreamingSchema< }); } - functions( + functions( functions: NewFunctions, ) { return new OpenAIChatWithFunctionsStreamingSchema({ @@ -267,7 +267,7 @@ export class OpenAIStreamingChat< return result; } - override functions( + override functions( functions: NewFunctions, ) { return new OpenAIChatWithFunctionsStreaming({ diff --git a/src/openai/embedding.integration.ts b/src/openai/embedding.integration.ts index be53c5f..09b759a 100644 --- a/src/openai/embedding.integration.ts +++ b/src/openai/embedding.integration.ts @@ -15,7 +15,7 @@ test( input: ['hopfield'], }); - expect(response.model).toMatchInlineSnapshot('"text-embedding-ada-002-v2"'); + expect(response.model).toMatchInlineSnapshot('"text-embedding-ada-002"'); expect(response.data[0].embedding[0]).toBeCloseTo(-0.0073666335, 2); expect(response.data[0].embedding[1535]).toBeCloseTo(-0.0013278616, 2); }, @@ -29,7 +29,7 @@ test( input: ['ready', 'set', 'hopfield'], }); - expect(response.model).toMatchInlineSnapshot('"text-embedding-ada-002-v2"'); + expect(response.model).toMatchInlineSnapshot('"text-embedding-ada-002"'); expect(response.data[0].embedding[0]).toBeCloseTo(-0.009482461, 2); expect(response.data[2].embedding[0]).toBeCloseTo(-0.0073666335, 2); }, diff --git a/src/openai/embedding.ts b/src/openai/embedding.ts index 128197b..eb43969 100644 --- a/src/openai/embedding.ts +++ b/src/openai/embedding.ts @@ -5,8 +5,8 @@ import { type OpenAIEmbeddingModelName, defaultOpenAIEmbeddingModelName, } from './models.js'; -import OpenAI from 'openai'; -import { ZodArray, ZodNumber, ZodString, z } from 'zod'; +import type OpenAI from 'openai'; +import { type ZodArray, type ZodNumber, type ZodString, z } from 'zod'; export 
interface EmbeddingLengths extends Record { @@ -132,7 +132,7 @@ export class OpenAIEmbedding< async get( input: Omit, 'model'>, - ): Promise { + ): Promise<(typeof this.returnType)['_output']> { const parsedInput = await this.parameters.parseAsync(input); const response = await this.provider.embeddings.create( diff --git a/src/openai/function.test.ts b/src/openai/function.test.ts index c0998af..437bc95 100644 --- a/src/openai/function.test.ts +++ b/src/openai/function.test.ts @@ -8,7 +8,6 @@ import * as Exports from './function.js'; import hop from '../index.js'; import openai from './index.js'; -import { z } from 'zod'; it('should expose correct exports', () => { expect(Object.keys(Exports)).toMatchInlineSnapshot(` @@ -86,76 +85,6 @@ describe.concurrent('test functions', () => { `); }); - test('should fail with no enum description', () => { - expect( - () => - hop.client(openai).function({ - ...weatherFunctionParams, - parameters: z.object({ - location: z - .string() - .describe('The city and state, e.g. San Francisco, CA'), - unit: z.enum(['celsius', 'fahrenheit']), - }), - }).jsonSchema, - ).toThrowErrorMatchingInlineSnapshot(` - "You must define a description for the type: ZodEnum - - Docs: https://hopfield.ai/chat/functions - Details: There must be a description provided for ZodEnum, to describe what the function does for the LLM to infer a value. - Version: hopfield@x.y.z" - `); - }); - - test('should fail with no string description', () => { - expect( - () => - hop.client(openai).function({ - ...weatherFunctionParams, - parameters: z.object({ - location: z.string(), - unit: z - .enum(['celsius', 'fahrenheit']) - .describe( - hop - .client(openai) - .template() - .enum('The unit for the temperature.'), - ), - }), - }).jsonSchema, - ).toThrowErrorMatchingInlineSnapshot(` - "You must define a description for the type: ZodString - - Docs: https://hopfield.ai/chat/functions - Details: There must be a description provided for ZodString, to describe what the function does for the LLM to infer a value. - Version: hopfield@x.y.z" - `); - }); - - test('should fail with no enum templated description', () => { - expect( - () => - hop.client(openai).function({ - ...weatherFunctionParams, - parameters: z.object({ - location: z - .string() - .describe('The city and state, e.g. San Francisco, CA'), - unit: z - .enum(['celsius', 'fahrenheit']) - .describe('The unit for the temperature.'), - }), - }).jsonSchema, - ).toThrowErrorMatchingInlineSnapshot(` - "You should template your descriptions. - - Docs: https://hopfield.ai/chat/functions - Details: It's recommended to template your descriptions - we recommend ending the type ZodEnum with \\" This must always be a possible value from the \`enum\` array.\\". 
- Version: hopfield@x.y.z" - `); - }); - test('should fail with an invalid function name', () => { expect( () => diff --git a/src/openai/function.ts b/src/openai/function.ts index 5fb13f1..3396f0c 100644 --- a/src/openai/function.ts +++ b/src/openai/function.ts @@ -1,16 +1,11 @@ -import { - BaseHopfieldFunction, - type DisabledTypes, - type HopfieldFunctionOptions, -} from '../function.js'; +import { BaseHopfieldFunction } from '../function.js'; import type { TypeTemplates } from '../template.js'; import { - ZodArray, - ZodDefault, - ZodFirstPartyTypeKind, - ZodType, - ZodUnion, + type ZodArray, + type ZodDefault, + type ZodType, + type ZodUnion, z, } from 'zod'; import { @@ -24,12 +19,6 @@ export type OpenAIFunctionsTuple = [ ...OpenAIFunctionSchema[], ]; -const disallowedTypes = [ - ZodFirstPartyTypeKind.ZodAny, - ZodFirstPartyTypeKind.ZodBigInt, - ZodFirstPartyTypeKind.ZodTuple, -] as const satisfies readonly ZodFirstPartyTypeKind[]; - const openAITypeTemplates = { ZodEnum: (description: D) => `${description} This must always be a possible value from the \`enum\` array.` as const, @@ -61,30 +50,24 @@ export type OpenAIFunctionProps< FName extends string, FDescription extends string, FParams extends ZodType, - DTypes extends DisabledTypes, > = { name: FName; description: FDescription; parameters: FParams; - options?: HopfieldFunctionOptions; }; export class OpenAIFunction< FName extends string, FDescription extends string, FParams extends ZodType, - DTypes extends DisabledTypes = typeof disallowedTypes, > extends BaseHopfieldFunction< FName, FDescription, FParams, - DTypes, DefaultOpenAITypeTemplates, OpenAIChatTemplate > { - constructor( - props: OpenAIFunctionProps, - ) { + constructor(props: OpenAIFunctionProps) { super({ ...props, template: new OpenAIChatTemplate({ @@ -113,17 +96,12 @@ export class OpenAIFunction< return openAITypeTemplates; } - protected get _defaultDisabledTypes() { - return disallowedTypes; - } - static function< FName extends string, FDescription extends string, FParams extends ZodType, - DTypes extends DisabledTypes, - >(opts: OpenAIFunctionProps) { - return new OpenAIFunction(opts); + >(opts: OpenAIFunctionProps) { + return new OpenAIFunction(opts); } } diff --git a/src/openai/models.ts b/src/openai/models.ts index 80cf5c6..9586959 100644 --- a/src/openai/models.ts +++ b/src/openai/models.ts @@ -1,13 +1,25 @@ export const openAIChatModelNames = [ - 'gpt-4-0314' /** @deprecated Legacy model, to be discontinued Jun 13, 2024 */, + 'gpt-4o', + 'gpt-4o-2024-05-13', + 'gpt-4-turbo', + 'gpt-4-turbo-2024-04-09', + 'gpt-4-0125-preview', + 'gpt-4-turbo-preview', + 'gpt-4-1106-preview', + 'gpt-4-vision-preview', + 'gpt-4', + 'gpt-4-0314', 'gpt-4-0613', - 'gpt-4-32k-0314' /** @deprecated Legacy model, to be discontinued Jun 13, 2024 */, + 'gpt-4-32k', + 'gpt-4-32k-0314', 'gpt-4-32k-0613', - 'gpt-3.5-turbo-0301' /** @deprecated Legacy model, to be discontinued Jun 13, 2024 */, - 'gpt-3.5-turbo-0613' /** @deprecated Legacy model, will be replaced by gpt-3.5-turbo-1106 on Dec 11, 2023 */, + 'gpt-3.5-turbo', + 'gpt-3.5-turbo-16k', + 'gpt-3.5-turbo-0301', + 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-1106', - 'gpt-3.5-turbo-16k-0613' /** @deprecated Will be replaced by gpt-3.5-turbo-1106 on Dec 11, 2023 */, - 'gpt-4-1106-preview', + 'gpt-3.5-turbo-0125', + 'gpt-3.5-turbo-16k-0613', ] as const; /** @@ -15,7 +27,7 @@ export const openAIChatModelNames = [ * [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table * for details on which 
models work with the Chat API. */ -export type OpenAIChatModelName = typeof openAIChatModelNames[number]; +export type OpenAIChatModelName = (typeof openAIChatModelNames)[number]; export const openAIChatModelNamesWithFunctionCalling = [ 'gpt-3.5-turbo-1106', @@ -33,7 +45,7 @@ export type DefaultOpenAIChatModelName = typeof defaultOpenAIChatModelName; * for details on which models work with function calling. */ export type OpenAIModelNameWithFunctionCalling = - typeof openAIChatModelNamesWithFunctionCalling[number]; + (typeof openAIChatModelNamesWithFunctionCalling)[number]; export const openAIEmbeddingModelNames = ['text-embedding-ada-002'] as const; @@ -41,7 +53,8 @@ export const openAIEmbeddingModelNames = ['text-embedding-ada-002'] as const; * IDs of the embedding models which support function calling. See the * [embeddings](https://platform.openai.com/docs/models/embeddings) docs for more details. */ -export type OpenAIEmbeddingModelName = typeof openAIEmbeddingModelNames[number]; +export type OpenAIEmbeddingModelName = + (typeof openAIEmbeddingModelNames)[number]; export const defaultOpenAIEmbeddingModelName = openAIEmbeddingModelNames[0]; export type DefaultOpenAIEmbeddingModelName = diff --git a/src/openai/provider.ts b/src/openai/provider.ts index 7b12033..1b2234f 100644 --- a/src/openai/provider.ts +++ b/src/openai/provider.ts @@ -8,7 +8,6 @@ import { defaultEmbeddingCount, type DefaultEmbeddingCount, } from '../embedding.js'; -import type { DisabledTypes } from '../function.js'; import { BaseHopfield } from '../provider.js'; import type { TypeTemplates } from '../template.js'; import { OpenAIChat, OpenAIChatSchema } from './chat/non-streaming.js'; @@ -80,9 +79,8 @@ export class OpenAIHopfield< FName extends string, FDescription extends string, FParams extends ZodType, - DTypes extends DisabledTypes, // = typeof disallowedTypes, - >(opts: OpenAIFunctionProps) { - return new OpenAIFunction(opts); + >(opts: OpenAIFunctionProps) { + return new OpenAIFunction(opts); } override template(): OpenAIChatTemplate; diff --git a/src/openai/template.ts b/src/openai/template.ts index 1474763..db6491c 100644 --- a/src/openai/template.ts +++ b/src/openai/template.ts @@ -8,7 +8,7 @@ export const defaultOpenAITypeTemplates = { export type DefaultOpenAITypeTemplates = typeof defaultOpenAITypeTemplates; -export type OpenAIChatTemplateProps = { +export type OpenAIChatTemplateProps = { templates: TTemplates; }; @@ -25,7 +25,7 @@ export class OpenAIChatTemplate< return defaultOpenAITypeTemplates.ZodEnum(description); } - static template( + static template( opts: OpenAIChatTemplateProps, ) { return new OpenAIChatTemplate(opts); diff --git a/src/package.json b/src/package.json index f6df459..5062faf 100644 --- a/src/package.json +++ b/src/package.json @@ -1,12 +1,12 @@ { "name": "hopfield", "version": "0.3.5", - "repository": "propology/hopfield", + "repository": "EnjoinHQ/hopfield", "main": "./_cjs/index.js", "module": "./_esm/index.js", "peerDependencies": { - "openai": ">=4.0.0", - "typescript": ">=5.1.3" + "openai": "^4.50.0", + "typescript": "^5.4.5" }, "exports": { ".": { @@ -32,13 +32,7 @@ "!**/*.bench.ts", "!tsconfig.build.json" ], - "keywords": [ - "ai", - "llm", - "openai", - "gpt", - "ai-tools" - ], + "keywords": ["ai", "llm", "openai", "gpt", "ai-tools"], "license": "MIT", "peerDependenciesMeta": { "openai": { @@ -53,14 +47,12 @@ "types": "./_types/index.d.ts", "typesVersions": { "*": { - "openai": [ - "./_types/openai/index.d.ts" - ] + "openai": ["./_types/openai/index.d.ts"] } }, "typings": 
"./_types/index.d.ts", "dependencies": { - "zod": "^3.22.4", - "zod-to-json-schema": "^3.21.4" + "zod": "^3.23.8", + "zod-to-json-schema": "^3.23.0" } } diff --git a/src/provider.ts b/src/provider.ts index afa2034..c04434f 100644 --- a/src/provider.ts +++ b/src/provider.ts @@ -8,8 +8,6 @@ export abstract class BaseHopfield { abstract chat(opts?: any): BaseHopfieldChat; abstract embedding(opts?: any): BaseHopfieldEmbedding; - abstract function( - opts?: any, - ): BaseHopfieldFunction; + abstract function(opts?: any): BaseHopfieldFunction; abstract template(opts?: any): BaseHopfieldChatTemplate; } diff --git a/src/template.ts b/src/template.ts index a7c3b9c..f65132b 100644 --- a/src/template.ts +++ b/src/template.ts @@ -7,7 +7,7 @@ export type TypeTemplates = > | false; -export type BaseHopfieldChatTemplateProps = { +export type BaseHopfieldChatTemplateProps = { /** * Allows you to specify custom templates to use for different Zod types. */ diff --git a/src/type-utils.ts b/src/type-utils.ts index a4678bb..5d9225a 100644 --- a/src/type-utils.ts +++ b/src/type-utils.ts @@ -1,4 +1,4 @@ -import { type ZodLiteral, ZodNumber } from 'zod'; +import type { ZodLiteral, ZodNumber } from 'zod'; /** * Prints custom error message @@ -138,44 +138,139 @@ export type IsEmptyArray = T extends [infer _X, ...infer _Rest] export type LimitedTuple = N extends 1 ? [T] : N extends 2 - ? [T, T] - : N extends 3 - ? [T, T, T] - : N extends 4 - ? [T, T, T, T] - : N extends 5 - ? [T, T, T, T, T] - : N extends 6 - ? [T, T, T, T, T, T] - : N extends 7 - ? [T, T, T, T, T, T, T] - : N extends 8 - ? [T, T, T, T, T, T, T, T] - : N extends 9 - ? [T, T, T, T, T, T, T, T, T] - : N extends 10 - ? [T, T, T, T, T, T, T, T, T, T] - : N extends 11 - ? [T, T, T, T, T, T, T, T, T, T, T] - : N extends 12 - ? [T, T, T, T, T, T, T, T, T, T, T, T] - : N extends 13 - ? [T, T, T, T, T, T, T, T, T, T, T, T, T] - : N extends 14 - ? [T, T, T, T, T, T, T, T, T, T, T, T, T, T] - : N extends 15 - ? [T, T, T, T, T, T, T, T, T, T, T, T, T, T, T] - : N extends 16 - ? [T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T] - : N extends 17 - ? [T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T] - : N extends 18 - ? [T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T] - : N extends 19 - ? [T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T] - : N extends 20 - ? [T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T] - : never; + ? [T, T] + : N extends 3 + ? [T, T, T] + : N extends 4 + ? [T, T, T, T] + : N extends 5 + ? [T, T, T, T, T] + : N extends 6 + ? [T, T, T, T, T, T] + : N extends 7 + ? [T, T, T, T, T, T, T] + : N extends 8 + ? [T, T, T, T, T, T, T, T] + : N extends 9 + ? [T, T, T, T, T, T, T, T, T] + : N extends 10 + ? [T, T, T, T, T, T, T, T, T, T] + : N extends 11 + ? [T, T, T, T, T, T, T, T, T, T, T] + : N extends 12 + ? [T, T, T, T, T, T, T, T, T, T, T, T] + : N extends 13 + ? [T, T, T, T, T, T, T, T, T, T, T, T, T] + : N extends 14 + ? [T, T, T, T, T, T, T, T, T, T, T, T, T, T] + : N extends 15 + ? [T, T, T, T, T, T, T, T, T, T, T, T, T, T, T] + : N extends 16 + ? [ + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + ] + : N extends 17 + ? [ + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + ] + : N extends 18 + ? [ + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + ] + : N extends 19 + ? [ + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + ] + : N extends 20 + ? 
[ + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + T, + ] + : never; type ZL = ZodLiteral; @@ -186,206 +281,227 @@ type ZL = ZodLiteral; export type LimitedTupleWithUnion = N extends 1 ? [ZL<0>] : N extends 2 - ? [ZL<0>, ZL<1>] - : N extends 3 - ? [ZL<0>, ZL<1>, ZL<2>] - : N extends 4 - ? [ZL<0>, ZL<1>, ZL<2>, ZL<3>] - : N extends 5 - ? [ZL<0>, ZL<1>, ZL<2>, ZL<3>, ZL<4>] - : N extends 6 - ? [ZL<0>, ZL<1>, ZL<2>, ZL<3>, ZL<4>, ZL<5>] - : N extends 7 - ? [ZL<0>, ZL<1>, ZL<2>, ZL<3>, ZL<4>, ZL<5>, ZL<6>] - : N extends 8 - ? [ZL<0>, ZL<1>, ZL<2>, ZL<3>, ZL<4>, ZL<5>, ZL<6>, ZL<7>] - : N extends 9 - ? [ZL<0>, ZL<1>, ZL<2>, ZL<3>, ZL<4>, ZL<5>, ZL<6>, ZL<7>, ZL<8>] - : N extends 10 - ? [ZL<0>, ZL<1>, ZL<2>, ZL<3>, ZL<4>, ZL<5>, ZL<6>, ZL<7>, ZL<8>, ZL<9>] - : N extends 11 - ? [ - ZL<0>, - ZL<1>, - ZL<2>, - ZL<3>, - ZL<4>, - ZL<5>, - ZL<6>, - ZL<7>, - ZL<8>, - ZL<9>, - ZL<10>, - ] - : N extends 12 - ? [ - ZL<0>, - ZL<1>, - ZL<2>, - ZL<3>, - ZL<4>, - ZL<5>, - ZL<6>, - ZL<7>, - ZL<8>, - ZL<9>, - ZL<10>, - ZL<11>, - ] - : N extends 13 - ? [ - ZL<0>, - ZL<1>, - ZL<2>, - ZL<3>, - ZL<4>, - ZL<5>, - ZL<6>, - ZL<7>, - ZL<8>, - ZL<9>, - ZL<10>, - ZL<11>, - ZL<12>, - ] - : N extends 14 - ? [ - ZL<0>, - ZL<1>, - ZL<2>, - ZL<3>, - ZL<4>, - ZL<5>, - ZL<6>, - ZL<7>, - ZL<8>, - ZL<9>, - ZL<10>, - ZL<11>, - ZL<12>, - ZL<13>, - ] - : N extends 15 - ? [ - ZL<0>, - ZL<1>, - ZL<2>, - ZL<3>, - ZL<4>, - ZL<5>, - ZL<6>, - ZL<7>, - ZL<8>, - ZL<9>, - ZL<10>, - ZL<11>, - ZL<12>, - ZL<13>, - ZL<14>, - ] - : N extends 16 - ? [ - ZL<0>, - ZL<1>, - ZL<2>, - ZL<3>, - ZL<4>, - ZL<5>, - ZL<6>, - ZL<7>, - ZL<8>, - ZL<9>, - ZL<10>, - ZL<11>, - ZL<12>, - ZL<13>, - ZL<14>, - ZL<15>, - ] - : N extends 17 - ? [ - ZL<0>, - ZL<1>, - ZL<2>, - ZL<3>, - ZL<4>, - ZL<5>, - ZL<6>, - ZL<7>, - ZL<8>, - ZL<9>, - ZL<10>, - ZL<11>, - ZL<12>, - ZL<13>, - ZL<14>, - ZL<15>, - ZL<16>, - ] - : N extends 18 - ? [ - ZL<0>, - ZL<1>, - ZL<2>, - ZL<3>, - ZL<4>, - ZL<5>, - ZL<6>, - ZL<7>, - ZL<8>, - ZL<9>, - ZL<10>, - ZL<11>, - ZL<12>, - ZL<13>, - ZL<14>, - ZL<15>, - ZL<16>, - ZL<17>, - ] - : N extends 19 - ? [ - ZL<0>, - ZL<1>, - ZL<2>, - ZL<3>, - ZL<4>, - ZL<5>, - ZL<6>, - ZL<7>, - ZL<8>, - ZL<9>, - ZL<10>, - ZL<11>, - ZL<12>, - ZL<13>, - ZL<14>, - ZL<15>, - ZL<16>, - ZL<17>, - ZL<18>, - ] - : N extends 20 - ? [ - ZL<0>, - ZL<1>, - ZL<2>, - ZL<3>, - ZL<4>, - ZL<5>, - ZL<6>, - ZL<7>, - ZL<8>, - ZL<9>, - ZL<10>, - ZL<11>, - ZL<12>, - ZL<13>, - ZL<14>, - ZL<15>, - ZL<16>, - ZL<17>, - ZL<18>, - ZL<19>, - ] - : never; + ? [ZL<0>, ZL<1>] + : N extends 3 + ? [ZL<0>, ZL<1>, ZL<2>] + : N extends 4 + ? [ZL<0>, ZL<1>, ZL<2>, ZL<3>] + : N extends 5 + ? [ZL<0>, ZL<1>, ZL<2>, ZL<3>, ZL<4>] + : N extends 6 + ? [ZL<0>, ZL<1>, ZL<2>, ZL<3>, ZL<4>, ZL<5>] + : N extends 7 + ? [ZL<0>, ZL<1>, ZL<2>, ZL<3>, ZL<4>, ZL<5>, ZL<6>] + : N extends 8 + ? [ZL<0>, ZL<1>, ZL<2>, ZL<3>, ZL<4>, ZL<5>, ZL<6>, ZL<7>] + : N extends 9 + ? [ + ZL<0>, + ZL<1>, + ZL<2>, + ZL<3>, + ZL<4>, + ZL<5>, + ZL<6>, + ZL<7>, + ZL<8>, + ] + : N extends 10 + ? [ + ZL<0>, + ZL<1>, + ZL<2>, + ZL<3>, + ZL<4>, + ZL<5>, + ZL<6>, + ZL<7>, + ZL<8>, + ZL<9>, + ] + : N extends 11 + ? [ + ZL<0>, + ZL<1>, + ZL<2>, + ZL<3>, + ZL<4>, + ZL<5>, + ZL<6>, + ZL<7>, + ZL<8>, + ZL<9>, + ZL<10>, + ] + : N extends 12 + ? [ + ZL<0>, + ZL<1>, + ZL<2>, + ZL<3>, + ZL<4>, + ZL<5>, + ZL<6>, + ZL<7>, + ZL<8>, + ZL<9>, + ZL<10>, + ZL<11>, + ] + : N extends 13 + ? 
[ + ZL<0>, + ZL<1>, + ZL<2>, + ZL<3>, + ZL<4>, + ZL<5>, + ZL<6>, + ZL<7>, + ZL<8>, + ZL<9>, + ZL<10>, + ZL<11>, + ZL<12>, + ] + : N extends 14 + ? [ + ZL<0>, + ZL<1>, + ZL<2>, + ZL<3>, + ZL<4>, + ZL<5>, + ZL<6>, + ZL<7>, + ZL<8>, + ZL<9>, + ZL<10>, + ZL<11>, + ZL<12>, + ZL<13>, + ] + : N extends 15 + ? [ + ZL<0>, + ZL<1>, + ZL<2>, + ZL<3>, + ZL<4>, + ZL<5>, + ZL<6>, + ZL<7>, + ZL<8>, + ZL<9>, + ZL<10>, + ZL<11>, + ZL<12>, + ZL<13>, + ZL<14>, + ] + : N extends 16 + ? [ + ZL<0>, + ZL<1>, + ZL<2>, + ZL<3>, + ZL<4>, + ZL<5>, + ZL<6>, + ZL<7>, + ZL<8>, + ZL<9>, + ZL<10>, + ZL<11>, + ZL<12>, + ZL<13>, + ZL<14>, + ZL<15>, + ] + : N extends 17 + ? [ + ZL<0>, + ZL<1>, + ZL<2>, + ZL<3>, + ZL<4>, + ZL<5>, + ZL<6>, + ZL<7>, + ZL<8>, + ZL<9>, + ZL<10>, + ZL<11>, + ZL<12>, + ZL<13>, + ZL<14>, + ZL<15>, + ZL<16>, + ] + : N extends 18 + ? [ + ZL<0>, + ZL<1>, + ZL<2>, + ZL<3>, + ZL<4>, + ZL<5>, + ZL<6>, + ZL<7>, + ZL<8>, + ZL<9>, + ZL<10>, + ZL<11>, + ZL<12>, + ZL<13>, + ZL<14>, + ZL<15>, + ZL<16>, + ZL<17>, + ] + : N extends 19 + ? [ + ZL<0>, + ZL<1>, + ZL<2>, + ZL<3>, + ZL<4>, + ZL<5>, + ZL<6>, + ZL<7>, + ZL<8>, + ZL<9>, + ZL<10>, + ZL<11>, + ZL<12>, + ZL<13>, + ZL<14>, + ZL<15>, + ZL<16>, + ZL<17>, + ZL<18>, + ] + : N extends 20 + ? [ + ZL<0>, + ZL<1>, + ZL<2>, + ZL<3>, + ZL<4>, + ZL<5>, + ZL<6>, + ZL<7>, + ZL<8>, + ZL<9>, + ZL<10>, + ZL<11>, + ZL<12>, + ZL<13>, + ZL<14>, + ZL<15>, + ZL<16>, + ZL<17>, + ZL<18>, + ZL<19>, + ] + : never; diff --git a/src/utils.ts b/src/utils.ts index ff2c638..1b5791b 100644 --- a/src/utils.ts +++ b/src/utils.ts @@ -3,8 +3,8 @@ import type { StringifiedArray, StringifiedObject } from './types.js'; type JsonStringifyReturn = T extends object[] ? StringifiedArray : T extends object - ? StringifiedObject - : never; + ? StringifiedObject + : never; export function jsonStringify( value: T,