From 80356338749618620cefdd1630120a570f5f1f62 Mon Sep 17 00:00:00 2001 From: Nuno Campos Date: Sun, 19 Feb 2023 00:03:29 +0000 Subject: [PATCH] Add CI job (and make it green) (#42) * Add CI job * Add ci script * Run format in examples package * Format docs * Fix eslint config in langchain * Add ci command * Separate unit and integration tests * Fix ci command * Make path relative, adjust node version * Add os matrix * Set eol to lf * Do not cache CI command * Try to fix loadFromHub on windows * Update CONTRIBUTING for unit/int tests --- .editorconfig | 6 ++ .gitattributes | 1 + .github/workflows/ci.yml | 32 +++++++ .github/workflows/deploy.yml | 2 +- .prettierrc | 3 + .vscode/settings.json | 5 + CONTRIBUTING.md | 18 ++-- README.md | 3 +- docs/.eslintrc.js | 45 ++++----- docs/.prettierignore | 1 + docs/.prettierrc | 9 -- docs/babel.config.js | 2 +- docs/docs/getting-started.md | 80 +++++++--------- docs/docs/modules/agents/load_from_hub.md | 15 +-- docs/docs/modules/agents/overview.md | 12 ++- docs/docs/modules/chains/chat_vector_db_qa.md | 27 +++--- .../docs/modules/chains/conversation_chain.md | 4 +- docs/docs/modules/chains/llm_chain.md | 16 ++-- docs/docs/modules/chains/load_from_hub.md | 2 +- .../docs/modules/chains/question_answering.md | 10 +- docs/docs/modules/chains/vector_db_qa.md | 21 ++--- docs/docs/modules/indexes/document.md | 16 ++-- docs/docs/modules/indexes/embeddings.md | 3 +- docs/docs/modules/indexes/text_splitter.md | 47 ++++++---- docs/docs/modules/indexes/vectorstore.md | 6 +- docs/docs/modules/llms/cohere.md | 8 +- docs/docs/modules/llms/openai.md | 8 +- docs/docs/modules/memory/buffer_memory.md | 2 +- .../docs/modules/prompts/few_shot_examples.md | 49 +++++----- docs/docs/modules/prompts/prompt_template.md | 13 +-- docs/docs/overview.md | 19 ++-- docs/docusaurus.config.js | 94 ++++++++++--------- docs/package.json | 6 +- docs/sidebars.js | 38 ++++---- examples/package.json | 7 +- examples/src/agents/load_from_hub.ts | 5 +- 
examples/src/agents/mrkl.ts | 9 +- examples/src/chains/chat_vector_db.ts | 18 ++-- examples/src/chains/llm_chain.ts | 14 +-- examples/src/chains/load_from_hub.ts | 6 +- examples/src/chains/question_answering.ts | 10 +- examples/src/chains/vector_db_qa.ts | 18 ++-- examples/src/indexes/embeddings.ts | 17 ++-- .../src/indexes/recursive_text_splitter.ts | 13 ++- examples/src/indexes/text_splitter.ts | 16 +++- examples/src/indexes/vectorstores.ts | 10 +- examples/src/llm.ts | 8 +- examples/src/memory.ts | 16 ++-- examples/src/prompts/few_shot.ts | 64 ++++++------- examples/src/prompts/load_from_hub.ts | 6 +- examples/src/prompts/prompts.ts | 8 +- langchain/.eslintrc.js | 2 +- langchain/.prettierignore | 2 + .../{agent.test.ts => agent.int.test.ts} | 0 .../tests/{load.test.ts => load.int.test.ts} | 0 ...ts => chat_vector_db_qa_chain.int.test.ts} | 0 ...test.ts => combine_docs_chain.int.test.ts} | 0 ...lm_chain.test.ts => llm_chain.int.test.ts} | 0 ...test.ts => vector_db_qa_chain.int.test.ts} | 0 langchain/create-entrypoints.js | 5 +- langchain/embeddings/fake.ts | 11 +++ .../{openai.test.ts => openai.int.test.ts} | 0 langchain/jest.config.js | 2 +- .../{cohere.test.ts => cohere.int.test.ts} | 0 .../{openai.test.ts => openai.int.test.ts} | 0 langchain/package.json | 8 +- langchain/tsconfig.json | 1 + langchain/util/hub.ts | 9 +- .../vectorstores/tests/hnswlib.int.test.ts | 65 +++++++++++++ langchain/vectorstores/tests/hnswlib.test.ts | 20 ++-- package.json | 7 +- turbo.json | 26 +++-- 72 files changed, 597 insertions(+), 429 deletions(-) create mode 100644 .editorconfig create mode 100644 .gitattributes create mode 100644 .github/workflows/ci.yml create mode 100644 .prettierrc create mode 100644 .vscode/settings.json delete mode 100644 docs/.prettierrc create mode 100644 langchain/.prettierignore rename langchain/agents/tests/{agent.test.ts => agent.int.test.ts} (100%) rename langchain/chains/question_answering/tests/{load.test.ts => load.int.test.ts} (100%) rename 
langchain/chains/tests/{chat_vector_db_qa_chain.test.ts => chat_vector_db_qa_chain.int.test.ts} (100%) rename langchain/chains/tests/{combine_docs_chain.test.ts => combine_docs_chain.int.test.ts} (100%) rename langchain/chains/tests/{llm_chain.test.ts => llm_chain.int.test.ts} (100%) rename langchain/chains/tests/{vector_db_qa_chain.test.ts => vector_db_qa_chain.int.test.ts} (100%) create mode 100644 langchain/embeddings/fake.ts rename langchain/embeddings/tests/{openai.test.ts => openai.int.test.ts} (100%) rename langchain/llms/tests/{cohere.test.ts => cohere.int.test.ts} (100%) rename langchain/llms/tests/{openai.test.ts => openai.int.test.ts} (100%) create mode 100644 langchain/vectorstores/tests/hnswlib.int.test.ts diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 000000000000..57e782e37dfb --- /dev/null +++ b/.editorconfig @@ -0,0 +1,6 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf \ No newline at end of file diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 000000000000..94f480de94e1 --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +* text=auto eol=lf \ No newline at end of file diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 000000000000..65517b95982b --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,32 @@ +# This workflow will do a clean installation of node dependencies, cache/restore them, build the source code and run tests across different versions of node +# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-nodejs + +name: Node.js CI + +on: + push: + branches: ["main"] + pull_request: + branches: ["main"] + +jobs: + build: + strategy: + matrix: + os: [macos-latest, windows-latest, ubuntu-latest] + node-version: [16.x, 18.x, 19.x] + # See supported Node.js release schedule 
at https://nodejs.org/en/about/releases/ + + runs-on: ${{ matrix.os }} + + steps: + - uses: actions/checkout@v3 + - name: Use Node.js ${{ matrix.node-version }} + uses: actions/setup-node@v3 + with: + node-version: ${{ matrix.node-version }} + cache: "yarn" + - name: Install dependencies + run: yarn install --immutable + - run: yarn run ci + - run: yarn workspace langchain run test diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml index 2282e4ec9517..9d70507fb761 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -31,7 +31,7 @@ jobs: - name: Checkout uses: actions/checkout@v3 - name: Install dependencies - run: yarn install --frozen-lockfile + run: yarn install --immutable - name: Build website run: yarn workspace docs build - name: Setup Pages diff --git a/.prettierrc b/.prettierrc new file mode 100644 index 000000000000..b84e3effb496 --- /dev/null +++ b/.prettierrc @@ -0,0 +1,3 @@ +{ + "endOfLine": "lf" +} \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 000000000000..b47c8240bb04 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,5 @@ +{ + "yaml.schemas": { + "https://json.schemastore.org/github-workflow.json": "./.github/workflows/deploy.yml" + } +} \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index fa85bbe40b9e..f261e2dde0c8 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -47,6 +47,7 @@ good code into the codebase. ### 🏭Release process # TODO: + As of now, LangChain has an ad hoc release process: releases are cut with high frequency via by a developer and published to [npm](https://www.npmjs.com/package/langchain). 
@@ -68,26 +69,27 @@ with if you plan to contribute: - **[prettier](https://prettier.io/)** - enforcing standard code formatting - **[jest](https://jestjs.io/)** - testing code - **[TypeDoc](https://typedoc.org/)** - reference doc generation from - comments + comments - **[Docusaurus](https://docusaurus.io/)** - static site generation for documentation - Now, you should be able to run the common tasks in the following section. ## ✅Common Tasks ### Testing +Tests should be added within a `tests/` folder alongside the modules they +are testing. + **Unit tests** cover modular logic that does not require calls to outside APIs. If you add new logic, please add a unit test. +Unit tests should be called `*.test.ts`. **Integration tests** cover logic that requires making calls to outside APIs (often integration with other services). If you add support for a new external API, please add a new integration test. - -Tests should be added within a `tests/` folder alongside the modules they -are testing. +Integration tests should be called `*.int.test.ts`. To run tests, run: @@ -111,7 +113,7 @@ level of the repo. Langchain let's user import from multiple subpaths, e.g. ```ts -import { OpenAI } from "langchain/llms" +import { OpenAI } from "langchain/llms"; ``` In order to declare a new entrypoint that users can import from, you @@ -122,8 +124,8 @@ the following to the `entrypoints` variable: ```ts const entrypoints = { // ... 
- "tools": "agents/tools/index.ts", -} + tools: "agents/tools/index.ts", +}; ``` This will make sure the entrypoint is included in the published package, diff --git a/README.md b/README.md index 1d4cd00a83f7..43ea3a275b69 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ Please fill out [this form](https://forms.gle/57d8AmXBYp8PP8tZA) and we'll set u `yarn add langchain` ```typescript -import { OpenAI } from 'langchain/llms'; +import { OpenAI } from "langchain/llms"; ``` ## 🤔 What is this? @@ -32,7 +32,6 @@ The [LangChainHub](https://github.com/hwchase17/langchain-hub) is a central plac For full documentation of prompts, chains, agents and more, please see [here](https://hwchase17.github.io/langchainjs/docs/overview). - ## 💁 Contributing As an open source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infra, or better documentation. 
diff --git a/docs/.eslintrc.js b/docs/.eslintrc.js index 8ccf3b17bdf7..617241c0b6c0 100644 --- a/docs/.eslintrc.js +++ b/docs/.eslintrc.js @@ -19,47 +19,48 @@ module.exports = { jest: true, node: true, }, - parser: '@babel/eslint-parser', + parser: "@babel/eslint-parser", parserOptions: { allowImportExportEverywhere: true, }, - extends: ['airbnb', 'prettier'], - plugins: ['react-hooks', 'header'], + extends: ["airbnb", "prettier"], + plugins: ["react-hooks", "header"], + ignorePatterns: ["build", "docs/api", "node_modules"], rules: { // Ignore certain webpack alias because it can't be resolved - 'import/no-unresolved': [ + "import/no-unresolved": [ ERROR, - {ignore: ['^@theme', '^@docusaurus', '^@generated']}, + { ignore: ["^@theme", "^@docusaurus", "^@generated"] }, ], - 'import/extensions': OFF, - 'header/header': [ + "import/extensions": OFF, + "header/header": [ ERROR, - 'block', + "block", [ - '*', - ' * Copyright (c) Meta Platforms, Inc. and affiliates.', - ' *', - ' * This source code is licensed under the MIT license found in the', - ' * LICENSE file in the root directory of this source tree.', - ' *', + "*", + " * Copyright (c) Meta Platforms, Inc. and affiliates.", + " *", + " * This source code is licensed under the MIT license found in the", + " * LICENSE file in the root directory of this source tree.", + " *", // Unfortunately eslint-plugin-header doesn't support optional lines. // If you want to enforce your website JS files to have @flow or @format, // modify these lines accordingly. { - pattern: '.* @format', + pattern: ".* @format", }, - ' ', + " ", ], ], - 'react/jsx-filename-extension': OFF, - 'react-hooks/rules-of-hooks': ERROR, - 'react/prop-types': OFF, // PropTypes aren't used much these days. - 'react/function-component-definition': [ + "react/jsx-filename-extension": OFF, + "react-hooks/rules-of-hooks": ERROR, + "react/prop-types": OFF, // PropTypes aren't used much these days. 
+ "react/function-component-definition": [ WARNING, { - namedComponents: 'function-declaration', - unnamedComponents: 'arrow-function', + namedComponents: "function-declaration", + unnamedComponents: "arrow-function", }, ], }, diff --git a/docs/.prettierignore b/docs/.prettierignore index 9b744063823d..dc0831862d6b 100644 --- a/docs/.prettierignore +++ b/docs/.prettierignore @@ -1,3 +1,4 @@ node_modules build .docusaurus +docs/api \ No newline at end of file diff --git a/docs/.prettierrc b/docs/.prettierrc deleted file mode 100644 index 34cf084519c0..000000000000 --- a/docs/.prettierrc +++ /dev/null @@ -1,9 +0,0 @@ -{ - "arrowParens": "always", - "bracketSameLine": true, - "bracketSpacing": false, - "printWidth": 80, - "proseWrap": "never", - "singleQuote": true, - "trailingComma": "all" -} diff --git a/docs/babel.config.js b/docs/babel.config.js index 84ad45acbd0e..4ca11b80d66a 100644 --- a/docs/babel.config.js +++ b/docs/babel.config.js @@ -8,5 +8,5 @@ */ module.exports = { - presets: [require.resolve('@docusaurus/core/lib/babel/preset')], + presets: [require.resolve("@docusaurus/core/lib/babel/preset")], }; diff --git a/docs/docs/getting-started.md b/docs/docs/getting-started.md index 4d0920526c53..d44416acc792 100644 --- a/docs/docs/getting-started.md +++ b/docs/docs/getting-started.md @@ -1,6 +1,5 @@ # Quickstart Guide - This tutorial gives you a quick walkthrough about building an end-to-end language model application with LangChain. ## Installation @@ -11,7 +10,6 @@ To get started, install LangChain with the following command: npm i langchain ``` - ## Environment Setup Using LangChain will usually require integrations with one or more model providers, data stores, apis, etc. @@ -22,27 +20,21 @@ For this example, we will be using OpenAI's APIs, so we will first need to insta npm i openai ``` -We will then need to set the environment variable for the OpenAI key. -We can do this by setting the value in a `.env` file. 
+We will then need to set the environment variable for the OpenAI key. We can do this by setting the value in a `.env` file. ``` OPENAI_API_KEY="..." ``` - ## Building a Language Model Application Now that we have installed LangChain and set up our environment, we can start building our language model application. LangChain provides many modules that can be used to build language model applications. Modules can be combined to create more complex applications, or be used individually for simple applications. - - ### LLMs: Get predictions from a language model -The most basic building block of LangChain is calling an LLM on some input. -Let's walk through a simple example of how to do this. -For this purpose, let's pretend we are building a service that generates a company name based on what the company makes. +The most basic building block of LangChain is calling an LLM on some input. Let's walk through a simple example of how to do this. For this purpose, let's pretend we are building a service that generates a company name based on what the company makes. In order to do this, we first need to import the LLM wrapper. @@ -50,17 +42,18 @@ In order to do this, we first need to import the LLM wrapper. import { OpenAI } from "langchain"; ``` -We can then initialize the wrapper with any arguments. -In this example, we probably want the outputs to be MORE random, so we'll initialize it with a HIGH temperature. +We can then initialize the wrapper with any arguments. In this example, we probably want the outputs to be MORE random, so we'll initialize it with a HIGH temperature. ```typescript -const model = new OpenAI({temperature: 0.9}); +const model = new OpenAI({ temperature: 0.9 }); ``` We can now call it on some input! ```typescript -const res = await model.call("What would be a good company name a company that makes colorful socks?"); +const res = await model.call( + "What would be a good company name a company that makes colorful socks?" 
+); console.log(res); ``` @@ -68,17 +61,11 @@ console.log(res); { res: '\n\nFantasy Sockery' } ``` - - - ### Prompt Templates: Manage prompts for LLMs -Calling an LLM is a great first step, but it's just the beginning. -Normally when you use an LLM in an application, you are not sending user input directly to the LLM. -Instead, you are probably taking user input and constructing a prompt, and then sending that to the LLM. +Calling an LLM is a great first step, but it's just the beginning. Normally when you use an LLM in an application, you are not sending user input directly to the LLM. Instead, you are probably taking user input and constructing a prompt, and then sending that to the LLM. -For example, in the previous example, the text we passed in was hardcoded to ask for a name for a company that made colorful socks. -In this imaginary service, what we would want to do is take only the user input describing what the company does, and then format the prompt with that information. +For example, in the previous example, the text we passed in was hardcoded to ask for a name for a company that made colorful socks. In this imaginary service, what we would want to do is take only the user input describing what the company does, and then format the prompt with that information. This is easy to do with LangChain! @@ -86,14 +73,17 @@ First lets define the prompt template: ```typescript import { PromptTemplate } from "langchain/prompts"; -const template = "What is a good name for a company that makes {product}?" -const prompt = new PromptTemplate({template: template, inputVariables: ["product"]}); +const template = "What is a good name for a company that makes {product}?"; +const prompt = new PromptTemplate({ + template: template, + inputVariables: ["product"], +}); ``` Let's now see how this works! We can call the `.format` method to format it. 
```typescript -const res = prompt.format({product: "colorful socks"}); +const res = prompt.format({ product: "colorful socks" }); console.log(res); ``` @@ -101,9 +91,6 @@ console.log(res); { res: 'What is a good name for a company that makes colorful socks?' } ``` - - - ### Chains: Combine LLMs and prompts in multi-step workflows Up until now, we've worked with the PromptTemplate and LLM primitives by themselves. But of course, a real application is not just one primitive, but rather a combination of them. @@ -117,24 +104,26 @@ Extending the previous example, we can construct an LLMChain which takes user in ```typescript import { OpenAI } from "langchain/llms"; import { PromptTemplate } from "langchain/prompts"; -const model = new OpenAI({temperature: 0.9}); -const template = "What is a good name for a company that makes {product}?" -const prompt = new PromptTemplate({template: template, inputVariables: ["product"]}); +const model = new OpenAI({ temperature: 0.9 }); +const template = "What is a good name for a company that makes {product}?"; +const prompt = new PromptTemplate({ + template: template, + inputVariables: ["product"], +}); ``` We can now create a very simple chain that will take user input, format the prompt with it, and then send it to the LLM: ```typescript -import {LLMChain} from "langchain/chains" - -const chain = new LLMChain({llm: model, prompt: prompt}) +import { LLMChain } from "langchain/chains"; +const chain = new LLMChain({ llm: model, prompt: prompt }); ``` Now we can run that chain only specifying the product! ```typescript -const res = await chain.call({product: "colorful socks"}); +const res = await chain.call({ product: "colorful socks" }); console.log(res); ``` @@ -142,10 +131,7 @@ console.log(res); { res: { text: '\n\nColorfulCo Sockery.' } } ``` -There we go! There's the first chain - an LLM Chain. -This is one of the simpler types of chains, but understanding how it works will set you up well for working with more complex chains. 
- - +There we go! There's the first chain - an LLM Chain. This is one of the simpler types of chains, but understanding how it works will set you up well for working with more complex chains. ### Agents: Dynamically call chains based on user input @@ -155,7 +141,6 @@ Agents no longer do: they use an LLM to determine which actions to take and in w When used correctly agents can be extremely powerful. In this tutorial, we show you how to easily use agents through the simplest, highest level API. - In order to load agents, you should understand the following concepts: - Tool: A function that performs a specific duty. This can be things like: Google Search, Database lookup, code REPL, other chains. The interface for a tool is currently a function that is expected to have a string as an input, with a string as an output. @@ -181,16 +166,19 @@ import { OpenAI } from "langchain"; import { initializeAgentExecutor } from "langchain/agents"; import { SerpAPI, Calculator } from "langchain/tools"; -const model = new OpenAI({temperature: 0}); +const model = new OpenAI({ temperature: 0 }); const tools = [new SerpAPI(), new Calculator()]; const executor = await initializeAgentExecutor( - tools, model, "zero-shot-react-description" + tools, + model, + "zero-shot-react-description" ); console.log("Loaded agent."); -const input = "Who is Olivia Wilde's boyfriend?" + -" What is his current age raised to the 0.23 power?"; +const input = + "Who is Olivia Wilde's boyfriend?" + + " What is his current age raised to the 0.23 power?"; console.log(`Executing with input "${input}"...`); const result = await executor.call({ input }); @@ -203,8 +191,6 @@ langchain-examples:start: Executing with input "Who is Olivia Wilde's boyfriend? langchain-examples:start: Got output Olivia Wilde's boyfriend is Jason Sudeikis, and his current age raised to the 0.23 power is 2.4242784855673896. 
``` - - ### Memory: Add state to chains and agents So far, all the chains and agents we've gone through have been stateless. But often, you may want a chain or agent to have some concept of "memory" so that it may remember information about its previous interactions. The clearest and simple example of this is when designing a chatbot - you want it to remember previous messages so it can use context from that to have a better conversation. This would be a type of "short-term memory". On the more complex side, you could imagine a chain/agent remembering key pieces of information over time - this would be a form of "long-term memory". @@ -220,7 +206,7 @@ import { ConversationChain } from "langchain/chains"; const model = new OpenAI({}); const memory = new BufferMemory(); -const chain = new ConversationChain({ llm: model, memory: memory}); +const chain = new ConversationChain({ llm: model, memory: memory }); const res1 = await chain.call({ input: "Hi! I'm Jim." }); console.log(res1); ``` diff --git a/docs/docs/modules/agents/load_from_hub.md b/docs/docs/modules/agents/load_from_hub.md index cbb4694e1e0c..6afe8a6cfcf6 100644 --- a/docs/docs/modules/agents/load_from_hub.md +++ b/docs/docs/modules/agents/load_from_hub.md @@ -25,19 +25,20 @@ const model = new OpenAI(); const tools = [new SerpAPI(), new Calculator()]; const agent = await loadAgent( -"lc://agents/zero-shot-react-description/agent.json", -{ llm: model, tools } + "lc://agents/zero-shot-react-description/agent.json", + { llm: model, tools } ); console.log("Loaded agent from Langchain hub"); const executor = AgentExecutor.fromAgentAndTools({ -agent, -tools, -returnIntermediateSteps: true, + agent, + tools, + returnIntermediateSteps: true, }); -const input = "Who is Olivia Wilde's boyfriend?" + -" What is his current age raised to the 0.23 power?"; +const input = + "Who is Olivia Wilde's boyfriend?" 
+ + " What is his current age raised to the 0.23 power?"; console.log(`Executing with input "${input}"...`); const result = await executor.call({ input }); diff --git a/docs/docs/modules/agents/overview.md b/docs/docs/modules/agents/overview.md index b5e7d8662431..8df8f95ef4f0 100644 --- a/docs/docs/modules/agents/overview.md +++ b/docs/docs/modules/agents/overview.md @@ -4,7 +4,6 @@ Agents use an LLM to determine which actions to take and in what order. An actio When used correctly agents can be extremely powerful. In this tutorial, we show you how to easily use agents through the simplest, highest level API. - In order to load agents, you should understand the following concepts: - Tool: A function that performs a specific duty. This can be things like: Google Search, Database lookup, code REPL, other chains. The interface for a tool is currently a function that is expected to have a string as an input, with a string as an output. @@ -30,16 +29,19 @@ import { OpenAI } from "langchain"; import { initializeAgentExecutor } from "langchain/agents"; import { SerpAPI, Calculator } from "langchain/tools"; -const model = new OpenAI({temperature: 0}); +const model = new OpenAI({ temperature: 0 }); const tools = [new SerpAPI(), new Calculator()]; const executor = await initializeAgentExecutor( - tools, model, "zero-shot-react-description" + tools, + model, + "zero-shot-react-description" ); console.log("Loaded agent."); -const input = "Who is Olivia Wilde's boyfriend?" + -" What is his current age raised to the 0.23 power?"; +const input = + "Who is Olivia Wilde's boyfriend?" 
+ + " What is his current age raised to the 0.23 power?"; console.log(`Executing with input "${input}"...`); const result = await executor.call({ input }); diff --git a/docs/docs/modules/chains/chat_vector_db_qa.md b/docs/docs/modules/chains/chat_vector_db_qa.md index 1f7fdffea4da..ce2cb65ac647 100644 --- a/docs/docs/modules/chains/chat_vector_db_qa.md +++ b/docs/docs/modules/chains/chat_vector_db_qa.md @@ -1,9 +1,8 @@ # Chat Vector DB QA Chain -A Chat Vector DB QA chain takes as input a question and chat history. -It first combines the chat history and the question into a standalone question, then looks up relevant documents from the vector database, and then passes those documents and the question to a question answering chain to return a response. +A Chat Vector DB QA chain takes as input a question and chat history. It first combines the chat history and the question into a standalone question, then looks up relevant documents from the vector database, and then passes those documents and the question to a question answering chain to return a response. -To create one, you will need a vectorstore, which can be created from embeddings. +To create one, you will need a vectorstore, which can be created from embeddings. Below is an end-to-end example of doing question answering over a recent state of the union address. 
@@ -13,30 +12,28 @@ import { ChatVectorDBQAChain } from "langchain/chains"; import { HNSWLib } from "langchain/vectorstores"; import { OpenAIEmbeddings } from "langchain/embeddings"; import { RecursiveCharacterTextSplitter } from "langchain/text_splitter"; -import * as fs from 'fs'; - +import * as fs from "fs"; /* Initialize the LLM to use to answer the question */ const model = new OpenAI({}); /* Load in the file we want to do question answering over */ -const text = fs.readFileSync('state_of_the_union.txt','utf8'); +const text = fs.readFileSync("state_of_the_union.txt", "utf8"); /* Split the text into chunks */ -const textSplitter = new RecursiveCharacterTextSplitter({chunkSize: 1000}); +const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 }); const docs = textSplitter.createDocuments([text]); /* Create the vectorstore */ -const vectorStore = await HNSWLib.fromDocuments( - docs, - new OpenAIEmbeddings() -); +const vectorStore = await HNSWLib.fromDocuments(docs, new OpenAIEmbeddings()); /* Create the chain */ const chain = ChatVectorDBQAChain.fromLLM(model, vectorStore); /* Ask it a question */ -const question = "What did the president say about Justice Breyer?" 
+const question = "What did the president say about Justice Breyer?"; const res = await chain.call({ question: question, chat_history: [] }); console.log(res); /* Ask it a follow up question */ -const chatHistory = question + res["text"] -const followUpRes = await chain.call({ question: "Was that nice?", chat_history: chatHistory }); +const chatHistory = question + res["text"]; +const followUpRes = await chain.call({ + question: "Was that nice?", + chat_history: chatHistory, +}); console.log(followUpRes); - ``` diff --git a/docs/docs/modules/chains/conversation_chain.md b/docs/docs/modules/chains/conversation_chain.md index 7a11b05bc737..204f5ad7c017 100644 --- a/docs/docs/modules/chains/conversation_chain.md +++ b/docs/docs/modules/chains/conversation_chain.md @@ -1,4 +1,4 @@ -# Conversation Chain +# Conversation Chain The conversation chain is prebuilt chain aimed at facilitating a chat bot experience. @@ -14,4 +14,4 @@ const res1 = await chain.call({ input: "Hi! I'm Jim." }); console.log({ res1 }); const res2 = await chain.call({ input: "What's my name?" }); console.log({ res2 }); -``` \ No newline at end of file +``` diff --git a/docs/docs/modules/chains/llm_chain.md b/docs/docs/modules/chains/llm_chain.md index e2494eb81981..6281d766dc8b 100644 --- a/docs/docs/modules/chains/llm_chain.md +++ b/docs/docs/modules/chains/llm_chain.md @@ -9,24 +9,26 @@ We can construct an LLMChain which takes user input, formats it with a PromptTem ```typescript import { OpenAI } from "langchain/llms"; import { PromptTemplate } from "langchain/prompts"; -const model = new OpenAI({temperature: 0.9}); -const template = "What is a good name for a company that makes {product}?" 
-const prompt = new PromptTemplate({template: template, inputVariables: ["product"]}); +const model = new OpenAI({ temperature: 0.9 }); +const template = "What is a good name for a company that makes {product}?"; +const prompt = new PromptTemplate({ + template: template, + inputVariables: ["product"], +}); ``` We can now create a very simple chain that will take user input, format the prompt with it, and then send it to the LLM: ```typescript -import {LLMChain} from "langchain/chains" - -const chain = new LLMChain({llm: model, prompt: prompt}) +import { LLMChain } from "langchain/chains"; +const chain = new LLMChain({ llm: model, prompt: prompt }); ``` Now we can run that chain only specifying the product! ```typescript -const res = await chain.call({product: "colorful socks"}); +const res = await chain.call({ product: "colorful socks" }); console.log({ res }); ``` diff --git a/docs/docs/modules/chains/load_from_hub.md b/docs/docs/modules/chains/load_from_hub.md index fd0a6537aa8b..768409174a31 100644 --- a/docs/docs/modules/chains/load_from_hub.md +++ b/docs/docs/modules/chains/load_from_hub.md @@ -6,6 +6,6 @@ import { loadChain } from "langchain/chains"; const chain = await loadChain("lc://chains/hello-world/chain.json"); -const res = chain.call({topic: "foo"}); +const res = chain.call({ topic: "foo" }); console.log(res); ``` diff --git a/docs/docs/modules/chains/question_answering.md b/docs/docs/modules/chains/question_answering.md index b968c51e342c..fdbf81e708cd 100644 --- a/docs/docs/modules/chains/question_answering.md +++ b/docs/docs/modules/chains/question_answering.md @@ -9,7 +9,13 @@ import { Document } from "langchain/document"; const model = new OpenAI({}); const chain = loadQAChain(model); -const docs = [ new Document({pageContent: 'harrison went to harvard' }), new Document({pageContent: 'ankush went to princeton' }), ]; -const res = await chain.call({ input_documents: docs, question: "Where did harrison go to college" }); +const docs = [ + new 
Document({ pageContent: "harrison went to harvard" }), + new Document({ pageContent: "ankush went to princeton" }), +]; +const res = await chain.call({ + input_documents: docs, + question: "Where did harrison go to college", +}); console.log({ res }); ``` diff --git a/docs/docs/modules/chains/vector_db_qa.md b/docs/docs/modules/chains/vector_db_qa.md index 17bb2245722e..25abe3a9bfb0 100644 --- a/docs/docs/modules/chains/vector_db_qa.md +++ b/docs/docs/modules/chains/vector_db_qa.md @@ -2,7 +2,7 @@ A Vector DB QA chain takes as input a question, looks up relevant documents from the vector database, and then passes those documents and the question to a question answering chain to return a response. -To create one, you will need a vectorstore, which can be created from embeddings. +To create one, you will need a vectorstore, which can be created from embeddings. Below is an end-to-end example of doing question answering over a recent state of the union address. @@ -12,23 +12,22 @@ import { VectorDBQAChain } from "langchain/chains"; import { HNSWLib } from "langchain/vectorstores"; import { OpenAIEmbeddings } from "langchain/embeddings"; import { RecursiveCharacterTextSplitter } from "langchain/text_splitter"; -import * as fs from 'fs'; +import * as fs from "fs"; /* Initialize the LLM to use to answer the question */ const model = new OpenAI({}); /* Load in the file we want to do question answering over */ -const text = fs.readFileSync('state_of_the_union.txt','utf8'); +const text = fs.readFileSync("state_of_the_union.txt", "utf8"); /* Split the text into chunks */ -const textSplitter = new RecursiveCharacterTextSplitter({chunkSize: 1000}) -const docs = textSplitter.createDocuments([text]) +const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 }); +const docs = textSplitter.createDocuments([text]); /* Create the vectorstore */ -const vectorStore = await HNSWLib.fromDocuments( - docs, - new OpenAIEmbeddings() -); +const vectorStore = await 
HNSWLib.fromDocuments(docs, new OpenAIEmbeddings()); /* Create the chain */ const chain = VectorDBQAChain.fromLLM(model, vectorStore); /* Ask it a question */ -const res = await chain.call({ input_documents: docs, query: "What did the president say about Justice Breyer?" }); - +const res = await chain.call({ + input_documents: docs, + query: "What did the president say about Justice Breyer?", +}); ``` diff --git a/docs/docs/modules/indexes/document.md b/docs/docs/modules/indexes/document.md index 5e7f66041b22..d297e43954f7 100644 --- a/docs/docs/modules/indexes/document.md +++ b/docs/docs/modules/indexes/document.md @@ -1,23 +1,19 @@ # Documents -Language models only know information about what they were trained on. -In order to get them answer questions or summarize other information you have to pass it to the language model. -Therefor, it is very important to have a concept of a document. +Language models only know information about what they were trained on. In order to get them to answer questions or summarize other information you have to pass it to the language model. Therefore, it is very important to have a concept of a document. -A document at its core is fairly simple. -It consists of a piece of text and optional metadata. -The piece of text is what we interact with the language model, while the optional metadata is useful for keeping track of metadata about the document (such as the source). +A document at its core is fairly simple. It consists of a piece of text and optional metadata. The piece of text is what we interact with the language model, while the optional metadata is useful for keeping track of metadata about the document (such as the source). 
You can create a document object rather easily in LangChain with: ```typescript -import { Document } from 'langchain/document'; -const doc = new Document({pageContent: 'foo' }) +import { Document } from "langchain/document"; +const doc = new Document({ pageContent: "foo" }); ``` You can create one with metadata with: ```typescript -import { Document } from 'langchain/document'; -const doc = new Document({pageContent: 'foo', metadata: { source: '1' }}) +import { Document } from "langchain/document"; +const doc = new Document({ pageContent: "foo", metadata: { source: "1" } }); ``` diff --git a/docs/docs/modules/indexes/embeddings.md b/docs/docs/modules/indexes/embeddings.md index 1104a164251b..d0585f897c96 100644 --- a/docs/docs/modules/indexes/embeddings.md +++ b/docs/docs/modules/indexes/embeddings.md @@ -1,7 +1,6 @@ # Embeddings -Embeddings can be used to create a numerical representation of textual data. -This numerical representation is useful because it can be used to find similar documents. +Embeddings can be used to create a numerical representation of textual data. This numerical representation is useful because it can be used to find similar documents. Below is an example of how to use the OpenAI embeddings. Embeddings occasionally have different embedding methods for queries versus documents, so the embedding class exposes a `embedQuery` and `embedDocuments` method. diff --git a/docs/docs/modules/indexes/text_splitter.md b/docs/docs/modules/indexes/text_splitter.md index 4e9a07c61b04..fc350f0a75cd 100644 --- a/docs/docs/modules/indexes/text_splitter.md +++ b/docs/docs/modules/indexes/text_splitter.md @@ -1,46 +1,53 @@ # Text Splitter -Language Models are often limited by the amount of text that you can pass to them. -Therefor, it is neccessary to split them up into smaller chunks. -LangChain provides several utilities for doing so. +Language Models are often limited by the amount of text that you can pass to them. 
Therefore, it is necessary to split them up into smaller chunks. LangChain provides several utilities for doing so. -The recommended TextSplitter is the `RecursiveCharacterTextSplitter`. -This will split documents recursively by different characters - starting with `"\n\n"`, then `"\n"`, then `" "`. This is nice because it will try to keep all the semantically relevant content in the same place for as long as possible. +The recommended TextSplitter is the `RecursiveCharacterTextSplitter`. This will split documents recursively by different characters - starting with `"\n\n"`, then `"\n"`, then `" "`. This is nice because it will try to keep all the semantically relevant content in the same place for as long as possible. Important parameters to know here are `chunkSize` and `chunkOverlap`. `chunkSize` controls the max size (in terms of number of characters) of the final documents. `chunkOverlap` specifies how much overlap there should be between chunks. This is often helpful to make sure that the text isn't split weirdly. In the example below we set these values to be small (for illustration purposes), but in practice they default to `4000` and `200` respectively. ```typescript -import { RecursiveCharacterTextSplitter } from 'langchain/text_splitter'; +import { RecursiveCharacterTextSplitter } from "langchain/text_splitter"; const text = `Hi.\n\nI'm Harrison.\n\nHow? Are? You?\nOkay then f f f f. This is a weird text to write, but gotta test the splittingggg some how.\n\n Bye!\n\n-H.`; -const splitter = new RecursiveCharacterTextSplitter({ chunkSize: 10, chunkOverlap: 1 }); +const splitter = new RecursiveCharacterTextSplitter({ + chunkSize: 10, + chunkOverlap: 1, +}); const output = splitter.createDocuments([text]); ``` -You'll note that in the above example we are splitting a raw text string and getting back a list of documents. -We can also split documents directly. 
+You'll note that in the above example we are splitting a raw text string and getting back a list of documents. We can also split documents directly. ```typescript -import { Document } from 'langchain/document'; -import { RecursiveCharacterTextSplitter } from 'langchain/text_splitter'; +import { Document } from "langchain/document"; +import { RecursiveCharacterTextSplitter } from "langchain/text_splitter"; const text = `Hi.\n\nI'm Harrison.\n\nHow? Are? You?\nOkay then f f f f. This is a weird text to write, but gotta test the splittingggg some how.\n\n Bye!\n\n-H.`; -const splitter = new RecursiveCharacterTextSplitter({ chunkSize: 10, chunkOverlap: 1 }); -const docOutput = splitter.splitDocuments([new Document({pageContent: text})]); +const splitter = new RecursiveCharacterTextSplitter({ + chunkSize: 10, + chunkOverlap: 1, +}); +const docOutput = splitter.splitDocuments([ + new Document({ pageContent: text }), +]); ``` -Besides the `RecursiveCharacterTextSplitter`, there is also the more standard `CharacterTextSplitter`. -This splits only on one type of character (defaults to `"\n\n"`). You can use it in the exact same way. +Besides the `RecursiveCharacterTextSplitter`, there is also the more standard `CharacterTextSplitter`. This splits only on one type of character (defaults to `"\n\n"`). You can use it in the exact same way. 
```typescript -import { Document } from 'langchain/document'; -import { CharacterTextSplitter } from 'langchain/text_splitter'; - -const text = 'foo bar baz 123'; -const splitter = new CharacterTextSplitter({separator: ' ', chunkSize: 7, chunkOverlap: 3 }); +import { Document } from "langchain/document"; +import { CharacterTextSplitter } from "langchain/text_splitter"; + +const text = "foo bar baz 123"; +const splitter = new CharacterTextSplitter({ + separator: " ", + chunkSize: 7, + chunkOverlap: 3, +}); const output = splitter.createDocuments([text]); ``` diff --git a/docs/docs/modules/indexes/vectorstore.md b/docs/docs/modules/indexes/vectorstore.md index 52258b76c7f5..ea7274fd33c5 100644 --- a/docs/docs/modules/indexes/vectorstore.md +++ b/docs/docs/modules/indexes/vectorstore.md @@ -7,9 +7,9 @@ import { HNSWLib } from "langchain/vectorstores"; import { OpenAIEmbeddings } from "langchain/embeddings"; const vectorStore = await HNSWLib.fromTexts( -["Hello world", "Bye bye", "hello nice world"], -[{ id: 2 }, { id: 1 }, { id: 3 }], -new OpenAIEmbeddings() + ["Hello world", "Bye bye", "hello nice world"], + [{ id: 2 }, { id: 1 }, { id: 3 }], + new OpenAIEmbeddings() ); const resultOne = await vectorStore.similaritySearch("hello world", 1); diff --git a/docs/docs/modules/llms/cohere.md b/docs/docs/modules/llms/cohere.md index 6f6237326bd4..a8788f3a0950 100644 --- a/docs/docs/modules/llms/cohere.md +++ b/docs/docs/modules/llms/cohere.md @@ -5,7 +5,9 @@ This guide goes through how to use the Cohere LLM wrapper. ```typescript import { Cohere } from "langchain/llms"; -const model = new Cohere({maxTokens: 20}); -const res = await model.call("What would be a good company name a company that makes colorful socks?"); +const model = new Cohere({ maxTokens: 20 }); +const res = await model.call( + "What would be a good company name a company that makes colorful socks?" 
+); console.log({ res }); -``` \ No newline at end of file +``` diff --git a/docs/docs/modules/llms/openai.md b/docs/docs/modules/llms/openai.md index 2f1da2526657..afc31be3ccde 100644 --- a/docs/docs/modules/llms/openai.md +++ b/docs/docs/modules/llms/openai.md @@ -5,7 +5,9 @@ This guide goes through how to use the OpenAI LLM wrapper. ```typescript import { OpenAI } from "langchain/llms"; -const model = new OpenAI({temperature: 0.9}); -const res = await model.call("What would be a good company name a company that makes colorful socks?"); +const model = new OpenAI({ temperature: 0.9 }); +const res = await model.call( + "What would be a good company name a company that makes colorful socks?" +); console.log({ res }); -``` \ No newline at end of file +``` diff --git a/docs/docs/modules/memory/buffer_memory.md b/docs/docs/modules/memory/buffer_memory.md index 832f1df99242..efac1314eb85 100644 --- a/docs/docs/modules/memory/buffer_memory.md +++ b/docs/docs/modules/memory/buffer_memory.md @@ -9,7 +9,7 @@ import { ConversationChain } from "langchain/chains"; const model = new OpenAI({}); const memory = new BufferMemory(); -const chain = new ConversationChain({ llm: model, memory: memory}); +const chain = new ConversationChain({ llm: model, memory: memory }); const res1 = await chain.call({ input: "Hi! I'm Jim." }); console.log({ res1 }); ``` diff --git a/docs/docs/modules/prompts/few_shot_examples.md b/docs/docs/modules/prompts/few_shot_examples.md index 2c496eba11f0..820398c77d22 100644 --- a/docs/docs/modules/prompts/few_shot_examples.md +++ b/docs/docs/modules/prompts/few_shot_examples.md @@ -11,35 +11,34 @@ import { FewShotPromptTemplate, PromptTemplate } from "langchain/prompts"; /* First, create the list of few shot examples. 
*/ const examples = [ - {"word": "happy", "antonym": "sad"}, - {"word": "tall", "antonym": "short"}, -] + { word: "happy", antonym: "sad" }, + { word: "tall", antonym: "short" }, +]; /** Next, we specify the template to format the examples we have provided. -We use the `PromptTemplate` class for this. */ -const exampleFormatterTemplate = "Word: {word}\nAntonym: {antonym}\n" +We use the `PromptTemplate` class for this. */ +const exampleFormatterTemplate = "Word: {word}\nAntonym: {antonym}\n"; const examplePrompt = new PromptTemplate({ - inputVariables: ["word", "antonym"], - template: exampleFormatterTemplate, -}) + inputVariables: ["word", "antonym"], + template: exampleFormatterTemplate, +}); /*# Finally, we create the `FewShotPromptTemplate` object.*/ const fewShotPrompt = new FewShotPromptTemplate({ - /* These are the examples we want to insert into the prompt.*/ - examples: examples, - /*This is how we want to format the examples when we insert them into the prompt.*/ - examplePrompt: examplePrompt, - /*The prefix is some text that goes before the examples in the prompt. Usually, this consists of intructions.*/ - prefix:"Give the antonym of every input", - /*The suffix is some text that goes after the examples in the prompt. Usually, this is where the user input will go*/ - suffix:"Word: {input}\nAntonym:", - /*The input variables are the variables that the overall prompt expects.*/ - inputVariables: ["input"], - /*The example_separator is the string we will use to join the prefix, examples, and suffix together with.*/ - exampleSeparator:"\n\n", - /* The template format is the formatting method to use for the template. Should usually be f-string. */ - templateFormat: "f-string", -}) + /* These are the examples we want to insert into the prompt.*/ + examples: examples, + /*This is how we want to format the examples when we insert them into the prompt.*/ + examplePrompt: examplePrompt, + /*The prefix is some text that goes before the examples in the prompt. 
Usually, this consists of instructions.*/ + prefix: "Give the antonym of every input", + /*The suffix is some text that goes after the examples in the prompt. Usually, this is where the user input will go*/ + suffix: "Word: {input}\nAntonym:", + /*The input variables are the variables that the overall prompt expects.*/ + inputVariables: ["input"], + /*The example_separator is the string we will use to join the prefix, examples, and suffix together with.*/ + exampleSeparator: "\n\n", + /* The template format is the formatting method to use for the template. Should usually be f-string. */ + templateFormat: "f-string", +}); /*We can now generate a prompt using the `format` method.*/ -const res = fewShotPrompt.format({input: "big"}); +const res = fewShotPrompt.format({ input: "big" }); console.log({ res }); - ``` diff --git a/docs/docs/modules/prompts/prompt_template.md b/docs/docs/modules/prompts/prompt_template.md index ef12b4bfa817..dfa0ef56c744 100644 --- a/docs/docs/modules/prompts/prompt_template.md +++ b/docs/docs/modules/prompts/prompt_template.md @@ -1,19 +1,20 @@ # Prompt Templates -This example walks through how to use PromptTemplates. -At their core, prompt templates are objects that are made up of a template with certain input variables. -This object can then be called with `.format(...)` to format the input variables accordingly. +This example walks through how to use PromptTemplates. At their core, prompt templates are objects that are made up of a template with certain input variables. This object can then be called with `.format(...)` to format the input variables accordingly. ```typescript import { PromptTemplate } from "langchain/prompts"; -const template = "What is a good name for a company that makes {product}?" 
-const prompt = new PromptTemplate({template: template, inputVariables: ["product"]}); +const template = "What is a good name for a company that makes {product}?"; +const prompt = new PromptTemplate({ + template: template, + inputVariables: ["product"], +}); ``` Let's now see how this works! We can call the `.format` method to format it. ```typescript -const res = prompt.format({product: "colorful socks"}); +const res = prompt.format({ product: "colorful socks" }); console.log({ res }); ``` diff --git a/docs/docs/overview.md b/docs/docs/overview.md index 998d61275b3f..24384a359b83 100644 --- a/docs/docs/overview.md +++ b/docs/docs/overview.md @@ -1,10 +1,6 @@ # Welcome to LangChain -Large language models (LLMs) are emerging as a transformative technology, enabling -developers to build applications that they previously could not. -But using these LLMs in isolation is often not enough to -create a truly powerful app - the real power comes when you are able to -combine them with other sources of computation or knowledge. +Large language models (LLMs) are emerging as a transformative technology, enabling developers to build applications that they previously could not. But using these LLMs in isolation is often not enough to create a truly powerful app - the real power comes when you are able to combine them with other sources of computation or knowledge. This library is aimed at assisting in the development of those types of applications. Common examples of these types of applications include: @@ -20,10 +16,7 @@ Checkout the below guide for a walkthrough of how to get started using LangChain ## Modules -There are several main modules that LangChain provides support for. -For each module we provide some examples to get started and get familiar with some of the concepts. -These modules are, in increasing order of complexity: - +There are several main modules that LangChain provides support for. 
For each module we provide some examples to get started and get familiar with some of the concepts. These modules are, in increasing order of complexity: - Prompts: This includes prompt management, prompt optimization, and prompt serialization. @@ -37,15 +30,15 @@ These modules are, in increasing order of complexity: - Memory: Memory is the concept of persisting state between calls of a chain/agent. LangChain provides a standard interface for memory, a collection of memory implementations, and examples of chains/agents that use memory. - ## Reference Docs ---------------- -All of LangChain's reference documentation, in one place. Full documentation on all methods and classes. +--- +All of LangChain's reference documentation, in one place. Full documentation on all methods and classes. ## Additional Resources ---------------------- + +--- Additional collection of resources we think may be useful as you develop your application! diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js index 12f6f717fab8..2f32b7de5731 100644 --- a/docs/docusaurus.config.js +++ b/docs/docusaurus.config.js @@ -11,30 +11,30 @@ /** @type {import('@docusaurus/types').Config} */ const config = { - title: 'Langchain', - tagline: 'The tagline of my site', - favicon: 'img/favicon.ico', + title: "Langchain", + tagline: "The tagline of my site", + favicon: "img/favicon.ico", // Set the production url of your site here - url: 'https://hwchase17.github.io', + url: "https://hwchase17.github.io", // Set the // pathname under which your site is served // For GitHub pages deployment, it is often '//' - baseUrl: '/langchainjs/', + baseUrl: "/langchainjs/", // GitHub pages deployment config. // If you aren't using GitHub pages, you don't need these. - organizationName: 'hwchase17', // Usually your GitHub org/user name. - projectName: 'langchainjs', // Usually your repo name. 
- deploymentBranch: 'gh-pages', + organizationName: "hwchase17", // Usually your GitHub org/user name. + projectName: "langchainjs", // Usually your repo name. + deploymentBranch: "gh-pages", - onBrokenLinks: 'throw', - onBrokenMarkdownLinks: 'warn', + onBrokenLinks: "throw", + onBrokenMarkdownLinks: "warn", plugins: [ [ - 'docusaurus-plugin-typedoc', + "docusaurus-plugin-typedoc", { - tsconfig: '../langchain/tsconfig.json', + tsconfig: "../langchain/tsconfig.json", sidebar: { fullNames: true, }, @@ -44,45 +44,51 @@ const config = { presets: [ [ - 'classic', + "classic", /** @type {import('@docusaurus/preset-classic').Options} */ ({ docs: { - sidebarPath: require.resolve('./sidebars.js'), - editUrl: 'https://github.com/hwchase17/langchainjs/', + sidebarPath: require.resolve("./sidebars.js"), + editUrl: "https://github.com/hwchase17/langchainjs/", async sidebarItemsGenerator({ defaultSidebarItemsGenerator, ...args }) { const allInternal = []; - const filterInternal = (items) => items.filter(item => { - const isInternal = item.label?.includes("internal"); - if (isInternal) { - allInternal.push(item); - } - return !isInternal; - }).map((item) => { - if (item.items && Array.isArray(item.items)) { - return { ...item, items: filterInternal(item.items) } - } - return item; - }); + const filterInternal = (items) => + items + .filter((item) => { + const isInternal = item.label?.includes("internal"); + if (isInternal) { + allInternal.push(item); + } + return !isInternal; + }) + .map((item) => { + if (item.items && Array.isArray(item.items)) { + return { ...item, items: filterInternal(item.items) }; + } + return item; + }); const sidebarItems = await defaultSidebarItemsGenerator(args); - const filtered = filterInternal(sidebarItems) + const filtered = filterInternal(sidebarItems); if (allInternal.length > 0) { - return [...filtered, { - type: "category", - label: 
"Internal", - collapsible: true, - collapsed: true, - items: allInternal - }]; + return [ + ...filtered, + { + type: "category", + label: "Internal", + collapsible: true, + collapsed: true, + items: allInternal, + }, + ]; } return filtered; }, }, theme: { - customCss: require.resolve('./src/css/custom.css'), + customCss: require.resolve("./src/css/custom.css"), }, }), ], @@ -91,24 +97,24 @@ const config = { themeConfig: /** @type {import('@docusaurus/preset-classic').ThemeConfig} */ ({ - image: 'img/docusaurus-social-card.jpg', + image: "img/docusaurus-social-card.jpg", navbar: { - title: 'Langchain', + title: "Langchain", logo: { - alt: 'Langchain logo', - src: 'img/docusaurus.png', + alt: "Langchain logo", + src: "img/docusaurus.png", }, items: [ // Please keep GitHub link to the right for consistency. { - href: 'https://github.com/hwchase17/langchainjs', - label: 'GitHub', - position: 'right', + href: "https://github.com/hwchase17/langchainjs", + label: "GitHub", + position: "right", }, ], }, footer: { - style: 'dark', + style: "dark", // Please do not remove the credits, help to publicize Docusaurus :) copyright: `Copyright © ${new Date().getFullYear()} Langchain, Inc. 
Built with Docusaurus.`, }, diff --git a/docs/package.json b/docs/package.json index 152e356608ca..6dea74d8a7a1 100644 --- a/docs/package.json +++ b/docs/package.json @@ -14,8 +14,8 @@ "write-heading-ids": "docusaurus write-heading-ids", "ci": "yarn lint && yarn format:diff", "lint": "eslint --cache \"**/*.js\"", - "format": "prettier --config .prettierrc --write \"**/*.{js,jsx,ts,tsx,md,mdx}\"", - "format:diff": "prettier --config .prettierrc --list-different \"**/*.{js,jsx,ts,tsx,md,mdx}\"" + "format": "prettier --write \"**/*.{js,jsx,ts,tsx,md,mdx}\"", + "format:diff": "prettier --list-different \"**/*.{js,jsx,ts,tsx,md,mdx}\"" }, "dependencies": { "@docusaurus/core": "2.3.1", @@ -58,4 +58,4 @@ "engines": { "node": ">=16.14" } -} +} \ No newline at end of file diff --git a/docs/sidebars.js b/docs/sidebars.js index 907fe73f5f87..6e4252c9a960 100644 --- a/docs/sidebars.js +++ b/docs/sidebars.js @@ -21,49 +21,49 @@ module.exports = { // By default, Docusaurus generates a sidebar from the docs folder structure sidebar: [ - 'overview', - 'getting-started', + "overview", + "getting-started", { - type: 'category', - label: 'Guides', + type: "category", + label: "Guides", collapsed: false, items: [ { - type: "category", + type: "category", label: "Prompts", - items: [{type: 'autogenerated', dirName: 'modules/prompts'}] + items: [{ type: "autogenerated", dirName: "modules/prompts" }], }, { - type: "category", + type: "category", label: "LLMs", - items: [{type: 'autogenerated', dirName: 'modules/llms'}] + items: [{ type: "autogenerated", dirName: "modules/llms" }], }, { - type: "category", + type: "category", label: "Indexes", - items: [{type: 'autogenerated', dirName: 'modules/indexes'}] + items: [{ type: "autogenerated", dirName: "modules/indexes" }], }, { - type: "category", + type: "category", label: "Chains", - items: [{type: 'autogenerated', dirName: 'modules/chains'}] + items: [{ type: "autogenerated", dirName: "modules/chains" }], }, { - type: "category", + type: 
"category", label: "Agents", - items: [{type: 'autogenerated', dirName: 'modules/agents'}] + items: [{ type: "autogenerated", dirName: "modules/agents" }], }, { - type: "category", + type: "category", label: "Memory", - items: [{type: 'autogenerated', dirName: 'modules/memory'}] + items: [{ type: "autogenerated", dirName: "modules/memory" }], }, ], }, { - type: 'category', - label: 'Reference Docs', - items: [{type: 'autogenerated', dirName: 'api'}], + type: "category", + label: "Reference Docs", + items: [{ type: "autogenerated", dirName: "api" }], }, ], }; diff --git a/examples/package.json b/examples/package.json index a0d4c39b571c..b90f87e40c49 100644 --- a/examples/package.json +++ b/examples/package.json @@ -11,7 +11,10 @@ "build": "tsc --declaration --outDir dist/", "start": "yarn build && node -r dotenv/config dist/index.js", "lint": "eslint src", - "lint:fix": "yarn lint --fix" + "lint:fix": "yarn lint --fix", + "format": "prettier --write \"**/*.ts\"", + "format:diff": "prettier --list-different \"**/*.ts\"", + "ci": "yarn lint && yarn format:diff && yarn build" }, "author": "Langchain", "license": "MIT", @@ -33,4 +36,4 @@ "prettier": "^2.8.3", "typescript": "^4.9.5" } -} +} \ No newline at end of file diff --git a/examples/src/agents/load_from_hub.ts b/examples/src/agents/load_from_hub.ts index 909c0fdf3fad..55e09426e45d 100644 --- a/examples/src/agents/load_from_hub.ts +++ b/examples/src/agents/load_from_hub.ts @@ -3,7 +3,7 @@ import { loadAgent, AgentExecutor } from "langchain/agents"; import { SerpAPI, Calculator } from "langchain/tools"; export const run = async () => { - const model = new OpenAI({temperature: 0}); + const model = new OpenAI({ temperature: 0 }); const tools = [new SerpAPI(), new Calculator()]; const agent = await loadAgent( @@ -18,7 +18,8 @@ export const run = async () => { returnIntermediateSteps: true, }); - const input = "Who is Olivia Wilde's boyfriend?" + + const input = + "Who is Olivia Wilde's boyfriend?" 
+ " What is his current age raised to the 0.23 power?"; console.log(`Executing with input "${input}"...`); diff --git a/examples/src/agents/mrkl.ts b/examples/src/agents/mrkl.ts index d0f3886b0a78..1c5fc5234be3 100644 --- a/examples/src/agents/mrkl.ts +++ b/examples/src/agents/mrkl.ts @@ -3,15 +3,18 @@ import { initializeAgentExecutor } from "langchain/agents"; import { SerpAPI, Calculator } from "langchain/tools"; export const run = async () => { - const model = new OpenAI({temperature: 0}); + const model = new OpenAI({ temperature: 0 }); const tools = [new SerpAPI(), new Calculator()]; const executor = await initializeAgentExecutor( - tools, model, "zero-shot-react-description" + tools, + model, + "zero-shot-react-description" ); console.log("Loaded agent."); - const input = "Who is Olivia Wilde's boyfriend?" + + const input = + "Who is Olivia Wilde's boyfriend?" + " What is his current age raised to the 0.23 power?"; console.log(`Executing with input "${input}"...`); diff --git a/examples/src/chains/chat_vector_db.ts b/examples/src/chains/chat_vector_db.ts index 15bf3428d5da..df8b24e9981d 100644 --- a/examples/src/chains/chat_vector_db.ts +++ b/examples/src/chains/chat_vector_db.ts @@ -3,21 +3,18 @@ import { ChatVectorDBQAChain } from "langchain/chains"; import { HNSWLib } from "langchain/vectorstores"; import { OpenAIEmbeddings } from "langchain/embeddings"; import { RecursiveCharacterTextSplitter } from "langchain/text_splitter"; -import * as fs from 'fs'; +import * as fs from "fs"; export const run = async () => { /* Initialize the LLM to use to answer the question */ const model = new OpenAI({}); /* Load in the file we want to do question answering over */ - const text = fs.readFileSync('state_of_the_union.txt','utf8'); + const text = fs.readFileSync("state_of_the_union.txt", "utf8"); /* Split the text into chunks */ - const textSplitter = new RecursiveCharacterTextSplitter({chunkSize: 1000}); + const textSplitter = new RecursiveCharacterTextSplitter({ 
chunkSize: 1000 }); const docs = textSplitter.createDocuments([text]); /* Create the vectorstore */ - const vectorStore = await HNSWLib.fromDocuments( - docs, - new OpenAIEmbeddings() - ); + const vectorStore = await HNSWLib.fromDocuments(docs, new OpenAIEmbeddings()); /* Create the chain */ const chain = ChatVectorDBQAChain.fromLLM(model, vectorStore); /* Ask it a question */ @@ -26,6 +23,9 @@ export const run = async () => { console.log(res); /* Ask it a follow up question */ const chatHistory = question + res.text; - const followUpRes = await chain.call({ question: "Was that nice?", chat_history: chatHistory }); + const followUpRes = await chain.call({ + question: "Was that nice?", + chat_history: chatHistory, + }); console.log(followUpRes); -}; \ No newline at end of file +}; diff --git a/examples/src/chains/llm_chain.ts b/examples/src/chains/llm_chain.ts index 3e2e20fbe984..3f02567e476d 100644 --- a/examples/src/chains/llm_chain.ts +++ b/examples/src/chains/llm_chain.ts @@ -1,12 +1,12 @@ import { OpenAI } from "langchain/llms"; import { PromptTemplate } from "langchain/prompts"; -import {LLMChain} from "langchain/chains"; +import { LLMChain } from "langchain/chains"; export const run = async () => { - const model = new OpenAI({temperature: 0.9}); - const template = "What is a good name for a company that makes {product}?"; - const prompt = new PromptTemplate({template, inputVariables: ["product"]}); - const chain = new LLMChain({llm: model, prompt}); - const res = await chain.call({product: "colorful socks"}); - console.log({ res }); + const model = new OpenAI({ temperature: 0.9 }); + const template = "What is a good name for a company that makes {product}?"; + const prompt = new PromptTemplate({ template, inputVariables: ["product"] }); + const chain = new LLMChain({ llm: model, prompt }); + const res = await chain.call({ product: "colorful socks" }); + console.log({ res }); }; diff --git a/examples/src/chains/load_from_hub.ts 
b/examples/src/chains/load_from_hub.ts index c43c5e7be38a..97d9b716db58 100644 --- a/examples/src/chains/load_from_hub.ts +++ b/examples/src/chains/load_from_hub.ts @@ -1,7 +1,7 @@ import { loadChain } from "langchain/chains"; export const run = async () => { - const chain = await loadChain("lc://chains/hello-world/chain.json"); - const res = chain.call({topic: "foo"}); - console.log(res); + const chain = await loadChain("lc://chains/hello-world/chain.json"); + const res = chain.call({ topic: "foo" }); + console.log(res); }; diff --git a/examples/src/chains/question_answering.ts b/examples/src/chains/question_answering.ts index dfc5d13bdab9..3de52909de1e 100644 --- a/examples/src/chains/question_answering.ts +++ b/examples/src/chains/question_answering.ts @@ -5,7 +5,13 @@ import { Document } from "langchain/document"; export const run = async () => { const model = new OpenAI({}); const chain = loadQAChain(model); - const docs = [ new Document({pageContent: 'harrison went to harvard' }), new Document({pageContent: 'ankush went to princeton' }), ]; - const res = await chain.call({ input_documents: docs, question: "Where did harrison go to college" }); + const docs = [ + new Document({ pageContent: "harrison went to harvard" }), + new Document({ pageContent: "ankush went to princeton" }), + ]; + const res = await chain.call({ + input_documents: docs, + question: "Where did harrison go to college", + }); console.log({ res }); }; diff --git a/examples/src/chains/vector_db_qa.ts b/examples/src/chains/vector_db_qa.ts index 6c9b4f45cc84..d272d1969569 100644 --- a/examples/src/chains/vector_db_qa.ts +++ b/examples/src/chains/vector_db_qa.ts @@ -3,24 +3,24 @@ import { VectorDBQAChain } from "langchain/chains"; import { HNSWLib } from "langchain/vectorstores"; import { OpenAIEmbeddings } from "langchain/embeddings"; import { RecursiveCharacterTextSplitter } from "langchain/text_splitter"; -import * as fs from 'fs'; +import * as fs from "fs"; export const run = async () => { 
/* Initialize the LLM to use to answer the question */ const model = new OpenAI({}); /* Load in the file we want to do question answering over */ - const text = fs.readFileSync('state_of_the_union.txt','utf8'); + const text = fs.readFileSync("state_of_the_union.txt", "utf8"); /* Split the text into chunks */ - const textSplitter = new RecursiveCharacterTextSplitter({chunkSize: 1000}); + const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 }); const docs = textSplitter.createDocuments([text]); /* Create the vectorstore */ - const vectorStore = await HNSWLib.fromDocuments( - docs, - new OpenAIEmbeddings() - ); + const vectorStore = await HNSWLib.fromDocuments(docs, new OpenAIEmbeddings()); /* Create the chain */ const chain = VectorDBQAChain.fromLLM(model, vectorStore); /* Ask it a question */ - const res = await chain.call({ input_documents: docs, query: "What did the president say about Justice Breyer?" }); + const res = await chain.call({ + input_documents: docs, + query: "What did the president say about Justice Breyer?", + }); console.log({ res }); -}; \ No newline at end of file +}; diff --git a/examples/src/indexes/embeddings.ts b/examples/src/indexes/embeddings.ts index 118d73d3464f..a7197141613c 100644 --- a/examples/src/indexes/embeddings.ts +++ b/examples/src/indexes/embeddings.ts @@ -1,11 +1,14 @@ import { OpenAIEmbeddings } from "langchain/embeddings"; export const run = async () => { - /* Embed queries */ - const embeddings = new OpenAIEmbeddings(); - const res = await embeddings.embedQuery("Hello world"); - console.log(res); - /* Embed documents */ - const documentRes = await embeddings.embedDocuments(["Hello world", "Bye bye"]); - console.log({ documentRes }); + /* Embed queries */ + const embeddings = new OpenAIEmbeddings(); + const res = await embeddings.embedQuery("Hello world"); + console.log(res); + /* Embed documents */ + const documentRes = await embeddings.embedDocuments([ + "Hello world", + "Bye bye", + ]); + 
console.log({ documentRes }); }; diff --git a/examples/src/indexes/recursive_text_splitter.ts b/examples/src/indexes/recursive_text_splitter.ts index 241d14d5ad5c..8ce2b1dcc97c 100644 --- a/examples/src/indexes/recursive_text_splitter.ts +++ b/examples/src/indexes/recursive_text_splitter.ts @@ -1,10 +1,13 @@ -import { RecursiveCharacterTextSplitter } from 'langchain/text_splitter'; +import { RecursiveCharacterTextSplitter } from "langchain/text_splitter"; export const run = async () => { - const text = `Hi.\n\nI'm Harrison.\n\nHow? Are? You?\nOkay then f f f f. + const text = `Hi.\n\nI'm Harrison.\n\nHow? Are? You?\nOkay then f f f f. This is a weird text to write, but gotta test the splittingggg some how.\n\n Bye!\n\n-H.`; - const splitter = new RecursiveCharacterTextSplitter({ chunkSize: 10, chunkOverlap: 1 }); - const output = splitter.createDocuments([text]); - console.log(output); + const splitter = new RecursiveCharacterTextSplitter({ + chunkSize: 10, + chunkOverlap: 1, + }); + const output = splitter.createDocuments([text]); + console.log(output); }; diff --git a/examples/src/indexes/text_splitter.ts b/examples/src/indexes/text_splitter.ts index 198fddde3099..dcb6469c7048 100644 --- a/examples/src/indexes/text_splitter.ts +++ b/examples/src/indexes/text_splitter.ts @@ -1,13 +1,19 @@ -import { Document } from 'langchain/document'; -import { CharacterTextSplitter } from 'langchain/text_splitter'; +import { Document } from "langchain/document"; +import { CharacterTextSplitter } from "langchain/text_splitter"; export const run = async () => { /* Split text */ - const text = 'foo bar baz 123'; - const splitter = new CharacterTextSplitter({separator: ' ', chunkSize: 7, chunkOverlap: 3 }); + const text = "foo bar baz 123"; + const splitter = new CharacterTextSplitter({ + separator: " ", + chunkSize: 7, + chunkOverlap: 3, + }); const output = splitter.createDocuments([text]); console.log({ output }); /* Split documents */ - const docOutput = 
splitter.splitDocuments([new Document({pageContent: text})]); + const docOutput = splitter.splitDocuments([ + new Document({ pageContent: text }), + ]); console.log({ docOutput }); }; diff --git a/examples/src/indexes/vectorstores.ts b/examples/src/indexes/vectorstores.ts index d0500579bd80..14c0704befae 100644 --- a/examples/src/indexes/vectorstores.ts +++ b/examples/src/indexes/vectorstores.ts @@ -2,12 +2,12 @@ import { HNSWLib } from "langchain/vectorstores"; import { OpenAIEmbeddings } from "langchain/embeddings"; export const run = async () => { - const vectorStore = await HNSWLib.fromTexts( + const vectorStore = await HNSWLib.fromTexts( ["Hello world", "Bye bye", "hello nice world"], [{ id: 2 }, { id: 1 }, { id: 3 }], new OpenAIEmbeddings() - ); + ); - const resultOne = await vectorStore.similaritySearch("hello world", 1); - console.log(resultOne); -}; \ No newline at end of file + const resultOne = await vectorStore.similaritySearch("hello world", 1); + console.log(resultOne); +}; diff --git a/examples/src/llm.ts b/examples/src/llm.ts index 65a57677cb25..2616da6d6c07 100644 --- a/examples/src/llm.ts +++ b/examples/src/llm.ts @@ -1,7 +1,9 @@ import { OpenAI } from "langchain/llms"; export const run = async () => { - const model = new OpenAI({temperature: 0.9}); - const res = await model.call("What would be a good company name a company that makes colorful socks?"); - console.log({ res }); + const model = new OpenAI({ temperature: 0.9 }); + const res = await model.call( + "What would be a good company name a company that makes colorful socks?" 
+ ); + console.log({ res }); }; diff --git a/examples/src/memory.ts b/examples/src/memory.ts index 013c3d3cc98f..a19e2b711f14 100644 --- a/examples/src/memory.ts +++ b/examples/src/memory.ts @@ -3,11 +3,11 @@ import { BufferMemory } from "langchain/memory"; import { ConversationChain } from "langchain/chains"; export const run = async () => { - const model = new OpenAI({}); - const memory = new BufferMemory(); - const chain = new ConversationChain({ llm: model, memory}); - const res1 = await chain.call({ input: "Hi! I'm Jim." }); - console.log({ res1 }); - const res2 = await chain.call({ input: "What's my name?" }); - console.log({ res2 }); -}; \ No newline at end of file + const model = new OpenAI({}); + const memory = new BufferMemory(); + const chain = new ConversationChain({ llm: model, memory }); + const res1 = await chain.call({ input: "Hi! I'm Jim." }); + console.log({ res1 }); + const res2 = await chain.call({ input: "What's my name?" }); + console.log({ res2 }); +}; diff --git a/examples/src/prompts/few_shot.ts b/examples/src/prompts/few_shot.ts index fe7064e7480f..ebb02e36107d 100644 --- a/examples/src/prompts/few_shot.ts +++ b/examples/src/prompts/few_shot.ts @@ -1,36 +1,36 @@ import { FewShotPromptTemplate, PromptTemplate } from "langchain/prompts"; export const run = async () => { - /* First, create the list of few shot examples. */ - const examples = [ - {"word": "happy", "antonym": "sad"}, - {"word": "tall", "antonym": "short"}, - ]; - /** Next, we specify the template to format the examples we have provided. - We use the `PromptTemplate` class for this. */ - const exampleFormatterTemplate = "Word: {word}\nAntonym: {antonym}\n"; - const examplePrompt = new PromptTemplate({ - inputVariables: ["word", "antonym"], - template: exampleFormatterTemplate, - }); - /* # Finally, we create the `FewShotPromptTemplate` object. */ - const fewShotPrompt = new FewShotPromptTemplate({ - /* These are the examples we want to insert into the prompt. 
*/ - examples, - /* This is how we want to format the examples when we insert them into the prompt. */ - examplePrompt, - /* The prefix is some text that goes before the examples in the prompt. Usually, this consists of intructions. */ - prefix:"Give the antonym of every input", - /* The suffix is some text that goes after the examples in the prompt. Usually, this is where the user input will go */ - suffix:"Word: {input}\nAntonym:", - /* The input variables are the variables that the overall prompt expects. */ - inputVariables: ["input"], - /* The example_separator is the string we will use to join the prefix, examples, and suffix together with. */ - exampleSeparator:"\n\n", - /* The template format is the formatting method to use for the template. Should usually be f-string. */ - templateFormat: "f-string", - }); - /* We can now generate a prompt using the `format` method. */ - const res = fewShotPrompt.format({input: "big"}); - console.log({ res }); + /* First, create the list of few shot examples. */ + const examples = [ + { word: "happy", antonym: "sad" }, + { word: "tall", antonym: "short" }, + ]; + /** Next, we specify the template to format the examples we have provided. + We use the `PromptTemplate` class for this. */ + const exampleFormatterTemplate = "Word: {word}\nAntonym: {antonym}\n"; + const examplePrompt = new PromptTemplate({ + inputVariables: ["word", "antonym"], + template: exampleFormatterTemplate, + }); + /* # Finally, we create the `FewShotPromptTemplate` object. */ + const fewShotPrompt = new FewShotPromptTemplate({ + /* These are the examples we want to insert into the prompt. */ + examples, + /* This is how we want to format the examples when we insert them into the prompt. */ + examplePrompt, + /* The prefix is some text that goes before the examples in the prompt. Usually, this consists of intructions. */ + prefix: "Give the antonym of every input", + /* The suffix is some text that goes after the examples in the prompt. 
Usually, this is where the user input will go */ + suffix: "Word: {input}\nAntonym:", + /* The input variables are the variables that the overall prompt expects. */ + inputVariables: ["input"], + /* The example_separator is the string we will use to join the prefix, examples, and suffix together with. */ + exampleSeparator: "\n\n", + /* The template format is the formatting method to use for the template. Should usually be f-string. */ + templateFormat: "f-string", + }); + /* We can now generate a prompt using the `format` method. */ + const res = fewShotPrompt.format({ input: "big" }); + console.log({ res }); }; diff --git a/examples/src/prompts/load_from_hub.ts b/examples/src/prompts/load_from_hub.ts index 134483eff782..c40a8f649947 100644 --- a/examples/src/prompts/load_from_hub.ts +++ b/examples/src/prompts/load_from_hub.ts @@ -1,7 +1,7 @@ import { loadPrompt } from "langchain/prompts"; export const run = async () => { - const prompt = await loadPrompt("lc://prompts/hello-world/prompt.yaml"); - const res = prompt.format({}); - console.log({ res }); + const prompt = await loadPrompt("lc://prompts/hello-world/prompt.yaml"); + const res = prompt.format({}); + console.log({ res }); }; diff --git a/examples/src/prompts/prompts.ts b/examples/src/prompts/prompts.ts index ebb20ebd0443..dadddaa2e805 100644 --- a/examples/src/prompts/prompts.ts +++ b/examples/src/prompts/prompts.ts @@ -1,8 +1,8 @@ import { PromptTemplate } from "langchain/prompts"; export const run = async () => { - const template = "What is a good name for a company that makes {product}?"; - const prompt = new PromptTemplate({template, inputVariables: ["product"]}); - const res = prompt.format({product: "colorful socks"}); - console.log({ res }); + const template = "What is a good name for a company that makes {product}?"; + const prompt = new PromptTemplate({ template, inputVariables: ["product"] }); + const res = prompt.format({ product: "colorful socks" }); + console.log({ res }); }; diff --git 
a/langchain/.eslintrc.js b/langchain/.eslintrc.js index aa21ad142ea6..4db809a273dc 100644 --- a/langchain/.eslintrc.js +++ b/langchain/.eslintrc.js @@ -11,7 +11,7 @@ module.exports = { sourceType: "module", }, plugins: ["@typescript-eslint"], - ignorePatterns: ["dist", "node_modules"], + ignorePatterns: ["dist", "docs", "node_modules"], rules: { "@typescript-eslint/explicit-module-boundary-types": 0, "@typescript-eslint/no-empty-function": 0, diff --git a/langchain/.prettierignore b/langchain/.prettierignore new file mode 100644 index 000000000000..6d56539b5b6c --- /dev/null +++ b/langchain/.prettierignore @@ -0,0 +1,2 @@ +dist/ +docs/ \ No newline at end of file diff --git a/langchain/agents/tests/agent.test.ts b/langchain/agents/tests/agent.int.test.ts similarity index 100% rename from langchain/agents/tests/agent.test.ts rename to langchain/agents/tests/agent.int.test.ts diff --git a/langchain/chains/question_answering/tests/load.test.ts b/langchain/chains/question_answering/tests/load.int.test.ts similarity index 100% rename from langchain/chains/question_answering/tests/load.test.ts rename to langchain/chains/question_answering/tests/load.int.test.ts diff --git a/langchain/chains/tests/chat_vector_db_qa_chain.test.ts b/langchain/chains/tests/chat_vector_db_qa_chain.int.test.ts similarity index 100% rename from langchain/chains/tests/chat_vector_db_qa_chain.test.ts rename to langchain/chains/tests/chat_vector_db_qa_chain.int.test.ts diff --git a/langchain/chains/tests/combine_docs_chain.test.ts b/langchain/chains/tests/combine_docs_chain.int.test.ts similarity index 100% rename from langchain/chains/tests/combine_docs_chain.test.ts rename to langchain/chains/tests/combine_docs_chain.int.test.ts diff --git a/langchain/chains/tests/llm_chain.test.ts b/langchain/chains/tests/llm_chain.int.test.ts similarity index 100% rename from langchain/chains/tests/llm_chain.test.ts rename to langchain/chains/tests/llm_chain.int.test.ts diff --git 
a/langchain/chains/tests/vector_db_qa_chain.test.ts b/langchain/chains/tests/vector_db_qa_chain.int.test.ts similarity index 100% rename from langchain/chains/tests/vector_db_qa_chain.test.ts rename to langchain/chains/tests/vector_db_qa_chain.int.test.ts diff --git a/langchain/create-entrypoints.js b/langchain/create-entrypoints.js index ad0b3e242018..db0ca4f29e7f 100644 --- a/langchain/create-entrypoints.js +++ b/langchain/create-entrypoints.js @@ -1,3 +1,4 @@ +/* eslint-disable @typescript-eslint/no-var-requires */ const path = require("path"); const fs = require("fs"); @@ -26,7 +27,9 @@ const updateConfig = () => { ...json, typedocOptions: { ...json.typedocOptions, - entryPoints: [...Object.values(entrypoints), "index.ts"].map(x => `./${x}`), + entryPoints: [...Object.values(entrypoints), "index.ts"].map( + (x) => `./${x}` + ), }, })); diff --git a/langchain/embeddings/fake.ts b/langchain/embeddings/fake.ts new file mode 100644 index 000000000000..e6ffe72ef008 --- /dev/null +++ b/langchain/embeddings/fake.ts @@ -0,0 +1,11 @@ +import { Embeddings } from "./base"; + +export class FakeEmbeddings extends Embeddings { + embedDocuments(documents: string[]): Promise { + return Promise.resolve(documents.map(() => [0.1, 0.2, 0.3, 0.4])); + } + + embedQuery(_: string): Promise { + return Promise.resolve([0.1, 0.2, 0.3, 0.4]); + } +} diff --git a/langchain/embeddings/tests/openai.test.ts b/langchain/embeddings/tests/openai.int.test.ts similarity index 100% rename from langchain/embeddings/tests/openai.test.ts rename to langchain/embeddings/tests/openai.int.test.ts diff --git a/langchain/jest.config.js b/langchain/jest.config.js index a5f495183ccd..77773329f3cd 100644 --- a/langchain/jest.config.js +++ b/langchain/jest.config.js @@ -2,7 +2,7 @@ module.exports = { preset: "ts-jest/presets/js-with-ts", testEnvironment: "node", - modulePathIgnorePatterns: ["dist/"], + modulePathIgnorePatterns: ["dist/", "docs/"], transform: { "^.+\\.(ts|tsx)$": "ts-jest", "^.+\\.(js)$": 
"babel-jest", diff --git a/langchain/llms/tests/cohere.test.ts b/langchain/llms/tests/cohere.int.test.ts similarity index 100% rename from langchain/llms/tests/cohere.test.ts rename to langchain/llms/tests/cohere.int.test.ts diff --git a/langchain/llms/tests/openai.test.ts b/langchain/llms/tests/openai.int.test.ts similarity index 100% rename from langchain/llms/tests/openai.test.ts rename to langchain/llms/tests/openai.int.test.ts diff --git a/langchain/package.json b/langchain/package.json index f266774ca772..eb0e80fe150e 100644 --- a/langchain/package.json +++ b/langchain/package.json @@ -39,8 +39,12 @@ "precommit": "tsc --noEmit && lint-staged", "clean": "rm -rf dist/ && node create-entrypoints.js clean", "prepack": "yarn build", - "test": "node create-entrypoints.js clean && jest", - "prepare": "husky install" + "test": "node create-entrypoints.js clean && jest --testPathIgnorePatterns=\\.int\\.test.ts", + "test:integration": "node create-entrypoints.js clean && jest --testPathPattern=\\.int\\.test.ts", + "prepare": "husky install", + "format": "prettier --write \"**/*.ts\"", + "format:diff": "prettier --list-different \"**/*.ts\"", + "ci": "yarn lint && yarn format:diff && yarn build" }, "author": "Langchain", "license": "MIT", diff --git a/langchain/tsconfig.json b/langchain/tsconfig.json index 0e844b453991..f3a09067b4d9 100644 --- a/langchain/tsconfig.json +++ b/langchain/tsconfig.json @@ -23,6 +23,7 @@ "exclude": [ "node_modules/", "dist/", + "docs/", "tests/" ], "include": [ diff --git a/langchain/util/hub.ts b/langchain/util/hub.ts index 5c65f673a3d4..6d8538756172 100644 --- a/langchain/util/hub.ts +++ b/langchain/util/hub.ts @@ -11,6 +11,8 @@ const URL_BASE = // eslint-disable-next-line @typescript-eslint/no-explicit-any export type LoadValues = Record; +const URL_PATH_SEPARATOR = "/"; + export const loadFromHub = async ( uri: string, loader: (a: string, values: LoadValues) => T, @@ -24,7 +26,7 @@ export const loadFromHub = async ( } const [rawRef, 
remotePath] = match.slice(1); const ref = rawRef ? rawRef.slice(1) : DEFAULT_REF; - const parts = remotePath.split(path.sep); + const parts = remotePath.split(URL_PATH_SEPARATOR); if (parts[0] !== validPrefix) { return undefined; } @@ -41,7 +43,10 @@ export const loadFromHub = async ( const text = await res.text(); const tmpdir = fs.mkdtempSync(path.join(os.tmpdir(), "langchain")); - const file = path.join(tmpdir, path.basename(remotePath)); + const file = path.join( + tmpdir, + path.basename(remotePath.replace(URL_PATH_SEPARATOR, path.sep)) + ); fs.writeFileSync(file, text); return loader(file, values); }; diff --git a/langchain/vectorstores/tests/hnswlib.int.test.ts b/langchain/vectorstores/tests/hnswlib.int.test.ts new file mode 100644 index 000000000000..3fe9f37eae9f --- /dev/null +++ b/langchain/vectorstores/tests/hnswlib.int.test.ts @@ -0,0 +1,65 @@ +import { test, expect } from "@jest/globals"; +import fs from "fs/promises"; +import path from "path"; +import os from "os"; + +import { HNSWLib } from "../hnswlib"; +import { OpenAIEmbeddings } from "../../embeddings"; + +test("Test HNSWLib.fromTexts", async () => { + const vectorStore = await HNSWLib.fromTexts( + ["Hello world", "Bye bye", "hello nice world"], + [{ id: 2 }, { id: 1 }, { id: 3 }], + new OpenAIEmbeddings() + ); + expect(vectorStore.index?.getCurrentCount()).toBe(3); + + const resultOne = await vectorStore.similaritySearch("hello world", 1); + const resultOneMetadatas = resultOne.map(({ metadata }) => metadata); + expect(resultOneMetadatas).toEqual([{ id: 2 }]); + + const resultTwo = await vectorStore.similaritySearch("hello world", 3); + const resultTwoMetadatas = resultTwo.map(({ metadata }) => metadata); + expect(resultTwoMetadatas).toEqual([{ id: 2 }, { id: 3 }, { id: 1 }]); +}); + +test("Test HNSWLib.load and HNSWLib.save", async () => { + const vectorStore = await HNSWLib.fromTexts( + ["Hello world", "Bye bye", "hello nice world"], + [{ id: 2 }, { id: 1 }, { id: 3 }], + new 
OpenAIEmbeddings() + ); + expect(vectorStore.index?.getCurrentCount()).toBe(3); + + const resultOne = await vectorStore.similaritySearch("hello world", 1); + const resultOneMetadatas = resultOne.map(({ metadata }) => metadata); + expect(resultOneMetadatas).toEqual([{ id: 2 }]); + + const resultTwo = await vectorStore.similaritySearch("hello world", 3); + const resultTwoMetadatas = resultTwo.map(({ metadata }) => metadata); + expect(resultTwoMetadatas).toEqual([{ id: 2 }, { id: 3 }, { id: 1 }]); + + const tempDirectory = await fs.mkdtemp(path.join(os.tmpdir(), "lcjs-")); + + console.log(tempDirectory); + + await vectorStore.save(tempDirectory); + + const loadedVectorStore = await HNSWLib.load( + tempDirectory, + new OpenAIEmbeddings() + ); + + const resultThree = await loadedVectorStore.similaritySearch( + "hello world", + 1 + ); + + const resultThreeMetadatas = resultThree.map(({ metadata }) => metadata); + expect(resultThreeMetadatas).toEqual([{ id: 2 }]); + + const resultFour = await loadedVectorStore.similaritySearch("hello world", 3); + + const resultFourMetadatas = resultFour.map(({ metadata }) => metadata); + expect(resultFourMetadatas).toEqual([{ id: 2 }, { id: 3 }, { id: 1 }]); +}); diff --git a/langchain/vectorstores/tests/hnswlib.test.ts b/langchain/vectorstores/tests/hnswlib.test.ts index 3fe9f37eae9f..6820d02024e9 100644 --- a/langchain/vectorstores/tests/hnswlib.test.ts +++ b/langchain/vectorstores/tests/hnswlib.test.ts @@ -4,40 +4,40 @@ import path from "path"; import os from "os"; import { HNSWLib } from "../hnswlib"; -import { OpenAIEmbeddings } from "../../embeddings"; +import { FakeEmbeddings } from "../../embeddings/fake"; test("Test HNSWLib.fromTexts", async () => { const vectorStore = await HNSWLib.fromTexts( ["Hello world", "Bye bye", "hello nice world"], [{ id: 2 }, { id: 1 }, { id: 3 }], - new OpenAIEmbeddings() + new FakeEmbeddings() ); expect(vectorStore.index?.getCurrentCount()).toBe(3); const resultOne = await 
vectorStore.similaritySearch("hello world", 1); const resultOneMetadatas = resultOne.map(({ metadata }) => metadata); - expect(resultOneMetadatas).toEqual([{ id: 2 }]); + expect(resultOneMetadatas).toEqual([{ id: 3 }]); const resultTwo = await vectorStore.similaritySearch("hello world", 3); const resultTwoMetadatas = resultTwo.map(({ metadata }) => metadata); - expect(resultTwoMetadatas).toEqual([{ id: 2 }, { id: 3 }, { id: 1 }]); + expect(resultTwoMetadatas).toEqual([{ id: 2 }, { id: 1 }, { id: 3 }]); }); test("Test HNSWLib.load and HNSWLib.save", async () => { const vectorStore = await HNSWLib.fromTexts( ["Hello world", "Bye bye", "hello nice world"], [{ id: 2 }, { id: 1 }, { id: 3 }], - new OpenAIEmbeddings() + new FakeEmbeddings() ); expect(vectorStore.index?.getCurrentCount()).toBe(3); const resultOne = await vectorStore.similaritySearch("hello world", 1); const resultOneMetadatas = resultOne.map(({ metadata }) => metadata); - expect(resultOneMetadatas).toEqual([{ id: 2 }]); + expect(resultOneMetadatas).toEqual([{ id: 3 }]); const resultTwo = await vectorStore.similaritySearch("hello world", 3); const resultTwoMetadatas = resultTwo.map(({ metadata }) => metadata); - expect(resultTwoMetadatas).toEqual([{ id: 2 }, { id: 3 }, { id: 1 }]); + expect(resultTwoMetadatas).toEqual([{ id: 2 }, { id: 1 }, { id: 3 }]); const tempDirectory = await fs.mkdtemp(path.join(os.tmpdir(), "lcjs-")); @@ -47,7 +47,7 @@ test("Test HNSWLib.load and HNSWLib.save", async () => { const loadedVectorStore = await HNSWLib.load( tempDirectory, - new OpenAIEmbeddings() + new FakeEmbeddings() ); const resultThree = await loadedVectorStore.similaritySearch( @@ -56,10 +56,10 @@ test("Test HNSWLib.load and HNSWLib.save", async () => { ); const resultThreeMetadatas = resultThree.map(({ metadata }) => metadata); - expect(resultThreeMetadatas).toEqual([{ id: 2 }]); + expect(resultThreeMetadatas).toEqual([{ id: 3 }]); const resultFour = await loadedVectorStore.similaritySearch("hello world", 3); 
const resultFourMetadatas = resultFour.map(({ metadata }) => metadata); - expect(resultFourMetadatas).toEqual([{ id: 2 }, { id: 3 }, { id: 1 }]); + expect(resultFourMetadatas).toEqual([{ id: 2 }, { id: 1 }, { id: 3 }]); }); diff --git a/package.json b/package.json index 42d3c14b6273..32b33fc698d8 100644 --- a/package.json +++ b/package.json @@ -5,12 +5,13 @@ "node": ">=18" }, "workspaces": [ + "langchain", "examples", - "docs", - "langchain" + "docs" ], "packageManager": "yarn@3.4.1", "scripts": { + "ci": "turbo run ci", "build": "turbo run build", "lint": "turbo run lint", "lint:fix": "yarn lint -- --fix", @@ -32,4 +33,4 @@ "dependencies": { "turbo": "^1.7.4" } -} +} \ No newline at end of file diff --git a/turbo.json b/turbo.json index 42bf3f403e41..533ed84dd23a 100644 --- a/turbo.json +++ b/turbo.json @@ -1,20 +1,30 @@ { "$schema": "https://turbo.build/schema.json", - "globalDependencies": ["**/.env"], + "globalDependencies": [ + "**/.env" + ], "pipeline": { "build": { - "dependsOn": ["^build"], - "outputs": ["dist/**"] + "dependsOn": [ + "^build" + ], + "outputs": [ + "dist/**" + ] }, "lint": { "outputs": [] }, - "test": { - }, - "precommit": { + "test": {}, + "precommit": {}, + "ci": { + "cache": false, + "dependsOn": [ + "^ci" + ] }, "start": { "cache": false } - } -} + } +} \ No newline at end of file