├── degreegurucrawler ├── degreegurucrawler │ ├── __init__.py │ ├── spiders │ │ ├── __init__.py │ │ └── configurable.py │ ├── items.py │ ├── pipelines.py │ ├── utils │ │ ├── config.py │ │ ├── crawler.yaml │ │ └── upstash_vector_store.py │ ├── settings.py │ └── middlewares.py ├── .gitignore ├── requirements.txt ├── Dockerfile ├── docker-compose.yml └── scrapy.cfg ├── .eslintrc.json ├── .prettierignore ├── figs ├── overview.gif ├── overview.png ├── vector-db.png ├── infrastructure.png ├── redis-create.png ├── vector-db-create.png ├── vector-db-read-only.png └── how-this-project-works.png ├── public ├── icon-user.png ├── vercel.svg └── next.svg ├── src ├── app │ ├── favicon.ico │ ├── layout.tsx │ ├── vectorstore │ │ ├── UpstashVectorStore.d.ts │ │ └── UpstashVectorStore.js │ ├── globals.css │ ├── page.tsx │ └── api │ │ └── guru │ │ └── route.tsx ├── utils │ ├── cx.ts │ └── const.ts └── components │ ├── powered-by.tsx │ ├── form.tsx │ ├── message.tsx │ ├── upstash-logo.tsx │ └── message-loading.tsx ├── next.config.js ├── postcss.config.js ├── tailwind.config.ts ├── .env.local.example ├── .gitignore ├── tsconfig.json ├── .prettierrc ├── package.json └── README.md /degreegurucrawler/degreegurucrawler/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "next/core-web-vitals" 3 | } 4 | -------------------------------------------------------------------------------- /degreegurucrawler/.gitignore: -------------------------------------------------------------------------------- 1 | *.log 2 | __pycache__ 3 | degreegurudb -------------------------------------------------------------------------------- /.prettierignore: -------------------------------------------------------------------------------- 1 | # Ignore artifacts: 2 | degreegurucrawler 3 | figs 4 | -------------------------------------------------------------------------------- /figs/overview.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/upstash/degree-guru/master/figs/overview.gif -------------------------------------------------------------------------------- /figs/overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/upstash/degree-guru/master/figs/overview.png -------------------------------------------------------------------------------- /figs/vector-db.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/upstash/degree-guru/master/figs/vector-db.png -------------------------------------------------------------------------------- /public/icon-user.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/upstash/degree-guru/master/public/icon-user.png -------------------------------------------------------------------------------- /src/app/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/upstash/degree-guru/master/src/app/favicon.ico -------------------------------------------------------------------------------- /figs/infrastructure.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/upstash/degree-guru/master/figs/infrastructure.png -------------------------------------------------------------------------------- /figs/redis-create.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/upstash/degree-guru/master/figs/redis-create.png -------------------------------------------------------------------------------- /degreegurucrawler/requirements.txt: -------------------------------------------------------------------------------- 1 | upstash_vector 2 | scrapy==2.11.0 3 | langchain==0.1.0 4 | openai==1.7.2 -------------------------------------------------------------------------------- /figs/vector-db-create.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/upstash/degree-guru/master/figs/vector-db-create.png -------------------------------------------------------------------------------- /figs/vector-db-read-only.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/upstash/degree-guru/master/figs/vector-db-read-only.png -------------------------------------------------------------------------------- /figs/how-this-project-works.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/upstash/degree-guru/master/figs/how-this-project-works.png -------------------------------------------------------------------------------- /next.config.js: -------------------------------------------------------------------------------- 1 | /** @type {import('next').NextConfig} */ 2 | const nextConfig = {}; 3 | 4 | module.exports = nextConfig; 5 | -------------------------------------------------------------------------------- /postcss.config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | plugins: { 3 | tailwindcss: {}, 4 | autoprefixer: {}, 5 | }, 6 | }; 7 | -------------------------------------------------------------------------------- /src/utils/cx.ts: -------------------------------------------------------------------------------- 1 | import { ClassValue, clsx } from "clsx"; 2 | import { twMerge } from "tailwind-merge"; 3 | 4 | export default function cx(...inputs: ClassValue[]) { 5 | return twMerge(clsx(inputs)); 6 | } 7 | -------------------------------------------------------------------------------- /degreegurucrawler/degreegurucrawler/spiders/__init__.py: -------------------------------------------------------------------------------- 1 | # This package will contain the spiders of your Scrapy project 2 | # 3 | # Please refer to the documentation for information on how to create and manage 4 | # your spiders. 5 | -------------------------------------------------------------------------------- /degreegurucrawler/Dockerfile: -------------------------------------------------------------------------------- 1 | 2 | FROM python:3.8-slim 3 | 4 | # copy directory into dockerfile 5 | COPY . 
crawler 6 | WORKDIR crawler 7 | 8 | # install requirements 9 | RUN pip install -r requirements.txt 10 | 11 | CMD ["scrapy", "crawl", "configurable"] 12 | -------------------------------------------------------------------------------- /tailwind.config.ts: -------------------------------------------------------------------------------- 1 | import type { Config } from "tailwindcss"; 2 | 3 | const config: Config = { 4 | content: [ 5 | "./src/pages/**/*.{js,ts,jsx,tsx,mdx}", 6 | "./src/components/**/*.{js,ts,jsx,tsx,mdx}", 7 | "./src/app/**/*.{js,ts,jsx,tsx,mdx}", 8 | ], 9 | plugins: [], 10 | }; 11 | export default config; 12 | -------------------------------------------------------------------------------- /degreegurucrawler/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | my_service: 5 | image: degreegurucrawler 6 | build: 7 | context: . 8 | dockerfile: Dockerfile 9 | environment: 10 | - UPSTASH_VECTOR_REST_URL=**** 11 | - UPSTASH_VECTOR_REST_TOKEN=**** 12 | - OPENAI_API_KEY=**** 13 | -------------------------------------------------------------------------------- /degreegurucrawler/scrapy.cfg: -------------------------------------------------------------------------------- 1 | # Automatically created by: scrapy startproject 2 | # 3 | # For more information about the [deploy] section see: 4 | # https://scrapyd.readthedocs.io/en/latest/deploy.html 5 | 6 | [settings] 7 | default = degreegurucrawler.settings 8 | 9 | [deploy] 10 | #url = http://localhost:6800/ 11 | project = degreegurucrawler 12 | -------------------------------------------------------------------------------- /degreegurucrawler/degreegurucrawler/items.py: -------------------------------------------------------------------------------- 1 | # Define here the models for your scraped items 2 | # 3 | # See documentation in: 4 | # https://docs.scrapy.org/en/latest/topics/items.html 5 | 6 | import scrapy 7 | 8 | 9 | class DegreegurucrawlerItem(scrapy.Item): 10 | # define the fields for your item here like: 11 | # name = scrapy.Field() 12 | pass 13 | -------------------------------------------------------------------------------- /.env.local.example: -------------------------------------------------------------------------------- 1 | # Redis tokens retrieved here: https://console.upstash.com/ 2 | UPSTASH_REDIS_REST_URL= 3 | UPSTASH_REDIS_REST_TOKEN= 4 | 5 | # Vector database tokens retrieved here: https://console.upstash.com/vector 6 | UPSTASH_VECTOR_REST_URL= 7 | UPSTASH_VECTOR_REST_TOKEN= 8 | 9 | # OpenAI key retrieved here: https://platform.openai.com/api-keys 10 | OPENAI_API_KEY= -------------------------------------------------------------------------------- /src/utils/const.ts: -------------------------------------------------------------------------------- 1 | export const INITIAL_QUESTIONS = [ 2 | { 3 | content: "Are there resources for students interested in creative writing?", 4 | }, 5 | { 6 | content: "Are there courses on environmental sustainability?", 7 | }, 8 | { 9 | content: 10 | "Are there any workshops or seminars on entrepreneurship for students?", 11 | }, 12 | { 13 | content: "What kinds of courses will I take as a philosophy major?", 14 | }, 15 | ]; 16 | -------------------------------------------------------------------------------- /degreegurucrawler/degreegurucrawler/pipelines.py: -------------------------------------------------------------------------------- 1 | # Define your item pipelines here 2 | # 3 | # Don't forget 
to add your pipeline to the ITEM_PIPELINES setting 4 | # See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html 5 | 6 | 7 | # useful for handling different item types with a single interface 8 | from itemadapter import ItemAdapter 9 | 10 | 11 | class DegreegurucrawlerPipeline: 12 | def process_item(self, item, spider): 13 | return item 14 | -------------------------------------------------------------------------------- /degreegurucrawler/degreegurucrawler/utils/config.py: -------------------------------------------------------------------------------- 1 | import os 2 | import yaml 3 | 4 | config_path = "degreegurucrawler/utils/crawler.yaml" 5 | with open(config_path, 'r') as file: 6 | config = yaml.load(file, Loader=yaml.FullLoader) 7 | 8 | embedding_function_config = { 9 | "api_key": os.environ.get('OPENAI_API_KEY'), 10 | "model_name": config["index"]["openAI_embedding_model"] 11 | } 12 | 13 | crawler_config = config["crawler"] 14 | text_splitter_config = config["index"]["text_splitter"] 15 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 2 | 3 | # dependencies 4 | /node_modules 5 | /.pnp 6 | .pnp.js 7 | .yarn/install-state.gz 8 | 9 | # testing 10 | /coverage 11 | 12 | # next.js 13 | /.next/ 14 | /out/ 15 | 16 | # production 17 | /build 18 | 19 | # misc 20 | .DS_Store 21 | *.pem 22 | .idea 23 | 24 | # debug 25 | npm-debug.log* 26 | yarn-debug.log* 27 | yarn-error.log* 28 | 29 | # local env files 30 | .env*.local 31 | 32 | # vercel 33 | .vercel 34 | 35 | # typescript 36 | *.tsbuildinfo 37 | next-env.d.ts 38 | -------------------------------------------------------------------------------- /public/vercel.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/app/layout.tsx: -------------------------------------------------------------------------------- 1 | import type { Metadata } from "next"; 2 | import { Inter } from "next/font/google"; 3 | import "./globals.css"; 4 | import cx from "@/utils/cx"; 5 | 6 | const inter = Inter({ subsets: ["latin"] }); 7 | 8 | export const metadata: Metadata = { 9 | title: "DegreeGuru", 10 | description: "DegreeGuru ChatBot", 11 | }; 12 | 13 | export default function RootLayout({ 14 | children, 15 | }: { 16 | children: React.ReactNode; 17 | }) { 18 | return ( 19 | 20 | 21 | {children} 22 | 23 | 24 | ); 25 | } 26 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "es5", 4 | "lib": ["dom", "dom.iterable", "esnext"], 5 | "allowJs": true, 6 | "skipLibCheck": true, 7 | "strict": true, 8 | "noEmit": true, 9 | "esModuleInterop": true, 10 | "module": "esnext", 11 | "moduleResolution": "bundler", 12 | "resolveJsonModule": true, 13 | "isolatedModules": true, 14 | "jsx": "preserve", 15 | "incremental": true, 16 | "plugins": [ 17 | { 18 | "name": "next" 19 | } 20 | ], 21 | "paths": { 22 | "@/*": ["./src/*"] 23 | } 24 | }, 25 | "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts"], 26 | "exclude": ["node_modules"] 27 | } 28 | -------------------------------------------------------------------------------- /src/components/powered-by.tsx: 
-------------------------------------------------------------------------------- 1 | const PoweredBy = () => { 2 | return ( 3 |

4 | This project is a prototype for a RAG chatbot.
Built using{" "} 5 | 6 | LangChain 7 | 8 | ,{" "} 9 | 10 | Upstash Vector 11 | {" "} 12 | and{" "} 13 | 14 | Vercel AI SDK 15 | {" "} 16 | ・{" "} 17 | 18 | Source Code 19 | 20 |

21 |   );
22 | };
23 | 
24 | export default PoweredBy;
25 | 
--------------------------------------------------------------------------------
/src/app/vectorstore/UpstashVectorStore.d.ts:
--------------------------------------------------------------------------------
 1 | import { Index } from "@upstash/vector";
 2 | import { Document } from "@langchain/core/documents";
 3 | import {
 4 |   MaxMarginalRelevanceSearchOptions,
 5 |   VectorStore,
 6 | } from "@langchain/core/vectorstores";
 7 | 
 8 | 
 9 | type UpstashMetadata = Record<string, any>;
10 | 
11 | 
12 | export class UpstashVectorStore extends VectorStore {
13 |   declare FilterType: UpstashMetadata;
14 | 
15 |   constructor(embeddings: any);
16 |   index: Index;
17 |   similaritySearchVectorWithScore(
18 |     query: any,
19 |     k: any,
20 |     filter: any,
21 |   ): Promise<[Document, number][]>;
22 | 
23 |   maxMarginalRelevanceSearch(
24 |     query: string,
25 |     options: MaxMarginalRelevanceSearchOptions<this["FilterType"]>
26 |   ): Promise<Document[]>
27 | }
--------------------------------------------------------------------------------
/.prettierrc:
--------------------------------------------------------------------------------
 1 | {
 2 |   "arrowParens": "always",
 3 |   "bracketSameLine": false,
 4 |   "bracketSpacing": true,
 5 |   "semi": true,
 6 |   "singleQuote": false,
 7 |   "jsxSingleQuote": false,
 8 |   "quoteProps": "as-needed",
 9 |   "trailingComma": "all",
10 |   "singleAttributePerLine": false,
11 |   "htmlWhitespaceSensitivity": "css",
12 |   "vueIndentScriptAndStyle": false,
13 |   "proseWrap": "preserve",
14 |   "insertPragma": false,
15 |   "printWidth": 80,
16 |   "requirePragma": false,
17 |   "tabWidth": 2,
18 |   "useTabs": false,
19 |   "embeddedLanguageFormatting": "auto",
20 |   "jsxBracketSameLine": false,
21 |   "fluid": false,
22 |   "importOrderSeparation": true,
23 |   "importOrderSortSpecifiers": true,
24 |   "importOrderBuiltinModulesToTop": true,
25 |   "importOrderParserPlugins": ["typescript", "jsx"]
26 | }
--------------------------------------------------------------------------------
/degreegurucrawler/degreegurucrawler/utils/crawler.yaml:
--------------------------------------------------------------------------------
 1 | crawler:
 2 |   start_urls:
 3 |     - https://www.some.domain.com
 4 |   link_extractor:
 5 |     allow: '.*some\.domain.*'
 6 |     deny:
 7 |       - "#"
 8 |       - '\?'
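      # the deny patterns below skip search, listing and archive-style pages
      # that tend to add little unique text to the index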
9 | - course 10 | - search 11 | - subjects 12 | - degree-charts 13 | - archive 14 | - news 15 | - alumni 16 | - announcement 17 | - people 18 | - topics 19 | - membership 20 | - section 21 | - about 22 | - letter 23 | - member 24 | - committee 25 | - book 26 | - year 27 | - project 28 | - user 29 | - page 30 | - event 31 | - resource 32 | - login 33 | index: 34 | openAI_embedding_model: text-embedding-ada-002 35 | text_splitter: 36 | chunk_size: 1000 37 | chunk_overlap: 100 -------------------------------------------------------------------------------- /src/app/globals.css: -------------------------------------------------------------------------------- 1 | @tailwind base; 2 | @tailwind components; 3 | @tailwind utilities; 4 | 5 | @layer base { 6 | a, 7 | button { 8 | @apply transition; 9 | } 10 | 11 | a { 12 | @apply text-emerald-700 underline 13 | decoration-emerald-700/60 decoration-2 14 | hover:decoration-emerald-700 hover:bg-emerald-200 15 | outline-0 focus:ring-2 focus:ring-offset-1 focus:ring-emerald-500; 16 | } 17 | 18 | ::selection { 19 | @apply bg-emerald-200 text-emerald-950; 20 | } 21 | 22 | button:focus, 23 | input:focus { 24 | @apply outline-0 ring-2 ring-offset-1 ring-emerald-500 caret-emerald-500; 25 | } 26 | 27 | label, 28 | strong, 29 | b { 30 | @apply font-semibold; 31 | } 32 | 33 | h1, 34 | h2, 35 | h3, 36 | h4 { 37 | @apply text-balance; 38 | } 39 | 40 | p { 41 | @apply text-pretty; 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "degreeguru", 3 | "version": "0.1.0", 4 | "private": true, 5 | "scripts": { 6 | "dev": "next dev", 7 | "build": "next build", 8 | "start": "next start", 9 | "lint": "next lint" 10 | }, 11 | "dependencies": { 12 | "@tabler/icons-react": "^2.47.0", 13 | "@upstash/ratelimit": "^2.0.3", 14 | "@upstash/redis": "^1.34.0", 15 | "@upstash/vector": "^0.1.0-alpha-13", 16 | "ai": "^2.2.31", 17 | "langchain": "^0.1.5", 18 | "markdown-to-jsx": "^7.4.0", 19 | "next": "14.2.35", 20 | "react": "^18", 21 | "react-dom": "^18" 22 | }, 23 | "devDependencies": { 24 | "@types/node": "^20", 25 | "@types/react": "^18", 26 | "@types/react-dom": "^18", 27 | "autoprefixer": "^10.0.1", 28 | "clsx": "^2.1.0", 29 | "eslint": "^8", 30 | "eslint-config-next": "14.0.4", 31 | "postcss": "^8", 32 | "prettier": "^3.2.5", 33 | "tailwind-merge": "^2.2.1", 34 | "tailwindcss": "^3.3.0", 35 | "typescript": "^5" 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /public/next.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/components/form.tsx: -------------------------------------------------------------------------------- 1 | import { ComponentProps, forwardRef } from "react"; 2 | import { IconArrowBack } from "@tabler/icons-react"; 3 | import cx from "@/utils/cx"; 4 | 5 | export interface Props extends ComponentProps<"form"> { 6 | inputProps: ComponentProps<"input">; 7 | buttonProps: ComponentProps<"button">; 8 | } 9 | 10 | const Form = ({ inputProps, buttonProps, onSubmit }: Props, ref: any) => { 11 | return ( 12 |
17 | {/**/} 18 | 19 | 31 | 32 | 43 | 44 | ); 45 | }; 46 | 47 | export default forwardRef(Form); 48 | -------------------------------------------------------------------------------- /src/components/message.tsx: -------------------------------------------------------------------------------- 1 | import React from "react"; 2 | import Markdown from "markdown-to-jsx"; 3 | import cx from "@/utils/cx"; 4 | import { Message as MessageProps } from "ai/react"; 5 | import UpstashLogo from "@/components/upstash-logo"; 6 | import { IconUser } from "@tabler/icons-react"; 7 | 8 | const Message: React.FC = ({ content, role }) => { 9 | const isUser = role === "user"; 10 | 11 | return ( 12 |
18 | 19 |
    {children}
, 27 | ul: ({ children }) =>
    {children}
, 28 | }, 29 | }} 30 | > 31 | {content} 32 |
33 |
34 | ); 35 | }; 36 | 37 | const Avatar: React.FC<{ isUser?: boolean; className?: string }> = ({ 38 | isUser = false, 39 | className, 40 | }) => { 41 | return ( 42 |
49 | {isUser ? : } 50 |
51 | ); 52 | }; 53 | 54 | export default Message; 55 | export { Avatar }; 56 | -------------------------------------------------------------------------------- /degreegurucrawler/degreegurucrawler/utils/upstash_vector_store.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | from openai import OpenAI 3 | from upstash_vector import Index 4 | 5 | class UpstashVectorStore: 6 | 7 | def __init__( 8 | self, 9 | url: str, 10 | token: str 11 | ): 12 | self.client = OpenAI() 13 | self.index = Index(url=url, token=token) 14 | 15 | def get_embeddings( 16 | self, 17 | documents: List[str], 18 | model: str = "text-embedding-ada-002" 19 | ) -> List[List[float]]: 20 | """ 21 | Given a list of documents, generates and returns a list of embeddings 22 | """ 23 | documents = [document.replace("\n", " ") for document in documents] 24 | embeddings = self.client.embeddings.create( 25 | input = documents, 26 | model=model 27 | ) 28 | return [data.embedding for data in embeddings.data] 29 | 30 | def add( 31 | self, 32 | ids: List[str], 33 | documents: List[str], 34 | link: str 35 | ) -> None: 36 | """ 37 | Adds a list of documents to the Upstash Vector Store 38 | """ 39 | embeddings = self.get_embeddings(documents) 40 | self.index.upsert( 41 | vectors=[ 42 | ( 43 | id, 44 | embedding, 45 | { 46 | "text": document, 47 | "url": link 48 | } 49 | ) 50 | for id, embedding, document 51 | in zip(ids, embeddings, documents) 52 | ] 53 | ) 54 | -------------------------------------------------------------------------------- /src/components/upstash-logo.tsx: -------------------------------------------------------------------------------- 1 | import React, { HTMLProps } from "react"; 2 | 3 | export interface Props extends HTMLProps { 4 | size?: number; 5 | } 6 | 7 | export default function UpstashLogo({ height = 20, ...props }: Props) { 8 | return ( 9 | 16 | 17 | 21 | 25 | 29 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | ); 41 | } 42 | -------------------------------------------------------------------------------- /src/components/message-loading.tsx: -------------------------------------------------------------------------------- 1 | import React from "react"; 2 | import cx from "@/utils/cx"; 3 | import { Avatar } from "@/components/message"; 4 | 5 | const MessageLoading: React.FC = () => { 6 | return ( 7 |
13 | 14 | 15 | {/* https://github.com/n3r4zzurr0/svg-spinners/blob/main/svg-smil/3-dots-bounce.svg?short_path=50864c0 */} 16 | 23 | 24 | 33 | 34 | 35 | 43 | 44 | 45 | 54 | 55 | 56 |
57 |   );
58 | };
59 | 
60 | export default MessageLoading;
61 | 
--------------------------------------------------------------------------------
/src/app/vectorstore/UpstashVectorStore.js:
--------------------------------------------------------------------------------
 1 | import { VectorStore } from "@langchain/core/vectorstores";
 2 | import { Document } from "@langchain/core/documents";
 3 | import { Index } from "@upstash/vector";
 4 | import { maximalMarginalRelevance } from "@langchain/core/utils/math";
 5 | 
 6 | 
 7 | export class UpstashVectorStore extends VectorStore {
 8 |   _vectorstoreType() {
 9 |     return "upstash";
10 |   }
11 | 
12 |   constructor(embeddings) {
13 |     super(embeddings);
14 | 
15 |     // credentials for the Upstash Vector index (same names as .env.local.example)
16 |     this.index = new Index({
17 |       url: process.env.UPSTASH_VECTOR_REST_URL,
18 |       token: process.env.UPSTASH_VECTOR_REST_TOKEN,
19 |     });
20 |   }
21 | 
22 |   async similaritySearchVectorWithScore(query, k, filter) {
23 |     const result = await this.index.query({
24 |       vector: query,
25 |       topK: k,
26 |       includeVectors: false,
27 |       includeMetadata: true,
28 |     });
29 | 
30 |     const results = [];
31 |     for (let i = 0; i < result.length; i++) {
32 |       results.push([
33 |         new Document({
34 |           pageContent: JSON.stringify(result[i]?.metadata) || "",
35 |         }),
36 |         result[i]?.score ?? 0, // relevance score returned by Upstash
37 |       ]);
38 |     }
39 | 
40 |     return results;
41 |   }
42 | 
43 |   async maxMarginalRelevanceSearch(query, options) {
44 |     const queryEmbedding = await this.embeddings.embedQuery(query);
45 |     const result = await this.index.query({
46 |       vector: queryEmbedding,
47 |       topK: options.fetchK ?? 20,
48 |       includeVectors: true,
49 |       includeMetadata: true,
50 |     });
51 |     const embeddingList = result.map((r) => r.vector);
52 | 
53 |     const mmrIndexes = maximalMarginalRelevance(
54 |       queryEmbedding,
55 |       embeddingList,
56 |       options.lambda,
57 |       options.k
58 |     );
59 |     const topMmrMatches = mmrIndexes.map((idx) => result[idx]);
60 | 
61 |     const results = [];
62 |     for (let i = 0; i < topMmrMatches.length; i++) {
63 |       results.push(
64 |         new Document({
65 |           pageContent: JSON.stringify(topMmrMatches[i]?.metadata) || "",
66 |         }),
67 |       );
68 |     }
69 | 
70 |     return results;
71 |   }
72 | }
73 | 
--------------------------------------------------------------------------------
/degreegurucrawler/degreegurucrawler/spiders/configurable.py:
--------------------------------------------------------------------------------
 1 | 
 2 | import os
 3 | import uuid
 4 | import logging
 5 | 
 6 | from ..utils.upstash_vector_store import UpstashVectorStore
 7 | from ..utils.config import text_splitter_config, crawler_config
 8 | 
 9 | from scrapy.spiders import CrawlSpider, Rule
10 | from scrapy.linkextractors import LinkExtractor
11 | 
12 | from langchain.text_splitter import RecursiveCharacterTextSplitter
13 | 
14 | 
15 | class ConfigurableSpider(CrawlSpider):
16 | 
17 |     name = "configurable"
18 |     start_urls = crawler_config["start_urls"]
19 |     rules = (
20 |         Rule(
21 |             LinkExtractor(
22 |                 **crawler_config["link_extractor"]
23 |             ),
24 |             callback="parse_page",
25 |             follow=True  # keep following links on each page even when a callback is provided
26 |         ),
27 |     )
28 | 
29 |     def __init__(self, *a, **kw):
30 |         super().__init__(*a, **kw)
31 | 
32 |         self.vectorstore = UpstashVectorStore(
33 |             url=os.environ.get("UPSTASH_VECTOR_REST_URL"),
34 |             token=os.environ.get("UPSTASH_VECTOR_REST_TOKEN")
35 |         )
36 | 
37 |         print(
38 |             f"Creating a vector index at {os.environ.get('UPSTASH_VECTOR_REST_URL')}.\n"
39 |             f" Vector store info before crawl: {self.vectorstore.index.info()}"
40 |         )
41 | 
42 |         self.text_splitter = RecursiveCharacterTextSplitter(
43 |             **text_splitter_config
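            # chunk_size and chunk_overlap for this splitter come from the
            # index.text_splitter section of crawler.yaml (1000 / 100 by default)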
44 | ) 45 | 46 | self._disable_loggers() 47 | 48 | def _disable_loggers(self): 49 | """ 50 | disables some of the loggers to keep the log clean 51 | """ 52 | 53 | disable_loggers = [ 54 | "scrapy.spidermiddlewares.depth", 55 | "protego", 56 | "httpcore.http11", 57 | "httpx", 58 | "openai._base_client", 59 | "urllib3.connectionpool" 60 | ] 61 | for logger in disable_loggers: 62 | logging.getLogger(logger).setLevel(logging.WARNING) 63 | 64 | def parse_page(self, response): 65 | """ 66 | Creates chunks out of the crawled webpage and adds them to the vector 67 | store. 68 | """ 69 | 70 | # extract text content 71 | text_content = response.xpath('//p').getall() 72 | text_content = '\n'.join(text_content) 73 | 74 | # split documents 75 | documents = self.text_splitter.split_text(text_content) 76 | 77 | if len(documents) == 0: 78 | return 79 | 80 | # get source url 81 | link = response.url 82 | 83 | # add documents to vector store 84 | self.vectorstore.add( 85 | ids=[str(uuid.uuid4())[:8] for doc in documents], 86 | documents=documents, 87 | link=link 88 | ) 89 | -------------------------------------------------------------------------------- /degreegurucrawler/degreegurucrawler/settings.py: -------------------------------------------------------------------------------- 1 | # Scrapy settings for degreegurucrawler project 2 | # 3 | # For simplicity, this file contains only settings considered important or 4 | # commonly used. You can find more settings consulting the documentation: 5 | # 6 | # https://docs.scrapy.org/en/latest/topics/settings.html 7 | # https://docs.scrapy.org/en/latest/topics/downloader-middleware.html 8 | # https://docs.scrapy.org/en/latest/topics/spider-middleware.html 9 | 10 | BOT_NAME = "degreegurucrawler" 11 | 12 | SPIDER_MODULES = ["degreegurucrawler.spiders"] 13 | NEWSPIDER_MODULE = "degreegurucrawler.spiders" 14 | 15 | DEPTH_LIMIT = 3 16 | 17 | # Crawl responsibly by identifying yourself (and your website) on the user-agent 18 | #USER_AGENT = "degreegurucrawler (+http://www.yourdomain.com)" 19 | 20 | # Obey robots.txt rules 21 | ROBOTSTXT_OBEY = True 22 | 23 | # Configure maximum concurrent requests performed by Scrapy (default: 16) 24 | #CONCURRENT_REQUESTS = 32 25 | 26 | # Configure a delay for requests for the same website (default: 0) 27 | # See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay 28 | # See also autothrottle settings and docs 29 | #DOWNLOAD_DELAY = 3 30 | # The download delay setting will honor only one of: 31 | #CONCURRENT_REQUESTS_PER_DOMAIN = 16 32 | #CONCURRENT_REQUESTS_PER_IP = 16 33 | 34 | # Disable cookies (enabled by default) 35 | #COOKIES_ENABLED = False 36 | 37 | # Disable Telnet Console (enabled by default) 38 | #TELNETCONSOLE_ENABLED = False 39 | 40 | # Override the default request headers: 41 | #DEFAULT_REQUEST_HEADERS = { 42 | # "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", 43 | # "Accept-Language": "en", 44 | #} 45 | 46 | # Enable or disable spider middlewares 47 | # See https://docs.scrapy.org/en/latest/topics/spider-middleware.html 48 | #SPIDER_MIDDLEWARES = { 49 | # "degreegurucrawler.middlewares.DegreegurucrawlerSpiderMiddleware": 543, 50 | #} 51 | 52 | # Enable or disable downloader middlewares 53 | # See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html 54 | #DOWNLOADER_MIDDLEWARES = { 55 | # "degreegurucrawler.middlewares.DegreegurucrawlerDownloaderMiddleware": 543, 56 | #} 57 | 58 | # Enable or disable extensions 59 | # See 
https://docs.scrapy.org/en/latest/topics/extensions.html 60 | #EXTENSIONS = { 61 | # "scrapy.extensions.telnet.TelnetConsole": None, 62 | #} 63 | 64 | # Configure item pipelines 65 | # See https://docs.scrapy.org/en/latest/topics/item-pipeline.html 66 | #ITEM_PIPELINES = { 67 | # "degreegurucrawler.pipelines.DegreegurucrawlerPipeline": 300, 68 | #} 69 | 70 | # Enable and configure the AutoThrottle extension (disabled by default) 71 | # See https://docs.scrapy.org/en/latest/topics/autothrottle.html 72 | #AUTOTHROTTLE_ENABLED = True 73 | # The initial download delay 74 | #AUTOTHROTTLE_START_DELAY = 5 75 | # The maximum download delay to be set in case of high latencies 76 | #AUTOTHROTTLE_MAX_DELAY = 60 77 | # The average number of requests Scrapy should be sending in parallel to 78 | # each remote server 79 | #AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0 80 | # Enable showing throttling stats for every response received: 81 | #AUTOTHROTTLE_DEBUG = False 82 | 83 | # Enable and configure HTTP caching (disabled by default) 84 | # See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings 85 | #HTTPCACHE_ENABLED = True 86 | #HTTPCACHE_EXPIRATION_SECS = 0 87 | #HTTPCACHE_DIR = "httpcache" 88 | #HTTPCACHE_IGNORE_HTTP_CODES = [] 89 | #HTTPCACHE_STORAGE = "scrapy.extensions.httpcache.FilesystemCacheStorage" 90 | 91 | # Set settings whose default value is deprecated to a future-proof value 92 | REQUEST_FINGERPRINTER_IMPLEMENTATION = "2.7" 93 | TWISTED_REACTOR = "twisted.internet.asyncioreactor.AsyncioSelectorReactor" 94 | FEED_EXPORT_ENCODING = "utf-8" 95 | -------------------------------------------------------------------------------- /src/app/page.tsx: -------------------------------------------------------------------------------- 1 | "use client"; 2 | 3 | import React, { useCallback, useEffect, useRef, useState } from "react"; 4 | import { Message as MessageProps, useChat } from "ai/react"; 5 | import Form from "@/components/form"; 6 | import Message from "@/components/message"; 7 | import cx from "@/utils/cx"; 8 | import PoweredBy from "@/components/powered-by"; 9 | import MessageLoading from "@/components/message-loading"; 10 | import { INITIAL_QUESTIONS } from "@/utils/const"; 11 | 12 | export default function Home() { 13 | const formRef = useRef(null); 14 | const messagesEndRef = useRef(null); 15 | 16 | const [streaming, setStreaming] = useState(false); 17 | 18 | const { messages, input, handleInputChange, handleSubmit, setInput } = 19 | useChat({ 20 | api: "/api/guru", 21 | initialMessages: [ 22 | { 23 | id: "0", 24 | role: "system", 25 | content: `**Welcome to DegreeGuru** 26 | 27 | Your ultimate companion in navigating the academic landscape of Stanford.`, 28 | }, 29 | ], 30 | onResponse: () => { 31 | setStreaming(false); 32 | }, 33 | }); 34 | 35 | const onClickQuestion = (value: string) => { 36 | setInput(value); 37 | setTimeout(() => { 38 | formRef.current?.dispatchEvent( 39 | new Event("submit", { 40 | cancelable: true, 41 | bubbles: true, 42 | }), 43 | ); 44 | }, 1); 45 | }; 46 | 47 | useEffect(() => { 48 | if (messagesEndRef.current) { 49 | messagesEndRef.current.scrollIntoView(); 50 | } 51 | }, [messages]); 52 | 53 | const onSubmit = useCallback( 54 | (e: React.FormEvent) => { 55 | e.preventDefault(); 56 | handleSubmit(e); 57 | setStreaming(true); 58 | }, 59 | [handleSubmit], 60 | ); 61 | 62 | return ( 63 |
64 |
65 | {messages.map((message: MessageProps) => { 66 | return ; 67 | })} 68 | 69 | {/* loading */} 70 | {streaming && } 71 | 72 | {/* initial question */} 73 | {messages.length === 1 && ( 74 |
75 | {INITIAL_QUESTIONS.map((message) => { 76 | return ( 77 | 87 | ); 88 | })} 89 |
90 | )} 91 | 92 | {/* bottom ref */} 93 |
94 |
95 | 96 |
103 | 107 | 108 |
109 |
121 | 122 | 123 |
124 |
125 |
126 | ); 127 | } 128 | -------------------------------------------------------------------------------- /degreegurucrawler/degreegurucrawler/middlewares.py: -------------------------------------------------------------------------------- 1 | # Define here the models for your spider middleware 2 | # 3 | # See documentation in: 4 | # https://docs.scrapy.org/en/latest/topics/spider-middleware.html 5 | 6 | from scrapy import signals 7 | 8 | # useful for handling different item types with a single interface 9 | from itemadapter import is_item, ItemAdapter 10 | 11 | 12 | class DegreegurucrawlerSpiderMiddleware: 13 | # Not all methods need to be defined. If a method is not defined, 14 | # scrapy acts as if the spider middleware does not modify the 15 | # passed objects. 16 | 17 | @classmethod 18 | def from_crawler(cls, crawler): 19 | # This method is used by Scrapy to create your spiders. 20 | s = cls() 21 | crawler.signals.connect(s.spider_opened, signal=signals.spider_opened) 22 | return s 23 | 24 | def process_spider_input(self, response, spider): 25 | # Called for each response that goes through the spider 26 | # middleware and into the spider. 27 | 28 | # Should return None or raise an exception. 29 | return None 30 | 31 | def process_spider_output(self, response, result, spider): 32 | # Called with the results returned from the Spider, after 33 | # it has processed the response. 34 | 35 | # Must return an iterable of Request, or item objects. 36 | for i in result: 37 | yield i 38 | 39 | def process_spider_exception(self, response, exception, spider): 40 | # Called when a spider or process_spider_input() method 41 | # (from other spider middleware) raises an exception. 42 | 43 | # Should return either None or an iterable of Request or item objects. 44 | pass 45 | 46 | def process_start_requests(self, start_requests, spider): 47 | # Called with the start requests of the spider, and works 48 | # similarly to the process_spider_output() method, except 49 | # that it doesn’t have a response associated. 50 | 51 | # Must return only requests (not items). 52 | for r in start_requests: 53 | yield r 54 | 55 | def spider_opened(self, spider): 56 | spider.logger.info("Spider opened: %s" % spider.name) 57 | 58 | 59 | class DegreegurucrawlerDownloaderMiddleware: 60 | # Not all methods need to be defined. If a method is not defined, 61 | # scrapy acts as if the downloader middleware does not modify the 62 | # passed objects. 63 | 64 | @classmethod 65 | def from_crawler(cls, crawler): 66 | # This method is used by Scrapy to create your spiders. 67 | s = cls() 68 | crawler.signals.connect(s.spider_opened, signal=signals.spider_opened) 69 | return s 70 | 71 | def process_request(self, request, spider): 72 | # Called for each request that goes through the downloader 73 | # middleware. 74 | 75 | # Must either: 76 | # - return None: continue processing this request 77 | # - or return a Response object 78 | # - or return a Request object 79 | # - or raise IgnoreRequest: process_exception() methods of 80 | # installed downloader middleware will be called 81 | return None 82 | 83 | def process_response(self, request, response, spider): 84 | # Called with the response returned from the downloader. 
85 | 86 | # Must either; 87 | # - return a Response object 88 | # - return a Request object 89 | # - or raise IgnoreRequest 90 | return response 91 | 92 | def process_exception(self, request, exception, spider): 93 | # Called when a download handler or a process_request() 94 | # (from other downloader middleware) raises an exception. 95 | 96 | # Must either: 97 | # - return None: continue processing this exception 98 | # - return a Response object: stops process_exception() chain 99 | # - return a Request object: stops process_exception() chain 100 | pass 101 | 102 | def spider_opened(self, spider): 103 | spider.logger.info("Spider opened: %s" % spider.name) 104 | -------------------------------------------------------------------------------- /src/app/api/guru/route.tsx: -------------------------------------------------------------------------------- 1 | import { NextRequest, NextResponse } from "next/server"; 2 | 3 | import { Ratelimit } from "@upstash/ratelimit"; 4 | import { Redis } from "@upstash/redis"; 5 | 6 | import { Message as VercelChatMessage, StreamingTextResponse } from "ai"; 7 | 8 | import { AIMessage, ChatMessage, HumanMessage } from "@langchain/core/messages"; 9 | import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai"; 10 | import { createRetrieverTool } from "langchain/tools/retriever"; 11 | import { AgentExecutor, createOpenAIFunctionsAgent } from "langchain/agents"; 12 | import { 13 | ChatPromptTemplate, 14 | MessagesPlaceholder, 15 | } from "@langchain/core/prompts"; 16 | 17 | import { UpstashVectorStore } from "@/app/vectorstore/UpstashVectorStore"; 18 | 19 | export const runtime = "edge"; 20 | 21 | const redis = Redis.fromEnv(); 22 | 23 | const ratelimit = new Ratelimit({ 24 | redis: redis, 25 | limiter: Ratelimit.slidingWindow(1, "10 s"), 26 | }); 27 | 28 | const convertVercelMessageToLangChainMessage = (message: VercelChatMessage) => { 29 | if (message.role === "user") { 30 | return new HumanMessage(message.content); 31 | } else if (message.role === "assistant") { 32 | return new AIMessage(message.content); 33 | } else { 34 | return new ChatMessage(message.content, message.role); 35 | } 36 | }; 37 | 38 | export async function POST(req: NextRequest) { 39 | try { 40 | const ip = req.ip ?? "127.0.0.1"; 41 | const { success } = await ratelimit.limit(ip); 42 | 43 | if (!success) { 44 | const textEncoder = new TextEncoder(); 45 | const customString = 46 | "Oops! It seems you've reached the rate limit. Please try again later."; 47 | 48 | const transformStream = new ReadableStream({ 49 | async start(controller) { 50 | controller.enqueue(textEncoder.encode(customString)); 51 | controller.close(); 52 | }, 53 | }); 54 | return new StreamingTextResponse(transformStream); 55 | } 56 | 57 | const body = await req.json(); 58 | 59 | /** 60 | * We represent intermediate steps as system messages for display purposes, 61 | * but don't want them in the chat history. 62 | */ 63 | const messages = (body.messages ?? []).filter( 64 | (message: VercelChatMessage) => 65 | message.role === "user" || message.role === "assistant", 66 | ); 67 | const returnIntermediateSteps = false; 68 | const previousMessages = messages 69 | .slice(0, -1) 70 | .map(convertVercelMessageToLangChainMessage); 71 | const currentMessageContent = messages[messages.length - 1].content; 72 | 73 | const chatModel = new ChatOpenAI({ 74 | modelName: "gpt-3.5-turbo-1106", 75 | temperature: 0.2, 76 | // IMPORTANT: Must "streaming: true" on OpenAI to enable final output streaming below. 
77 |       streaming: true,
78 |     }, {
79 |       apiKey: process.env.OPENAI_API_KEY,
80 |       organization: process.env.OPENAI_ORGANIZATION
81 |     });
82 | 
83 |     /**
84 |      * Create vector store and retriever
85 |      */
86 |     const vectorstore = new UpstashVectorStore(new OpenAIEmbeddings());
87 |     const retriever = vectorstore.asRetriever(
88 |       {
89 |         k: 6,
90 |         searchType: "mmr",
91 |         searchKwargs: {
92 |           fetchK: 20,
93 |           lambda: 0.5
94 |         },
95 |         verbose: false
96 |       },
97 |     );
98 | 
99 |     /**
100 |      * Wrap the retriever in a tool to present it to the agent in a
101 |      * usable form.
102 |      */
103 |     const tool = createRetrieverTool(retriever, {
104 |       name: "search_latest_knowledge",
105 |       description: "Searches and returns up-to-date general information.",
106 |     });
107 | 
108 |     /**
109 |      * Based on https://smith.langchain.com/hub/hwchase17/openai-functions-agent
110 |      *
111 |      * This default prompt for the OpenAI functions agent has a placeholder
112 |      * where chat messages get inserted as "chat_history".
113 |      *
114 |      * You can customize this prompt yourself!
115 |      */
116 | 
117 |     const AGENT_SYSTEM_TEMPLATE = `
118 | You are an artificial intelligence university bot named DegreeGuru, programmed to respond to inquiries about Stanford in a highly systematic and data-driven manner.
119 | 
120 | Begin your answers with a formal greeting and sign off with a closing statement about promoting knowledge.
121 | 
122 | Your responses should be precise and factual, with an emphasis on using the context provided and providing links from the context whenever possible. If a link does not look like it belongs to Stanford, don't use that link or its information in your response.
123 | 
124 | Don't repeat yourself in your responses, even if some information is repeated in the context.
125 | 
126 | Apologize and tell the user that you don't know the answer only when you are faced with a question whose answer is not available in the context.
127 | `;
128 | 
129 |     const prompt = ChatPromptTemplate.fromMessages([
130 |       ["system", AGENT_SYSTEM_TEMPLATE],
131 |       new MessagesPlaceholder("chat_history"),
132 |       ["human", "{input}"],
133 |       new MessagesPlaceholder("agent_scratchpad"),
134 |     ]);
135 | 
136 |     const agent = await createOpenAIFunctionsAgent({
137 |       llm: chatModel,
138 |       tools: [tool],
139 |       prompt,
140 |     });
141 | 
142 |     const agentExecutor = new AgentExecutor({
143 |       agent,
144 |       tools: [tool],
145 |       // Set this if you want to receive all intermediate steps in the output of .invoke().
146 |       returnIntermediateSteps,
147 |     });
148 | 
149 |     if (!returnIntermediateSteps) {
150 |       /**
151 |        * Agent executors also allow you to stream back all generated tokens and steps
152 |        * from their runs.
153 |        *
154 |        * This contains a lot of data, so we do some filtering of the generated log chunks
155 |        * and only stream back the final response.
156 |        *
157 |        * This filtering is easiest with the OpenAI functions or tools agents, since final outputs
158 |        * are log chunk values from the model that contain a string instead of a function call object.
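       * (Concretely, the loop below forwards only string-valued "add" operations
       * whose path starts with "/logs/ChatOpenAI", i.e. the model's final answer
       * tokens, and drops everything else such as tool calls and retriever output.)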
159 | * 160 | * See: https://js.langchain.com/docs/modules/agents/how_to/streaming#streaming-tokens 161 | */ 162 | const logStream = await agentExecutor.streamLog({ 163 | input: currentMessageContent, 164 | chat_history: previousMessages, 165 | }); 166 | 167 | const textEncoder = new TextEncoder(); 168 | const transformStream = new ReadableStream({ 169 | async start(controller) { 170 | for await (const chunk of logStream) { 171 | if (chunk.ops?.length > 0 && chunk.ops[0].op === "add") { 172 | const addOp = chunk.ops[0]; 173 | if ( 174 | addOp.path.startsWith("/logs/ChatOpenAI") && 175 | typeof addOp.value === "string" && 176 | addOp.value.length 177 | ) { 178 | controller.enqueue(textEncoder.encode(addOp.value)); 179 | } 180 | } 181 | } 182 | controller.close(); 183 | }, 184 | }); 185 | 186 | return new StreamingTextResponse(transformStream); 187 | } else { 188 | /** 189 | * Intermediate steps are the default outputs with the executor's `.stream()` method. 190 | * We could also pick them out from `streamLog` chunks. 191 | * They are generated as JSON objects, so streaming them is a bit more complicated. 192 | */ 193 | const result = await agentExecutor.invoke({ 194 | input: currentMessageContent, 195 | chat_history: previousMessages, 196 | }); 197 | 198 | const urls = JSON.parse( 199 | `[${result.intermediateSteps[0]?.observation.replaceAll("}\n\n{", "}, {")}]`, 200 | ).map((source: { url: any }) => source.url); 201 | 202 | return NextResponse.json( 203 | { 204 | _no_streaming_response_: true, 205 | output: result.output, 206 | sources: urls, 207 | }, 208 | { status: 200 }, 209 | ); 210 | } 211 | } catch (e: any) { 212 | console.log(e.message); 213 | return NextResponse.json({ error: e.message }, { status: 500 }); 214 | } 215 | } 216 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # DegreeGuru 2 | 3 | ## Build a RAG Chatbot using Vercel AI SDK, Langchain, Upstash Vector and OpenAI 4 | 5 | [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fupstash%2Fdegreeguru&env=UPSTASH_REDIS_REST_URL,UPSTASH_REDIS_REST_TOKEN,UPSTASH_VECTOR_REST_URL,UPSTASH_VECTOR_REST_TOKEN,OPENAI_API_KEY&demo-title=DegreeGuru%20Demo&demo-description=A%20Demo%20Showcasing%20the%20DegreeGuru%20App&demo-url=https%3A%2F%2Fdegreeguru.vercel.app%2F&demo-image=https%3A%2F%2Fupstash.com%2Ficons%2Ffavicon-32x32.png) 6 | 7 | ![overview](figs/overview.gif) 8 | 9 | > [!NOTE] 10 | > **This project is a Community Project.** 11 | > 12 | > The project is maintained and supported by the community. Upstash may contribute but does not officially support or assume responsibility for it. 13 | 14 | **DegreeGuru** is a project designed to teach you making your own AI RAG chatbot on any custom data. Some of our favorite features: 15 | 16 | - 🕷️ Built-in crawler that scrapes the website you point it to, automatically making this data available for the AI 17 | - ⚡ Fast answers using Upstash Vector and real-time data streaming 18 | - 🛡️ Includes rate limiting to prevent API abuse 19 | 20 | This chatbot is trained on data from Stanford University as an example, but is totally domain agnostic. We've created this project so you can turn it into a chatbot with your very own data by simply modifying the `crawler.yaml` file. 21 | 22 | ## Overview 23 | 24 | 1. [Stack](#stack) 25 | 2. [Quickstart](#quickstart) 26 | 1. [Crawler](#crawler) 27 | 2. 
[ChatBot](#chatbot)
28 | 3. [Conclusion](#conclusion)
29 | 4. [Limitations](#limitations)
30 | 
31 | ## Stack
32 | 
33 | - Crawler: [scrapy](https://scrapy.org/)
34 | - Chatbot App: [Next.js](https://nextjs.org/)
35 | - Vector DB: [Upstash](https://upstash.com/)
36 | - LLM Orchestration: [Langchain.js](https://js.langchain.com)
37 | - Generative Model: [OpenAI](https://openai.com/), [gpt-3.5-turbo-1106](https://platform.openai.com/docs/models)
38 | - Embedding Model: [OpenAI](https://openai.com/), [text-embedding-ada-002](https://platform.openai.com/docs/guides/embeddings)
39 | - Text Streaming: [Vercel AI](https://vercel.com/ai)
40 | - Rate Limiting: [Upstash](https://upstash.com/)
41 | 
42 | ## Quickstart
43 | 
44 | For local development, we recommend forking this project and cloning the forked repository to your local machine by running the following command:
45 | 
46 | ```
47 | git clone git@github.com:[YOUR_GITHUB_ACCOUNT]/DegreeGuru.git
48 | ```
49 | 
50 | This project contains two primary components: the crawler and the chatbot. First, we'll take a look at how the crawler extracts information from any website you point it to. This data is automatically stored in an Upstash Vector database. If you already have a vector database available, the crawling stage can be skipped.
51 | 
52 | ### Step 1: Crawler
53 | 
54 | ![crawler-diagram](figs/how-this-project-works.png)
55 | 
56 | The crawler is developed in Python by [initializing a Scrapy project](https://docs.scrapy.org/en/latest/intro/tutorial.html#creating-a-project) and implementing a [custom spider](https://github.com/upstash/degreeguru/blob/master/degreegurucrawler/degreegurucrawler/spiders/configurable.py). The spider is equipped with [the `parse_page` function](https://github.com/upstash/degreeguru/blob/master/degreegurucrawler/degreegurucrawler/spiders/configurable.py#L42), invoked each time the spider visits a webpage. This callback function splits the text on the webpage into chunks, generates vector embeddings for each chunk, and upserts those vectors into your Upstash Vector Database. Each vector stored in our database includes the original text and website URL as metadata.
57 | 
58 | 
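Conceptually, each page visit boils down to a few steps: extract the page text, split it into overlapping chunks, embed each chunk, and upsert the vectors together with their source URL. The sketch below illustrates that flow with the same libraries the crawler uses, assuming the default `crawler.yaml` settings; the `index_page` helper is a hypothetical name for illustration, not the spider itself (see `configurable.py` for the real implementation):

```python
# Illustrative sketch of the crawl-to-vector flow (not the actual spider).
import os
import uuid

from langchain.text_splitter import RecursiveCharacterTextSplitter
from openai import OpenAI
from upstash_vector import Index

openai_client = OpenAI()  # reads OPENAI_API_KEY from the environment
index = Index(
    url=os.environ["UPSTASH_VECTOR_REST_URL"],
    token=os.environ["UPSTASH_VECTOR_REST_TOKEN"],
)

def index_page(url: str, page_text: str) -> None:
    # 1. split the page into overlapping chunks (defaults from crawler.yaml)
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    chunks = splitter.split_text(page_text)
    if not chunks:
        return
    # 2. embed all chunks in a single API call
    response = openai_client.embeddings.create(
        input=chunks, model="text-embedding-ada-002"
    )
    # 3. upsert (id, vector, metadata) triples; metadata keeps text + source URL
    index.upsert(
        vectors=[
            (str(uuid.uuid4()), item.embedding, {"text": chunk, "url": url})
            for chunk, item in zip(chunks, response.data)
        ]
    )
```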
59 | 60 | To run the crawler, follow these steps: 61 | 62 | > [!TIP] 63 | > If you have docker installed, you can skip the "Configure Environment Variables" and "Install Required Python Libraries" sections. Instead you can simply update the environment variables in [docker-compose.yml](https://github.com/upstash/DegreeGuru/blob/master/degreegurucrawler/docker-compose.yml) and run `docker-compose up`. This will create a container running our crawler. Don't forget to configure the crawler as explained in the following sections! 64 | 65 |
66 | 
67 | Configure Environment Variables
68 | Before we can run our crawler, we need to configure environment variables. They let us securely store sensitive information, such as the API keys we need to communicate with OpenAI or Upstash Vector.
69 | 
70 | If you don't already have an Upstash Vector Database, create one [here](https://console.upstash.com/vector) and set 1536 as the vector dimensions. We use 1536 because that is the dimension of the vectors produced by the embedding model we will use.
71 | 
72 | ![vector-db-create](figs/vector-db-create.png)
73 | 
74 | The following environment variables should be set:
75 | 
76 | ```
77 | # Upstash Vector credentials retrieved here: https://console.upstash.com/vector
78 | UPSTASH_VECTOR_REST_URL=****
79 | UPSTASH_VECTOR_REST_TOKEN=****
80 | 
81 | # OpenAI key retrieved here: https://platform.openai.com/api-keys
82 | OPENAI_API_KEY=****
83 | ```
84 | 
85 | 
86 | 87 |
Install Required Python Libraries
89 | 
90 | To install the libraries, we suggest setting up a virtual Python environment. Before starting the installation, navigate to the `degreegurucrawler` directory.
91 | 
92 | To set up a virtual environment, first install the `virtualenv` package:
93 | 
94 | ```bash
95 | pip install virtualenv
96 | ```
97 | 
98 | Then, create a new virtual environment and activate it:
99 | 
100 | ```bash
101 | # create environment
102 | python3 -m venv venv
103 | 
104 | # activate environment
105 | source venv/bin/activate
106 | ```
107 | 
108 | Finally, use [the `requirements.txt`](https://github.com/upstash/degreeguru/blob/master/degreegurucrawler/requirements.txt) to install the required libraries:
109 | 
110 | ```bash
111 | pip install -r requirements.txt
112 | ```
113 | 
114 | 
115 | 116 | 117 | 118 |
119 | 
120 | After setting these environment variables, we are almost ready to run the crawler. The next step is configuring the crawler itself, primarily through the `crawler.yaml` file in the `degreegurucrawler/utils` directory. We also need to check one important setting in the `settings.py` file.
121 | 
122 | 
Configuring the crawler in `crawler.yaml`
124 | 
125 | The `crawler.yaml` file has two main sections: `crawler` and `index`:
126 | 
127 | ```yaml
128 | crawler:
129 |   start_urls:
130 |     - https://www.some.domain.com
131 |   link_extractor:
132 |     allow: '.*some\.domain.*'
133 |     deny:
134 |       - "#"
135 |       - '\?'
136 |       - about
137 | index:
138 |   openAI_embedding_model: text-embedding-ada-002
139 |   text_splitter:
140 |     chunk_size: 1000
141 |     chunk_overlap: 100
142 | ```
143 | 
144 | In the `crawler` section, there are two subsections:
145 | 
146 | - `start_urls`: the entrypoints our crawler will start searching from
147 | - `link_extractor`: a dictionary passed as arguments to [`scrapy.linkextractors.LinkExtractor`](https://docs.scrapy.org/en/latest/topics/link-extractors.html). Some important parameters are:
148 |   - `allow`: Only extract links matching the given regex(es)
149 |   - `allow_domains`: Only extract links matching the given domain(s)
150 |   - `deny`: Deny links matching the given regex(es)
151 | 
152 | In the `index` section, there are two subsections:
153 | 
154 | - `openAI_embedding_model`: The embedding model to use
155 | - `text_splitter`: a dictionary passed as arguments to [`langchain.text_splitter.RecursiveCharacterTextSplitter`](https://api.python.langchain.com/en/latest/text_splitter/langchain.text_splitter.RecursiveCharacterTextSplitter.html)
156 | 
157 | 
158 | 159 |
160 | Configuring crawl depth via `settings.py` 161 | 162 | `settings.py` file has an important setting called `DEPTH_LIMIT` which determines how many consecutive links our spider can crawl. A high value lets our crawler visit the deepest corners of a website, taking longer to finish with possibly diminishing returns. A low value could end the crawl before extracting relevant information. 163 | 164 | If pages are skipped due to the `DEPTH_LIMIT`, Scrapy logs those skipped URLs for us. Because this usually causes a lot of logs, we've disabled this option in our project. If you'd like to keep it enabled, remove [the `"scrapy.spidermiddlewares.depth"` from the `disable_loggers` in `degreegurucrawler/spider/configurable.py` file](https://github.com/upstash/degreeguru/blob/master/degreegurucrawler/degreegurucrawler/spiders/configurable.py#L22). 165 | 166 |
167 | 168 |
169 | 170 | That's it! 🎉 We've configured our crawler and are ready to run it using the following command: 171 | 172 | ``` 173 | scrapy crawl configurable --logfile degreegurucrawl.log 174 | ``` 175 | 176 | Note that running this might take time. You can monitor the progress by looking at the log file `degreegurucrawl.log` or the metrics of your Upstash Vector Database dashboard as shown below. 177 | 178 | ![vector-db](figs/vector-db.png) 179 | 180 | > [!TIP] 181 | > If you want to do a dry run (without creating embeddings or a vector database), simply comment out [the line where we pass the `callback` parameter to the `Rule` object in `ConfigurableSpider`](https://github.com/upstash/degreeguru/blob/master/degreegurucrawler/degreegurucrawler/spiders/configurable.py#L38) 182 | 183 | ### Step 2: Chatbot 184 | 185 | In this section, we'll explore how to chat with the data we've just crawled and stored in our vector database. Here's an overview of what this will look like architecturally: 186 | 187 | ![chatbot-diagram](figs/infrastructure.png) 188 | 189 | Before we can run the chatbot locally, we need to set the environment variables as shown in the [`.env.local.example`](https://github.com/upstash/degreeguru/blob/master/.env.local.example) file. Rename this file and remove the `.example` ending, leaving us with `.env.local`. 190 | 191 | Your `.env.local` file should look like this: 192 | ``` 193 | # Redis tokens retrieved here: https://console.upstash.com/ 194 | UPSTASH_REDIS_REST_URL= 195 | UPSTASH_REDIS_REST_TOKEN= 196 | 197 | # Vector database tokens retrieved here: https://console.upstash.com/vector 198 | UPSTASH_VECTOR_REST_URL= 199 | UPSTASH_VECTOR_REST_TOKEN= 200 | 201 | # OpenAI key retrieved here: https://platform.openai.com/api-keys 202 | OPENAI_API_KEY= 203 | ``` 204 | 205 | The first four variables are provided by Upstash, you can visit the commented links for the place to retrieve these tokens. You can find the vector database tokens here: 206 | 207 | ![vector-db-read-only](figs/vector-db-read-only.png) 208 | 209 | The `UPSTASH_REDIS_REST_URL` and `UPSTASH_REDIS_REST_TOKEN` are needed for rate-limiting based on IP address. In order to get these secrets, go to Upstash dashboard and create a Redis database. 210 | 211 | ![redis-create](figs/redis-create.png) 212 | 213 | Finally, set the `OPENAI_API_KEY` environment variable you can get [here](https://platform.openai.com/api-keys) which allows us to vectorize user queries and generate responses. 214 | 215 | That's the setup done! 🎉 We've configured our crawler, set up all neccessary environment variables are after running `npm install` to install all local packages needed to run the app, we can start our chatbot using the command: 216 | 217 | ```bash 218 | npm run dev 219 | ``` 220 | 221 | Visit `http://localhost:3000` to see your chatbot live in action! 222 | 223 | ### Step 3: Optional tweaking 224 | 225 | You can use this chatbot in two different modes: 226 | 227 | - Streaming Mode: model responses are streamed to the web application in real-time as the model generates them. Interaction with the app is more fluid. 228 | - Non-Streaming Mode: Model responses are shown to the user once entirely generated. In this mode, DegreeGuru can explicitly provide the URLs of the web pages it uses as context. 229 | 230 |
Changing streaming mode
232 | 
233 | To turn streaming on/off, navigate to `src/app/api/guru` and open the `route.tsx` file. Setting [`returnIntermediateSteps`](https://github.com/upstash/degreeguru/blob/master/src/app/api/guru/route.tsx#L64) to `true` disables streaming; setting it to `false` enables streaming.
234 | 
235 | 
236 | 237 | To customize the chatbot further, you can update the [AGENT_SYSTEM_TEMPLATE in your route.tsx file](https://github.com/upstash/DegreeGuru/blob/master/src/app/api/guru/route.tsx#L101) to better match your specific use case. 238 | 239 |
240 | 
241 | ## Conclusion
242 | 
243 | Congratulations on setting up your own AI chatbot! We hope you learned a lot by following along and seeing how the different parts of this app, namely the crawler, the vector database, and the LLM, play together. A major focus in developing this project was a user-friendly design with adaptable settings, so you can tailor it to your own use case.
244 | 
245 | ## Limitations
246 | 
247 | The above implementation works great for a variety of use cases. There are a few limitations we'd like to mention:
248 | 
249 | - Because the Upstash LangChain integration is a work-in-progress, the [`UpstashVectorStore`](https://github.com/upstash/degreeguru/blob/master/src/app/vectorstore/UpstashVectorStore.js) used with LangChain currently only implements the `similaritySearchVectorWithScore` and `maxMarginalRelevanceSearch` methods needed for our agent. Once we're done developing our native LangChain integration, we'll update this project accordingly.
250 | - When the non-streaming mode is enabled, the message history can cause an error after the user enters another query.
251 | - Our sources are available as URLs in the Upstash Vector Database, but we cannot show the sources explicitly when streaming. Instead, we provide the links to the chatbot as context and expect the bot to include the links in the response.
252 | 
--------------------------------------------------------------------------------