Ollama RAG

parmarjatin4911@gmail.com · Jan 28

pip install ollama
pip install langchain beautifulsoup4 chromadb gradio
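
Before running the snippets below, the Ollama server has to be running and the mistral model pulled locally. With the standard Ollama CLI that looks roughly like this (adjust the model name if you use a different one):

ollama pull mistral
ollama serve   # only needed if the Ollama server is not already running in the background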

import ollama

stream = ollama.chat(
    model='mistral',
    messages=[{'role': 'user', 'content': 'Why is the sky blue?'}],
    stream=True,
)

for chunk in stream:
    print(chunk['message']['content'], end='', flush=True)

print(ollama.embeddings(model='mistral', prompt='The sky is blue because of Rayleigh scattering'))
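
The embeddings call returns a dictionary rather than a bare vector; the vector itself sits under the 'embedding' key. A minimal sketch, assuming the default response shape of the ollama Python client:

response = ollama.embeddings(model='mistral', prompt='The sky is blue because of Rayleigh scattering')
vector = response['embedding']  # list of floats
print(len(vector))              # dimensionality of the mistral embedding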

RAG

import ollama
import bs4
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.vectorstores import Chroma
from langchain_community.embeddings import OllamaEmbeddings
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough

loader = WebBaseLoader(
    web_paths=("http://localhost/llm.html",),
    bs_kwargs=dict(
        parse_only=bs4.SoupStrainer(
            class_=("post-content", "post-title", "post-header")
        )
    ),
)
docs = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
splits = text_splitter.split_documents(docs)

Create Ollama embeddings and vector store

embeddings = OllamaEmbeddings(model="mistral")
vectorstore = Chroma.from_documents(documents=splits, embedding=embeddings)
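
By default this Chroma store lives only in memory and is rebuilt every run. If you want to reuse the index, Chroma accepts a persist_directory argument; a sketch, where the path is just an example and not part of the original post:

vectorstore = Chroma.from_documents(
    documents=splits,
    embedding=embeddings,
    persist_directory="./chroma_db",  # example path; the on-disk index can be reloaded later
)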

Create the retriever

retriever = vectorstore.as_retriever()
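
as_retriever() uses Chroma's default similarity search. If you want to control how many chunks are passed to the model, you can set search_kwargs; a small optional tweak, not part of the original example:

retriever = vectorstore.as_retriever(search_kwargs={"k": 4})  # return the 4 most similar chunks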

def format_docs(docs):
    return "\n\n".join(doc.page_content for doc in docs)

Define the Ollama LLM function

def ollama_llm(question, context):
    formatted_prompt = f"Question: {question}\n\nContext: {context}"
    response = ollama.chat(model='mistral', messages=[{'role': 'user', 'content': formatted_prompt}])
    return response['message']['content']

Define the RAG chain

def rag_chain(question):
    retrieved_docs = retriever.invoke(question)
    formatted_context = format_docs(retrieved_docs)
    return ollama_llm(question, formatted_context)

Use the RAG chain

result = rag_chain("What is Task Decomposition?")
print(result)

UI

import gradio as gr
import bs4
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.vectorstores import Chroma
from langchain_community.embeddings import OllamaEmbeddings
import ollama

Function to load, split, and retrieve documents

def load_and_retrieve_docs(url):
    loader = WebBaseLoader(
        web_paths=(url,),
        bs_kwargs=dict()
    )
    docs = loader.load()
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    splits = text_splitter.split_documents(docs)
    embeddings = OllamaEmbeddings(model="mistral")
    vectorstore = Chroma.from_documents(documents=splits, embedding=embeddings)
    return vectorstore.as_retriever()

Function to format documents

def format_docs(docs):
    return "\n\n".join(doc.page_content for doc in docs)

Function that defines the RAG chain

def rag_chain(url, question):
    retriever = load_and_retrieve_docs(url)
    retrieved_docs = retriever.invoke(question)
    formatted_context = format_docs(retrieved_docs)
    formatted_prompt = f"Question: {question}\n\nContext: {formatted_context}"
    response = ollama.chat(model='mistral', messages=[{'role': 'user', 'content': formatted_prompt}])
    return response['message']['content']
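
Note that this rag_chain rebuilds the embeddings and vector store on every question, which is slow when you ask several questions about the same page. A simple in-memory cache keyed by URL avoids the repeated work; a minimal sketch, not part of the original code:

retriever_cache = {}

def rag_chain_cached(url, question):
    # Reuse the retriever if this URL has already been indexed
    if url not in retriever_cache:
        retriever_cache[url] = load_and_retrieve_docs(url)
    retriever = retriever_cache[url]
    retrieved_docs = retriever.invoke(question)
    formatted_context = format_docs(retrieved_docs)
    formatted_prompt = f"Question: {question}\n\nContext: {formatted_context}"
    response = ollama.chat(model='mistral', messages=[{'role': 'user', 'content': formatted_prompt}])
    return response['message']['content']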

Gradio interface

iface = gr.Interface(
    fn=rag_chain,
    inputs=["text", "text"],
    outputs="text",
    title="RAG Chain Question Answering",
    description="Enter a URL and a query to get answers from the RAG chain.",
)

Launch the app

iface.launch()
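
launch() serves the app locally (by default at http://127.0.0.1:7860). If you want to expose it on your network or get a temporary public link, Gradio's launch options cover both; these are optional alternatives, not part of the original post:

iface.launch(server_name="0.0.0.0", server_port=7860)  # listen on all network interfaces
# or: iface.launch(share=True)                          # temporary public Gradio link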
