diff --git a/chat_with_pdf_query.py b/chat_with_pdf_query.py index ac23ee3..e4547cd 100644 --- a/chat_with_pdf_query.py +++ b/chat_with_pdf_query.py @@ -223,7 +223,7 @@ def stream_string(s, chunk_size=10): "https://emoji.slack-edge.com/T024FJS4M/couchbase/4a361e948b15ed91.png" ) - st.title("Chat with PDF (GSI Vector Search)") + st.title("Chat with PDF (Query Vector Store)") st.markdown( "Answers with [Couchbase logo](https://emoji.slack-edge.com/T024FJS4M/couchbase/4a361e948b15ed91.png) are generated using *RAG* while 🤖 are generated by pure *LLM (ChatGPT)*" ) @@ -251,7 +251,7 @@ def stream_string(s, chunk_size=10): ) st.markdown( - "For RAG, we are using [Langchain](https://langchain.com/), [Couchbase GSI Vector Search](https://couchbase.com/) & [OpenAI](https://openai.com/). We fetch parts of the PDF relevant to the question using N1QL-based Vector search & add it as the context to the LLM. The LLM is instructed to answer based on the context from the Vector Store." + "For RAG, we are using [Langchain](https://langchain.com/), [Couchbase Query Vector Search](https://couchbase.com/) & [OpenAI](https://openai.com/). We fetch parts of the PDF relevant to the question using Query-based Vector search & add it as the context to the LLM. The LLM is instructed to answer based on the context from the Vector Store." ) # View Code