@@ -42,15 +42,15 @@ Indices can be defined through yaml specification that corresponds directly to t
 
 ```yaml
 index:
-    name: users
+    name: user_index
     storage_type: hash
-    prefix: "user:"
-    key_field: "id"
+    prefix: users
+    key_field: user
 
 fields:
     # define tag fields
     tag:
-        - name: users
+        - name: user
         - name: job
         - name: credit_score
     # define numeric fields
@@ -65,7 +65,7 @@ fields:
 
 This would correspond to a dataset that looks something like:
 
-| users | age | job        | credit_score | user_embedding             |
+| user  | age | job        | credit_score | user_embedding             |
 |-------|-----|------------|--------------|----------------------------|
 | john  | 1   | engineer   | high         | \x3f\x8c\xcc\x3f\x8c\xcc?@ |
 | mary  | 2   | doctor     | low          | \x3f\x8c\xcc\x3f\x8c\xcc?@ |
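
The `user_embedding` column holds the raw byte encoding of each vector rather than human-readable floats. As a minimal sketch, assuming the vectors are stored as float32 (the values here are illustrative), the bytes can be produced with numpy:

```python
import numpy as np

# a 3-dimensional embedding like the ones shown in the table
vector = np.array([1.1, 0.2, 1.1], dtype=np.float32)

# Redis hashes store the vector as its raw byte representation
vector_bytes = vector.tobytes()
print(len(vector_bytes))  # 12 bytes: 3 dimensions * 4 bytes per float32
```
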
@@ -74,6 +74,8 @@ This would correspond to a dataset that looks something like:
 
 With this schema, the RedisVL library can be used to create the index, load vectors, and perform vector searches:
 ```python
+import numpy as np
+import pandas as pd
+
 from redisvl.index import SearchIndex
 from redisvl.query import create_vector_query
 
@@ -82,47 +84,45 @@ index = SearchIndex.from_yaml("./users_schema.yml")
 index.connect("redis://localhost:6379")
 index.create()
 
-index.load(pd.read_csv("./users.csv").to_records())
+index.load(pd.read_csv("./users.csv").to_dict("records"))
 
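+# build a KNN vector query: return the 2 nearest records with the listed fields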
 query = create_vector_query(
-    ["users", "age", "job", "credit_score"],
+    ["user", "age", "job", "credit_score"],
     number_of_results=2,
     vector_field_name="user_embedding",
 )
 
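+# the query vector is passed as raw bytes; float32 is assumed here to
+# match how the stored vectors were encoded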
 query_vector = np.array([0.1, 0.1, 0.5], dtype=np.float32).tobytes()
 results = index.search(query, query_params={"vector": query_vector})
 
+
 ```
 
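`index.search` hands back the underlying redis-py search result. As a minimal sketch of consuming it, assuming a `Result` object whose `docs` list exposes the schema fields as attributes (attribute names here are illustrative):

```python
# iterate over the documents matched by the vector query
for doc in results.docs:
    print(doc.user, doc.age, doc.job, doc.credit_score)
```
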
 ### Semantic cache
 
 The ``LLMCache`` interface in RedisVL can be used as follows:
 
 ```python
-# init OpenAI client
-import openai
-openai.api_key = "sk-xxx"
-
 from redisvl.llmcache.semantic import SemanticCache
-cache = SemanticCache(redis_host="localhost", redis_port=6379, redis_password=None)
-
-def ask_gpt3(question):
-    response = openai.Completion.create(
-        engine="text-davinci-003",
-        prompt=question,
-        max_tokens=100
-    )
-    return response.choices[0].text.strip()
-
-def answer_question(question: str):
-    results = cache.check(question)
-    if results:
-        return results[0]
-    else:
-        answer = ask_gpt3(question)
-        cache.store(question, answer)
-        return answer
+cache = SemanticCache(
+    redis_url="redis://localhost:6379",
+    threshold=0.9,  # semantic similarity threshold
+)
+
+# check whether the cache has a result for a given query
+cache.check("What is the capital of France?")
+[]
+
+# store a result for a given query
+cache.store("What is the capital of France?", "Paris")
+
+# the cache will now return the stored answer for the query
+cache.check("What is the capital of France?")
+["Paris"]
+
+# the cache also returns the answer for a query that is similar enough
+cache.check("What really is the capital of France?")
+["Paris"]
 ```
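
A common pattern is to consult the cache before calling the model and to store fresh answers on a miss, much like the wrapper removed above. A minimal sketch, where `compute_answer` is a hypothetical stand-in for any LLM call:

```python
def answer_question(question: str) -> str:
    # serve a semantically similar cached answer when one exists
    cached = cache.check(question)
    if cached:
        return cached[0]
    # otherwise compute, cache, and return a fresh answer
    answer = compute_answer(question)  # hypothetical LLM call
    cache.store(question, answer)
    return answer
```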