ionosphere committed
Commit 1a555a0 · 1 Parent(s): 2b3d902
Files changed (4):
  1. .gitignore +4 -0
  2. README.md +34 -6
  3. app.py +49 -58
  4. requirements.txt +12 -1
.gitignore ADDED
@@ -0,0 +1,4 @@
+ chroma_db/*
+ __pycache__/*
+ .venv
+ .env
README.md CHANGED
@@ -1,13 +1,41 @@
  ---
- title: Simple Chatbot
- emoji: 💬
- colorFrom: yellow
- colorTo: purple
  sdk: gradio
- sdk_version: 5.6.0
  app_file: app.py
  pinned: false
  license: mit
  ---
  
- An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).

  ---
+ title: GAIA Chatbot - level 2
+ emoji: 👁
+ colorFrom: red
+ colorTo: pink
  sdk: gradio
+ sdk_version: 5.13.1
  app_file: app.py
  pinned: false
  license: mit
+ short_description: Example of chatbot with RAG on one link
  ---
+ # Run on a Space
  
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+
+ Simply push your code to a Hugging Face Space.
+
+ # Run locally
+
+ You must have [Python 3.8](https://www.python.org/downloads/) installed.
+
+ Check https://www.gradio.app/guides/quickstart for more details about Gradio.
+
+ ## Install dependencies
+
+ `python -m venv .venv`
+
+ `source .venv/bin/activate`
+
+ `pip install -r requirements.txt`
+
+ ## Add your Claude API key to your environment variables
+
+ ANTHROPIC_API_KEY=XXXXXXXXXXXXXXXXXX
+
+ ## Run your code
+
+ `python3 app.py`
+
+ ## Open your browser to `http://127.0.0.1:7860`
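The run-locally steps above export the key in the shell; app.py (next file) also calls `load_dotenv()`, so a `.env` file at the repo root works as well. A minimal sanity check, assuming python-dotenv from requirements.txt is installed:

```python
# Verify that ANTHROPIC_API_KEY is visible, whether it comes from the shell
# environment or from a .env file at the repo root.
import os
from dotenv import load_dotenv

load_dotenv()  # no-op if there is no .env file
print("ANTHROPIC_API_KEY set:", bool(os.environ.get("ANTHROPIC_API_KEY")))
```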
app.py CHANGED
@@ -1,64 +1,55 @@
  import gradio as gr
- from huggingface_hub import InferenceClient
  
- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
- ):
-     messages = [{"role": "system", "content": system_message}]
-
-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
-
-         response += token
-         yield response
-
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
  demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
  )
  
- if __name__ == "__main__":
-     demo.launch()

+ import os
+ import sys
+ from dotenv import load_dotenv
+ import anthropic
  import gradio as gr
+ from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
+
+ load_dotenv()
+
+ title = "Gaia Anthropic Claude 3.5 Sonnet Chat RAG URL Demo"
+ description = "Example of an assistant with Gradio, RAG from url and Claude 3.5 Sonnet via its API"
+ placeholder = "You can ask me a question about this context, press Enter to submit"
+ placeholder_url = "Extract text from this url"
+ # llm_model = 'open-mixtral-8x22b'
+ llm_model = 'claude-3-5-sonnet-latest'
+ # choose api_key from .env or from input field
+ # placeholder_api_key = "API key"
+ # env_mistral_api_key = os.environ.get("MISTRAL_API_KEY")
+ env_api_key = os.environ.get("ANTHROPIC_API_KEY")
+
+ # Set up clients
+ # llm = MistralAI(api_key=env_api_key, model=llm_model)
+ # llm = Anthropic(api_key=env_api_key, model=llm_model)
+ # embed_model = MistralAIEmbedding(model_name='mistral-embed', api_key=env_mistral_api_key)
+ # Settings.llm = llm
+ # Settings.embed_model = embed_model
+ # Settings.chunk_size = 1024
+
+ # client = anthropic.Anthropic(api_key=env_api_key)
+
+ def answer(message, history):
+     # Collect files uploaded earlier in the conversation...
+     files = []
+     for msg in history:
+         if msg['role'] == "user" and isinstance(msg['content'], tuple):
+             files.append(msg['content'][0])
+     # ...plus any files attached to the current message.
+     for file in message["files"]:
+         files.append(file)
+
+     # Build an in-memory vector index over the uploaded files and answer the question.
+     documents = SimpleDirectoryReader(input_files=files).load_data()
+     index = VectorStoreIndex.from_documents(documents)
+     query_engine = index.as_query_engine()
+     return str(query_engine.query(message["text"]))
+
  demo = gr.ChatInterface(
+     answer,
+     type="messages",
+     title="Llama Index RAG Chatbot",
+     description="Upload any text or pdf files and ask questions about them!",
+     textbox=gr.MultimodalTextbox(file_types=[".pdf", ".txt", ".html"]),
+     multimodal=True
  )
  
+ demo.title = title
  
+ demo.launch(share=True)
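Note that the `Settings` wiring stays commented out in this commit, so `VectorStoreIndex.from_documents` and `as_query_engine` fall back to llama_index's default OpenAI models rather than Claude. A sketch of what that commented-out configuration could look like if enabled, assuming the extra package `llama-index-llms-anthropic` is installed and both `ANTHROPIC_API_KEY` and `MISTRAL_API_KEY` are set:

```python
# Sketch only: enables the commented-out Settings block from app.py.
# Assumes llama-index-llms-anthropic (not yet in requirements.txt) and
# llama-index-embeddings-mistralai are installed, and both API keys are set.
import os
from dotenv import load_dotenv
from llama_index.core import Settings, VectorStoreIndex, SimpleDirectoryReader
from llama_index.llms.anthropic import Anthropic
from llama_index.embeddings.mistralai import MistralAIEmbedding

load_dotenv()

Settings.llm = Anthropic(model="claude-3-5-sonnet-latest",
                         api_key=os.environ.get("ANTHROPIC_API_KEY"))
Settings.embed_model = MistralAIEmbedding(model_name="mistral-embed",
                                          api_key=os.environ.get("MISTRAL_API_KEY"))
Settings.chunk_size = 1024

# With Settings populated, the same flow as answer() uses Claude for generation
# and Mistral for embeddings ("example.pdf" is a hypothetical local file).
documents = SimpleDirectoryReader(input_files=["example.pdf"]).load_data()
index = VectorStoreIndex.from_documents(documents)
print(index.as_query_engine().query("What is this file about?"))
```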
 
requirements.txt CHANGED
@@ -1 +1,12 @@
- huggingface_hub==0.25.2
+ mistralai
+ gradio
+ python-dotenv
+ anthropic
+ faiss-cpu
+ numpy
+ html2text
+ llama-index
+ llama-index-readers-web
+ llama-index-readers-file
+ llama-index-llms-mistralai
+ llama-index-embeddings-mistralai