import os

import gspread
from oauth2client.service_account import ServiceAccountCredentials
from pymongo import MongoClient

# Read the authentication tokens from the environment variables
hugging_face_token = os.getenv("HUGGING_FACE_TOKEN")
replicate_token = os.getenv("REPLICATE_TOKEN")
groq_token = os.getenv("GROQ_TOKEN")
atlas_token = os.getenv("ATLAS_TOKEN")
open_ruter_token = os.getenv("OPEN_RUTER_TOKEN")
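# The tokens above are only read here; they are expected to be provided by the
# hosting environment (e.g., Space secrets or exported shell variables).
# A purely illustrative local override (placeholder value, not part of the app):
#
#   os.environ["GROQ_TOKEN"] = "<your-groq-api-key>"
#   groq_token = os.getenv("GROQ_TOKEN")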


# Atlas (MongoDB) configuration
class AtlasClient:
    def __init__(self, dbname):
        # atlas_token is expected to be a full MongoDB Atlas connection string
        self.mongodb_client = MongoClient(atlas_token)
        self.database = self.mongodb_client[dbname]

    # A quick way to test whether we can connect to the Atlas instance
    def ping(self):
        self.mongodb_client.admin.command("ping")

    # Insert a single document into the given collection
    def add(self, item, collection_name):
        collection = self.database[collection_name]
        collection.insert_one(item)
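
# Minimal usage sketch (assumes ATLAS_TOKEN holds a valid connection string;
# the database and collection names below are illustrative only):
#
#   atlas = AtlasClient("chatbot")
#   atlas.ping()
#   atlas.add({"user": "Laura Musto", "message": "hello"}, "interactions")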


# Google Sheets configuration
def init_google_sheets_client():
    scope = ["https://spreadsheets.google.com/feeds", "https://www.googleapis.com/auth/drive"]
    creds = ServiceAccountCredentials.from_json_keyfile_name('tokyo-portal-326513-90aee094bab9.json', scope)
    return gspread.authorize(creds)


# Google Sheets name
google_sheets_name = "Chatbot Test"
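
# Sketch: open the configured spreadsheet and append a row. Assumes the
# service-account JSON file is present and the sheet is shared with that
# service account; the row contents are illustrative only:
#
#   gc = init_google_sheets_client()
#   worksheet = gc.open(google_sheets_name).sheet1
#   worksheet.append_row(["user", "model", "message"])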


# Define available models
# Hugging Face model/tokenizer repo ids (display name -> repo id)
huggingface_tokenizer = {
    "Meta-Llama-3-8B-Instruct": "meta-llama/Meta-Llama-3-8B-Instruct",
    "Llama-2-7B-Chat": "meta-llama/Llama-2-7b-chat-hf",
    "mistralai/mistral-7b-instruct-v0.2": "mistralai/Mistral-7B-Instruct-v0.2",
    "Meta-Llama-3-70B-Instruct": "meta-llama/Meta-Llama-3-70B-Instruct",
}

# Available models for Replicate
replicate_model = {
    "Meta-Llama-3-8B-Instruct": "meta/meta-llama-3-8b-instruct",
    "Llama-2-7B-Chat": "meta/llama-2-7b-chat",
    "mistralai/mistral-7b-instruct-v0.2": "mistralai/mistral-7b-instruct-v0.2",
    "Meta-Llama-3-70B-Instruct": "meta/meta-llama-3-70b-instruct",
}

# Available models for Groq
groq_model = {
    "llama3-8b-8192": "llama3-8b-8192",
    "llama-guard-3-8b": "llama-guard-3-8b",
    "gemma-7b-it": "gemma-7b-it",
    "llama3-70b-8192": "llama3-70b-8192",
}

# Custom fine-tuned model hosted on Hugging Face
custom_model = {
    "rodrisouza/Llama-3-8B-Finetuning-Stories": "rodrisouza/Llama-3-8B-Finetuning-Stories"
}

# Free-tier models (OpenRouter-style ":free" ids) used via an OpenAI-compatible client
openai_model = {
    "meta-llama/llama-3.1-70b-instruct:free": "meta-llama/llama-3.1-70b-instruct:free",
    "meta-llama/llama-3.1-8b-instruct:free": "meta-llama/llama-3.1-8b-instruct:free",
    "mistralai/mistral-7b-instruct:free": "mistralai/mistral-7b-instruct:free",
    "google/gemma-2-9b-it:free": "google/gemma-2-9b-it:free",
}
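
# Sketch: how a display name chosen in the UI could be resolved to the
# provider-specific ids above (lookups are illustrative, not app logic):
#
#   selected = "Meta-Llama-3-8B-Instruct"
#   replicate_id = replicate_model[selected]      # "meta/meta-llama-3-8b-instruct"
#   hf_repo_id = huggingface_tokenizer[selected]  # "meta-llama/Meta-Llama-3-8B-Instruct"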

# Default model (first entry in replicate_model)
default_model_name = next(iter(replicate_model))

# Define available user names
user_names = ["Laura Musto", "Brian Carpenter", "Germán Capdehourat", "Isabel Amigo", "Aiala Rosá", "Luis Chiruzzo", "Ignacio Sastre", "Santiago Góngora", "Ignacio Remersaro", "Rodrigo Souza"]

MAX_INTERACTIONS = 5
QUESTION_PROMPT = "Please ask a simple question about the story to encourage interaction."