# Mirror of https://github.com/Ladebeze66/projetcbaollm.git
# Synced 2025-12-13 09:06:53 +01:00
import json
import os

import gradio as gr
import requests
# Configuration serveur
|
|
OLLAMA_SERVER = "http://217.182.105.173:11434/api/generate"
|
|
MODEL_NAME = "mistral"
|
|
|
|
# Dossier où seront stockées les conversations des utilisateurs
|
|
HISTORY_DIR = "conversations"
|
|
os.makedirs(HISTORY_DIR, exist_ok=True)
|
|
|
|
def get_history_file(user):
|
|
"""Renvoie le chemin du fichier de conversation pour un utilisateur."""
|
|
return os.path.join(HISTORY_DIR, f"{user}.json")
|
|
|
|
def load_conversation(user):
|
|
"""Charge l'historique des conversations d'un utilisateur."""
|
|
history_file = get_history_file(user)
|
|
if os.path.exists(history_file):
|
|
with open(history_file, "r", encoding="utf-8") as f:
|
|
try:
|
|
return json.load(f)
|
|
except json.JSONDecodeError:
|
|
return []
|
|
return []
|
|
|
|
def save_conversation(user, user_prompt, bot_response):
|
|
"""Sauvegarde une conversation utilisateur dans son fichier dédié."""
|
|
history_file = get_history_file(user)
|
|
history = load_conversation(user)
|
|
history.append({"user": user_prompt, "bot": bot_response})
|
|
|
|
with open(history_file, "w", encoding="utf-8") as f:
|
|
json.dump(history, f, indent=4, ensure_ascii=False)
|
|
|
|
def chat_with_ollama(user, prompt):
|
|
"""Envoie le prompt à Ollama et récupère la réponse, puis l'enregistre."""
|
|
data = {"model": MODEL_NAME, "prompt": prompt}
|
|
headers = {"Content-Type": "application/json"}
|
|
|
|
response = requests.post(OLLAMA_SERVER, json=data, headers=headers, stream=True)
|
|
|
|
if response.status_code != 200:
|
|
return f"Erreur HTTP {response.status_code}: {response.text}"
|
|
|
|
full_response = ""
|
|
for line in response.iter_lines():
|
|
if line:
|
|
try:
|
|
json_line = line.decode('utf-8')
|
|
parsed_json = json.loads(json_line)
|
|
full_response += parsed_json.get("response", "")
|
|
except json.JSONDecodeError as e:
|
|
print(f"Erreur de parsing JSON : {e}")
|
|
|
|
if full_response:
|
|
save_conversation(user, prompt, full_response) # Sauvegarde avec user
|
|
|
|
return full_response if full_response else "Erreur : réponse vide"
|
|
|
|
def chat_interface(user, user_input):
|
|
"""Gestion de l'interface avec utilisateur et historique spécifique."""
|
|
if not user:
|
|
return "⚠️ Veuillez entrer un identifiant utilisateur avant de chatter."
|
|
|
|
response = chat_with_ollama(user, user_input)
|
|
user_history = load_conversation(user)
|
|
|
|
# Affichage des derniers messages sous forme de bulle de chat
|
|
history_display = ""
|
|
for c in user_history[-5:]:
|
|
history_display += f"👤 **{user}** : {c['user']}\n"
|
|
history_display += f"🤖 **Bot** : {c['bot']}\n\n"
|
|
|
|
return f"📌 **Utilisateur :** {user}\n\n{history_display}👤 **{user}** : {user_input}\n🤖 **Bot** : {response}"
|
|
|
|
# Interface Gradio stylisée
|
|
with gr.Blocks(css="""
|
|
body {background-color: #f8f9fa; font-family: Arial, sans-serif;}
|
|
.gradio-container {max-width: 700px; margin: auto;}
|
|
.chatbox {background: white; padding: 15px; border-radius: 10px; box-shadow: 2px 2px 15px rgba(0,0,0,0.1);}
|
|
.gr-textbox {margin-bottom: 10px;}
|
|
.gr-button {background-color: #007bff; color: white; border-radius: 5px;}
|
|
""") as iface:
|
|
with gr.Column():
|
|
gr.Markdown("<h2 style='text-align: center;'>💬 Chat avec Ollama OVH</h2>")
|
|
|
|
with gr.Row():
|
|
user_input = gr.Textbox(label="Nom d'utilisateur", placeholder="Entrez votre nom...", interactive=True)
|
|
|
|
with gr.Row():
|
|
chat_input = gr.Textbox(label="Votre message", placeholder="Tapez votre question ici...")
|
|
|
|
with gr.Row():
|
|
chat_button = gr.Button("Envoyer")
|
|
|
|
with gr.Row():
|
|
chat_output = gr.Textbox(label="Réponse", interactive=False, elem_id="chatbox")
|
|
|
|
chat_button.click(chat_interface, inputs=[user_input, chat_input], outputs=chat_output)
|
|
|
|
iface.launch()
|