Add agent and LLM classes

Ladebeze66 2025-04-06 14:41:45 +02:00
parent 3c4aefd508
commit af5847eb0d
14 changed files with 467 additions and 0 deletions

agents/agent_image_analyser.py Normal file

@@ -0,0 +1,14 @@
from .base_agent import BaseAgent

class AgentImageAnalyser(BaseAgent):
    """
    Agent that analyses images and extracts the relevant information.
    """
    def __init__(self, llm):
        super().__init__("AgentImageAnalyser", llm)

    def executer(self, image_description: str, contexte: str) -> str:
        prompt = f"Analyse cette image en tenant compte du contexte suivant : {contexte}. Description de l'image : {image_description}"
        response = self.llm.interroger(prompt)
        self.ajouter_historique("analyse_image", {"image": image_description, "contexte": contexte}, response)
        return response
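A minimal usage sketch, assuming the Ollama backend added later in this commit; the model name and the ticket wording are invented placeholders:

from llm_classes.ollama import Ollama
from agents.agent_image_analyser import AgentImageAnalyser

llm = Ollama(modele="llava")  # hypothetical vision-capable model on the Ollama server
agent = AgentImageAnalyser(llm)

reponse = agent.executer(
    image_description="Capture d'écran d'une erreur 500 à la connexion",
    contexte="Ticket signalant une panne de connexion",
)
print(reponse)
print(agent.historique[-1]["action"])  # -> "analyse_image"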

agents/agent_image_sorter.py Normal file

@@ -0,0 +1,15 @@
from .base_agent import BaseAgent

class AgentImageSorter(BaseAgent):
    """
    Agent that sorts images according to their content.
    """
    def __init__(self, llm):
        super().__init__("AgentImageSorter", llm)

    def executer(self, image_description: str) -> bool:
        prompt = f"L'image suivante est-elle pertinente pour BRG_Lab ? Description : {image_description}"
        response = self.llm.interroger(prompt)
        result = "oui" in response.lower()
        self.ajouter_historique("tri_image", image_description, result)
        return result

agents/agent_json_analyser.py Normal file

@@ -0,0 +1,15 @@
from .base_agent import BaseAgent
from typing import Dict

class AgentJsonAnalyser(BaseAgent):
    """
    Agent that analyses JSON files and extracts the relevant information.
    """
    def __init__(self, llm):
        super().__init__("AgentJsonAnalyser", llm)

    def executer(self, ticket_json: Dict) -> str:
        prompt = f"Analyse ce ticket JSON et identifie les éléments importants : {ticket_json}"
        response = self.llm.interroger(prompt)
        self.ajouter_historique("analyse_json", ticket_json, response)
        return response

agents/agent_report_generator.py Normal file

@@ -0,0 +1,28 @@
import json
from .base_agent import BaseAgent
from datetime import datetime
from typing import Dict

class AgentReportGenerator(BaseAgent):
    """
    Agent that generates a report from the collected information.
    """
    def __init__(self, llm):
        super().__init__("AgentReportGenerator", llm)

    def executer(self, rapport_data: Dict, filename: str):
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        # Save the JSON report
        json_path = f"../reports/json_reports/{filename}_{timestamp}.json"
        with open(json_path, "w", encoding="utf-8") as f_json:
            json.dump(rapport_data, f_json, ensure_ascii=False, indent=4)
        # Save the Markdown report
        md_path = f"../reports/markdown_reports/{filename}_{timestamp}.md"
        with open(md_path, "w", encoding="utf-8") as f_md:
            f_md.write(f"# Rapport {filename}\n\n")
            for key, value in rapport_data.items():
                f_md.write(f"## {key.capitalize()}\n{value}\n\n")
        self.ajouter_historique("generation_rapport", filename, "Rapport généré")
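A usage sketch; executer writes to relative ../reports paths and open() does not create missing directories, so the sketch creates the tree first (the layout is assumed from the code above):

import os
from agents.agent_report_generator import AgentReportGenerator

os.makedirs("../reports/json_reports", exist_ok=True)
os.makedirs("../reports/markdown_reports", exist_ok=True)

generator = AgentReportGenerator(llm)  # any BaseLLM instance
generator.executer(
    rapport_data={"resume": "Panne de connexion", "details": "Erreur 500 côté serveur"},
    filename="ticket_1234",
)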

agents/base_agent.py Normal file

@@ -0,0 +1,22 @@
from abc import ABC, abstractmethod
from typing import List, Dict, Any

class BaseAgent(ABC):
    """
    Base class for all agents.
    """
    def __init__(self, nom: str, llm: Any):
        self.nom = nom
        self.llm = llm
        self.historique: List[Dict[str, Any]] = []

    def ajouter_historique(self, action: str, input_data: Any, output_data: Any):
        """Record one action with its input and output in the agent history."""
        self.historique.append({
            "action": action,
            "input": input_data,
            "output": output_data
        })

    @abstractmethod
    def executer(self, *args, **kwargs) -> Any:
        pass
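A concrete agent only has to implement executer and may log each call through ajouter_historique; a minimal sketch with a hypothetical subclass:

from typing import Any
from agents.base_agent import BaseAgent

class AgentEcho(BaseAgent):
    """Hypothetical agent that forwards its input verbatim to the LLM."""
    def __init__(self, llm: Any):
        super().__init__("AgentEcho", llm)

    def executer(self, question: str) -> str:
        response = self.llm.interroger(question)
        self.ajouter_historique("echo", question, response)
        return response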

llm_classes/base_llm.py Normal file

@@ -0,0 +1,81 @@
import abc
import json
import requests
from datetime import datetime, timedelta
from typing import Dict, Any, List, Optional

class BaseLLM(abc.ABC):
    """Abstract base class for LLM backends."""
    def __init__(self, modele: str):
        self.modele: str = modele
        self.prompt_system: str = ""
        self.params: Dict[str, Any] = {
            "temperature": 0.8,
            "top_p": 0.9,
            "top_k": 40,
            "max_tokens": 1000,
            "presence_penalty": 0,
            "frequency_penalty": 0,
            "stop": None
        }
        self.dureeTraitement: timedelta = timedelta()
        self.reponseErreur: bool = False
        self.heureDepart: Optional[datetime] = None
        self.heureFin: Optional[datetime] = None

    @abc.abstractmethod
    def urlBase(self) -> str:
        pass

    @abc.abstractmethod
    def cleAPI(self) -> str:
        pass

    @abc.abstractmethod
    def urlFonction(self) -> str:
        pass

    @abc.abstractmethod
    def _preparer_contenu(self, question: str) -> Dict[str, Any]:
        pass

    @abc.abstractmethod
    def _traiter_reponse(self, reponse: requests.Response) -> str:
        pass

    def interroger(self, question: str) -> str:
        url = self.urlBase() + self.urlFonction()
        headers = {"Content-Type": "application/json"}
        if self.cleAPI():
            headers["Authorization"] = f"Bearer {self.cleAPI()}"
        contenu = self._preparer_contenu(question)
        self.heureDepart = datetime.now()
        try:
            response = requests.post(url=url, headers=headers, json=contenu, timeout=120)
            self.heureFin = datetime.now()
            self.dureeTraitement = self.heureFin - self.heureDepart
            if response.status_code in [200, 201]:
                self.reponseErreur = False
                return self._traiter_reponse(response)
            self.reponseErreur = True
            return response.text
        except Exception as e:
            self.heureFin = datetime.now()
            self.dureeTraitement = self.heureFin - self.heureDepart
            self.reponseErreur = True
            return str(e)

    def configurer(self, **kwargs):  # update generation parameters
        self.params.update(kwargs)
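interroger assembles the URL from urlBase() and urlFonction(), adds a bearer header only when cleAPI() returns a non-empty key, and times the round trip; on any HTTP or network failure it sets reponseErreur and returns the raw error text instead of raising. A caller-side sketch, valid for any concrete subclass:

# `llm` is any concrete BaseLLM subclass (Ollama, MistralLarge, ... below).
llm.prompt_system = "Tu es un assistant d'analyse de tickets."
llm.configurer(temperature=0.2)

texte = llm.interroger("Résume ce ticket en une phrase.")
if llm.reponseErreur:
    print(f"Echec après {llm.dureeTraitement} : {texte}")
else:
    print(f"Réponse en {llm.dureeTraitement} : {texte}")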

@@ -0,0 +1,31 @@
from .base_llm import BaseLLM
import requests

class MistralLarge(BaseLLM):
    def __init__(self):
        super().__init__("mistral-large-latest")
        self.configurer(temperature=0.2, top_p=1)

    def urlBase(self) -> str:
        return "https://api.mistral.ai/v1/"

    def cleAPI(self) -> str:
        return "2iGzTzE9csRQ9IoASoUjplHwEjA200Vh"

    def urlFonction(self) -> str:
        return "chat/completions"

    def _preparer_contenu(self, question: str) -> dict:
        return {
            "model": self.modele,
            "messages": [
                {"role": "system", "content": self.prompt_system},
                {"role": "user", "content": question}
            ],
            **self.params
        }

    def _traiter_reponse(self, reponse: requests.Response) -> str:
        data = reponse.json()
        return data["choices"][0]["message"]["content"]

@@ -0,0 +1,31 @@
from .base_llm import BaseLLM
import requests

class MistralLargePixtral(BaseLLM):
    def __init__(self):
        super().__init__("mistral-large_pixtral-2411")
        self.configurer(temperature=0.2, top_p=1)

    def urlBase(self) -> str:
        return "https://api.mistral.ai/v1/"

    def cleAPI(self) -> str:
        return "2iGzTzE9csRQ9IoASoUjplHwEjA200Vh"

    def urlFonction(self) -> str:
        return "chat/completions"

    def _preparer_contenu(self, question: str) -> dict:
        return {
            "model": self.modele,
            "messages": [
                {"role": "system", "content": self.prompt_system},
                {"role": "user", "content": question}
            ],
            **self.params
        }

    def _traiter_reponse(self, reponse: requests.Response) -> str:
        data = reponse.json()
        return data["choices"][0]["message"]["content"]

@@ -0,0 +1,31 @@
from .base_llm import BaseLLM
import requests

class MistralMedium(BaseLLM):
    def __init__(self):
        super().__init__("mistral-medium-latest")
        self.configurer(temperature=0.2, top_p=1)

    def urlBase(self) -> str:
        return "https://api.mistral.ai/v1/"

    def cleAPI(self) -> str:
        return "2iGzTzE9csRQ9IoASoUjplHwEjA200Vh"

    def urlFonction(self) -> str:
        return "chat/completions"

    def _preparer_contenu(self, question: str) -> dict:
        return {
            "model": self.modele,
            "messages": [
                {"role": "system", "content": self.prompt_system},
                {"role": "user", "content": question}
            ],
            **self.params
        }

    def _traiter_reponse(self, reponse: requests.Response) -> str:
        data = reponse.json()
        return data["choices"][0]["message"]["content"]

llm_classes/ollama.py Normal file

@@ -0,0 +1,92 @@
from .base_llm import BaseLLM
import requests
from datetime import timedelta
from typing import Dict, Any

class Ollama(BaseLLM):
    """
    Optimised class for interacting with the Ollama API.
    """
    def __init__(self, modele: str = ""):
        super().__init__(modele)
        self.api_url = "http://217.182.105.173:11434/api/generate"
        self.params: Dict[str, Any] = {
            "temperature": 0.8,
            "top_p": 0.9,
            "top_k": 40,
            "num_ctx": 2048,
            "repeat_penalty": 1.1,
            "repeat_last_n": 64,
            "mirostat": 0,
            "mirostat_eta": 0.1,
            "mirostat_tau": 5,
            "keep_alive": int(timedelta(minutes=5).total_seconds()),
            "num_predict": -1,
            "min_p": 0,
            "seed": 0,
            "stop": [],
            "stream": False
        }

    def urlBase(self) -> str:
        """
        Return the base URL of the Ollama API.
        """
        return "http://217.182.105.173:11434/"

    def cleAPI(self) -> str:
        """
        Ollama does not require an API key by default.
        """
        return ""

    def urlFonction(self) -> str:
        """
        Return the Ollama endpoint used to generate a completion.
        """
        return "api/generate"

    def _preparer_contenu(self, question: str) -> Dict[str, Any]:
        """
        Build the request payload specific to Ollama.
        """
        contenu = {
            "model": self.modele,
            "prompt": question,
            "options": {
                "temperature": self.params["temperature"],
                "top_p": self.params["top_p"],
                "top_k": self.params["top_k"],
                "num_ctx": self.params["num_ctx"],
                "repeat_penalty": self.params["repeat_penalty"],
                "repeat_last_n": self.params["repeat_last_n"],
                "mirostat": self.params["mirostat"],
                "mirostat_eta": self.params["mirostat_eta"],
                "mirostat_tau": self.params["mirostat_tau"],
                "keep_alive": self.params["keep_alive"],
                "num_predict": self.params["num_predict"],
                "min_p": self.params["min_p"],
                "seed": self.params["seed"],
                "stop": self.params["stop"],
            },
            "stream": self.params["stream"]
        }
        return contenu

    def _traiter_reponse(self, reponse: requests.Response) -> str:
        """
        Extract and return the text produced by Ollama.
        """
        data = reponse.json()
        return data.get("response", "")

    def configurer(self, **kwargs):
        """
        Update Ollama-specific parameters; unknown keys are ignored.
        """
        for key, value in kwargs.items():
            if key in self.params:
                self.params[key] = value
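A usage sketch for the Ollama backend; the model name is a placeholder for whatever the server at 217.182.105.173 actually hosts:

from llm_classes.ollama import Ollama

llm = Ollama(modele="mistral")   # hypothetical model name
llm.configurer(temperature=0.2, num_ctx=4096, seed=42)
llm.configurer(max_tokens=500)   # silently ignored: not an Ollama parameter

print(llm.interroger("Explique la différence entre top_p et top_k."))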

@@ -0,0 +1,31 @@
from .base_llm import BaseLLM
import requests

class Pixtral12b(BaseLLM):
    def __init__(self):
        super().__init__("pixtral-12b-latest")
        self.configurer(temperature=0.2, top_p=1)

    def urlBase(self) -> str:
        return "https://api.mistral.ai/v1/"

    def cleAPI(self) -> str:
        return "2iGzTzE9csRQ9IoASoUjplHwEjA200Vh"

    def urlFonction(self) -> str:
        return "chat/completions"

    def _preparer_contenu(self, question: str) -> dict:
        return {
            "model": self.modele,
            "messages": [
                {"role": "system", "content": self.prompt_system},
                {"role": "user", "content": question}
            ],
            **self.params
        }

    def _traiter_reponse(self, reponse: requests.Response) -> str:
        data = reponse.json()
        return data["choices"][0]["message"]["content"]

@@ -0,0 +1,31 @@
from .base_llm import BaseLLM
import requests

class PixtralLarge(BaseLLM):
    def __init__(self):
        super().__init__("pixtral-large-latest")
        self.configurer(temperature=0.2, top_p=1)

    def urlBase(self) -> str:
        return "https://api.mistral.ai/v1/"

    def cleAPI(self) -> str:
        return "2iGzTzE9csRQ9IoASoUjplHwEjA200Vh"

    def urlFonction(self) -> str:
        return "chat/completions"

    def _preparer_contenu(self, question: str) -> dict:
        return {
            "model": self.modele,
            "messages": [
                {"role": "system", "content": self.prompt_system},
                {"role": "user", "content": question}
            ],
            **self.params
        }

    def _traiter_reponse(self, reponse: requests.Response) -> str:
        data = reponse.json()
        return data["choices"][0]["message"]["content"]

@@ -0,0 +1,31 @@
from .base_llm import BaseLLM
import requests

class PixtralMedium(BaseLLM):
    def __init__(self):
        super().__init__("pixtral-medium-latest")
        self.configurer(temperature=0.2, top_p=1)

    def urlBase(self) -> str:
        return "https://api.mistral.ai/v1/"

    def cleAPI(self) -> str:
        return "2iGzTzE9csRQ9IoASoUjplHwEjA200Vh"

    def urlFonction(self) -> str:
        return "chat/completions"

    def _preparer_contenu(self, question: str) -> dict:
        return {
            "model": self.modele,
            "messages": [
                {"role": "system", "content": self.prompt_system},
                {"role": "user", "content": question}
            ],
            **self.params
        }

    def _traiter_reponse(self, reponse: requests.Response) -> str:
        data = reponse.json()
        return data["choices"][0]["message"]["content"]

orchestrator.py Normal file

@@ -0,0 +1,14 @@
import os
import json
from typing import List, Dict
from agents.agent_json_analyser import AgentJsonAnalyser
from agents.agent_image_sorter import AgentImageSorter
from agents.agent_image_analyser import AgentImageAnalyser
from agents.agent_report_generator import AgentReportGenerator
from llm_classes.MistralMesh import MistralMesh

class Orchestrator:
    """
    """