import json
import os
from typing import Dict, List

import requests

from .llm import LLM


class Mistral(LLM):
    """
    Integration with the Mistral AI API.

    This class inherits from the LLM base class.
    """

    def __init__(self):
        """
        Initialize the Mistral-specific attributes.
        """
        super().__init__()

        # Mistral-specific attributes
        self.maxToken: int = 1000
        self.seed: int = 0
        self.presence_penalty: float = 0.0
        self.frequency_penalty: float = 0.0
        self.n: int = 1
        self.prediction: Dict[str, str] = {"type": "content", "content": ""}
        self.safe_prompt: bool = False
        # Empty by default; a non-empty value is sent as the API "stop" parameter
        self.o_stop: str = ""
        self._m_tabModels: List[str] = []

        # Defaults for attributes inherited from the LLM base class
        self.Modele = "mistral-large-latest"
        self.o_temperature = 0.2
        self.o_top_p = 1

    def urlBase(self) -> str:
        """
        Return the base URL of the Mistral API.
        """
        return "https://api.mistral.ai/v1/"

    def cleAPI(self) -> str:
        """
        Return the Mistral API key.

        The key is read from the MISTRAL_API_KEY environment variable so that
        the secret is never hard-coded in source control.
        """
        return os.environ.get("MISTRAL_API_KEY", "")

    def urlFonction(self) -> str:
        """
        Return the default endpoint path.
        """
        return "chat/completions"

    def model_list(self) -> List[str]:
        """
        Fetch the list of models available on Mistral AI.

        If the list has already been fetched, return the cached list.
        """
        if self._m_tabModels:
            return self._m_tabModels

        url = self.urlBase() + "models"
        headers = {"Content-Type": "application/json"}

        if self.cleAPI() != "":
            headers["Authorization"] = "Bearer " + self.cleAPI()

        try:
            response = requests.get(url=url, headers=headers)

            if response.status_code == 200:
                data = json.loads(response.text)
                for item in data["data"]:
                    self._m_tabModels.append(item["id"])
            else:
                # On an HTTP error, the response body is cached in the list
                self._m_tabModels.append(response.text)
        except Exception as e:
            self._m_tabModels.append(str(e))

        return self._m_tabModels

    def _even_LLM_POST(self, question: str) -> None:
        """
        Build the request body for Mistral.
        """
        self._Contenu["messages"] = [
            {"role": "system", "content": self.prompt_system},
            {"role": "user", "content": question},
        ]

        self._Contenu["temperature"] = self.o_temperature
        self._Contenu["top_p"] = self.o_top_p

        if self.maxToken != 0:
            self._Contenu["max_tokens"] = self.maxToken

        if self.seed != 0:
            self._Contenu["random_seed"] = self.seed

        if self.format != "":
            self._Contenu["response_format"] = self.format

        # Additional parameters
        self._Contenu["presence_penalty"] = self.presence_penalty
        self._Contenu["frequency_penalty"] = self.frequency_penalty
        self._Contenu["n"] = self.n
        self._Contenu["prediction"] = self.prediction
        self._Contenu["safe_prompt"] = self.safe_prompt

        # Only send a stop sequence when one has been set
        if self.o_stop != "":
            self._Contenu["stop"] = self.o_stop

    def _interrogerRetourneReponse(self, reponse: str) -> str:
        """
        Extract the answer from the JSON returned by Mistral.
        """
        data = json.loads(reponse)
        return data["choices"][0]["message"]["content"]