import requests
import json
import os
from datetime import datetime
from typing import Dict, Any, Optional

# Configuration
OLLAMA_URL = "http://217.182.105.173:11434"
MODEL_NAME = "llama3.2-vision:90b-instruct-q8_0"
# Seconds to wait for the Ollama server before giving up (avoids hanging
# forever when the remote host is unreachable).
REQUEST_TIMEOUT = 30


def get_ollama_info() -> Optional[Dict[str, Any]]:
    """
    Fetch all available information about the model from Ollama.

    Calls the Ollama ``/api/show`` endpoint for ``MODEL_NAME``.

    Returns:
        Dict with the raw API response, or None on HTTP or connection error.
    """
    try:
        response = requests.post(
            f"{OLLAMA_URL}/api/show",
            json={"name": MODEL_NAME},
            headers={"Content-Type": "application/json"},
            timeout=REQUEST_TIMEOUT,
        )
        if response.status_code == 200:
            return response.json()
        print(f"❌ Erreur {response.status_code}: {response.text}")
        return None
    except requests.exceptions.RequestException as e:
        print(f"❌ Erreur de connexion: {str(e)}")
        return None


def save_raw_response(data: Dict[str, Any]) -> str:
    """
    Save the raw API response to a timestamped JSON file under ``settings/``.

    Args:
        data: Data to save.

    Returns:
        Name of the created file.
    """
    os.makedirs('settings', exist_ok=True)
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = f"settings/ollama_raw_{timestamp}.json"
    with open(filename, 'w', encoding='utf-8') as f:
        json.dump(data, f, indent=2, ensure_ascii=False)
    # BUG FIX: the original printed the literal text "(unknown)" instead of
    # interpolating the actual filename.
    print(f"✅ Réponse brute sauvegardée dans: {filename}")
    return filename


def _coerce(value: str) -> Any:
    """Convert a parameter value to float when possible, else return the stripped string."""
    try:
        return float(value)
    except ValueError:
        return value.strip()


def extract_model_parameters(data: Dict[str, Any]) -> Dict[str, Any]:
    """
    Extract and organize the model's parameters from the raw Ollama response.

    Args:
        data: Raw data from Ollama's ``/api/show`` endpoint.

    Returns:
        Dict with the information grouped into basic / technical / vision
        sections plus the default generation parameters.
    """
    details = data.get("details", {})
    model_meta = data.get("model_info", {})
    projector = data.get("projector_info", {})

    model_info = {
        "basic_info": {
            "name": MODEL_NAME,
            "family": details.get("family", ""),
            "parameter_size": details.get("parameter_size", ""),
            "quantization": details.get("quantization_level", ""),
            "modified_at": data.get("modified_at", ""),
        },
        "technical_info": {
            "architecture": model_meta.get("general.architecture", ""),
            "context_length": model_meta.get("mllama.context_length", 0),
            "vocab_size": model_meta.get("mllama.vocab_size", 0),
            "embedding_length": model_meta.get("mllama.embedding_length", 0),
        },
        "vision_info": {
            "image_size": projector.get("mllama.vision.image_size", 0),
            "patch_size": projector.get("mllama.vision.patch_size", 0),
            "max_num_tiles": projector.get("mllama.vision.max_num_tiles", 0),
        },
        "default_parameters": {},
    }

    # Extract parameters declared in the Modelfile ("PARAMETER key value").
    # Guard against malformed lines: the original unconditional 3-way unpack
    # raised ValueError on a line with fewer than 3 tokens.
    if "modelfile" in data:
        for line in data["modelfile"].split('\n'):
            if line.startswith('PARAMETER'):
                parts = line.split(None, 2)
                if len(parts) == 3:
                    model_info["default_parameters"][parts[1]] = _coerce(parts[2])

    # Merge in the "parameters" section ("key value" per line); same guard
    # against lines that carry a key without a value.
    if "parameters" in data:
        for line in data["parameters"].split('\n'):
            parts = line.split(None, 1)
            if len(parts) == 2:
                model_info["default_parameters"][parts[0]] = _coerce(parts[1])

    return model_info


def main():
    """Fetch, save, and summarize the Ollama model information."""
    print(f"🔄 Récupération des informations pour le modèle {MODEL_NAME}...")
    ollama_data = get_ollama_info()

    if ollama_data:
        # Persist the raw response first so it survives any parsing error.
        raw_file = save_raw_response(ollama_data)

        # Extract and organize the information.
        model_info = extract_model_parameters(ollama_data)

        # Save the organized information next to the raw dump.
        info_file = raw_file.replace('raw', 'info')
        with open(info_file, 'w', encoding='utf-8') as f:
            json.dump(model_info, f, indent=2, ensure_ascii=False)
        print(f"✅ Informations organisées sauvegardées dans: {info_file}")

        # Print a human-readable summary.
        print("\n📊 Résumé des informations :")
        print(f"• Modèle: {model_info['basic_info']['name']}")
        print(f"• Famille: {model_info['basic_info']['family']}")
        print(f"• Taille: {model_info['basic_info']['parameter_size']}")
        print(f"• Quantization: {model_info['basic_info']['quantization']}")

        print("\n🔧 Paramètres par défaut:")
        for key, value in model_info['default_parameters'].items():
            print(f"• {key}: {value}")


if __name__ == "__main__":
    main()