Mirror of https://github.com/Ladebeze66/llm_lab.git (synced 2025-12-13 09:06:49 +01:00)

Commit 903bdf57ec: "firstcommit"
BIN  agents/__pycache__/roles.cpython-312.pyc  Normal file
Binary file not shown.
44  agents/roles.py  Normal file
@@ -0,0 +1,44 @@
# agents/roles.py

AGENTS = {
    "assistant_technique": {
        "system_prompt": (
            "Tu es un assistant technique spécialisé en informatique et en support aux utilisateurs. "
            "Tu réponds de manière concise, claire et structurée aux questions techniques."
        ),
        "params": {
            "temperature": 0.5,
            "top_p": 0.9
        }
    },
    "juriste": {
        "system_prompt": (
            "Tu es un juriste expert en droit du travail français. "
            "Tes réponses sont précises, sourcées et adaptées au contexte légal."
        ),
        "params": {
            "temperature": 0.3,
            "top_p": 0.8
        }
    },
    "chercheur": {
        "system_prompt": (
            "Tu es un chercheur scientifique qui explore des solutions innovantes. "
            "Tu analyses les problèmes en profondeur et proposes des pistes pertinentes."
        ),
        "params": {
            "temperature": 0.9,
            "top_p": 1.0
        }
    },
    "formateur": {
        "system_prompt": (
            "Tu es un formateur pédagogique et bienveillant. "
            "Tu expliques des concepts de façon claire et progressive, adaptés à des débutants."
        ),
        "params": {
            "temperature": 0.6,
            "top_p": 0.95
        }
    }
}
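A quick way to sanity-check the role table above (a minimal sketch, not part of the commit; it only assumes the AGENTS dict defined in this file and the repo root being on the import path):

from agents.roles import AGENTS

# Every role should expose a non-empty system prompt and sane sampling parameters.
for name, cfg in AGENTS.items():
    assert cfg["system_prompt"].strip(), f"{name}: empty system prompt"
    assert 0.0 <= cfg["params"]["temperature"] <= 1.0
    assert 0.0 < cfg["params"]["top_p"] <= 1.0
    print(name, cfg["params"])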
0  config/mistral7b_config.yaml  Normal file
BIN  core/__pycache__/base_llm.cpython-312.pyc  Normal file
Binary file not shown.
BIN  core/__pycache__/factory.cpython-312.pyc  Normal file
Binary file not shown.
BIN  core/__pycache__/mistral7b.cpython-312.pyc  Normal file
Binary file not shown.
68  core/base_llm.py  Normal file
@@ -0,0 +1,68 @@
import os
from datetime import datetime
import uuid


class BaseLLM:
    def __init__(self, model_name, engine="Ollama", base_params=None, stream=False):
        """
        Base class for all LLMs. Holds no model-specific fixed values;
        parameters are injected by the concrete subclasses.
        """
        self.model = model_name
        self.engine = engine
        self.agent = None
        self.system_prompt = ""

        self.params = base_params or {}
        self.params["model"] = self.model
        self.params["stream"] = stream

        self.logs_dir = "logs"
        os.makedirs(self.logs_dir, exist_ok=True)

    def set_role(self, role_name, role_config):
        """Assign an agent role, i.e. a system prompt plus custom parameters."""
        self.agent = role_name
        self.system_prompt = role_config.get("system_prompt", "")
        self.params.update(role_config.get("params", {}))

    def generate(self, user_prompt):
        """To be overridden in child classes to actually call the model."""
        raise NotImplementedError("La méthode generate() doit être implémentée dans la classe enfant.")

    def _format_prompt(self, user_prompt):
        if self.system_prompt:
            return f"{self.system_prompt}\n\n{user_prompt}"
        return user_prompt

    def _log_result(self, prompt, response):
        now = datetime.now()
        now_str = now.strftime("%Y-%m-%d_%H-%M-%S")
        uid = str(uuid.uuid4())[:8]  # unique ID for this log
        filename = f"{self.logs_dir}/{self.model}_{now_str}_{uid}.md"

        with open(filename, "w", encoding="utf-8") as f:
            f.write(f"# Résultat génération {self.model}\n\n")
            f.write(f"**Test ID :** {uid}\n")
            f.write(f"**Date :** {now.strftime('%Y-%m-%d %H:%M:%S')}\n")
            f.write(f"**Modèle :** {self.model}\n")
            f.write(f"**Moteur :** {self.engine}\n")
            f.write(f"**Rôle :** {self.agent or 'Aucun'}\n")
            f.write(f"**Prompt :** {prompt}\n\n---\n\n")
            f.write("**Paramètres utilisés :**\n\n")
            for k, v in self.params.items():
                if k not in ["model", "prompt"]:
                    value = " / ".join(map(str, v)) if isinstance(v, list) else v or "*Aucun*"
                    f.write(f"- {k.replace('_', ' ').title()} : {value}\n")
            f.write(f"\n---\n\n**Réponse du modèle :**\n\n{response.strip()}\n")

        self._update_index(filename)
        return filename

    def _update_index(self, filename):
        with open("log_index.md", "a", encoding="utf-8") as idx:
            idx.write(f"- **{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}** | ")
            idx.write(f"🧠 `{self.model}` ({self.engine}) | 🎭 `{self.agent or 'Aucun'}` | ")
            idx.write(f"[Voir le log]({filename})\n")
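To illustrate the extension point, here is a minimal sketch of a subclass (a hypothetical EchoLLM, not part of the repo) that fulfils the generate() contract without any network call, which is handy for exercising the logging path:

from core.base_llm import BaseLLM


class EchoLLM(BaseLLM):
    """Hypothetical test double: returns the formatted prompt instead of calling a model."""

    def __init__(self):
        super().__init__(model_name="echo", engine="none", base_params={"temperature": 0.0})

    def generate(self, user_prompt):
        prompt = self._format_prompt(user_prompt)  # prepends the system prompt, if any
        result = f"[echo] {prompt}"
        self._log_result(user_prompt, result)      # writes a Markdown log under logs/
        return result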
21  core/factory.py  Normal file
@@ -0,0 +1,21 @@
from core.mistral7b import Mistral7B


class LLMFactory:
    """
    Factory that creates LLM model instances dynamically from a textual identifier.
    """
    _registry = {
        "mistral7b": Mistral7B
        # Register additional LLM models here
    }

    @staticmethod
    def create(model_name: str):
        """
        Create an LLM model instance from its textual identifier.
        """
        model_name = model_name.lower()
        if model_name not in LLMFactory._registry:
            raise ValueError(f"Modèle LLM non supporté: {model_name}")
        return LLMFactory._registry[model_name]()
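A sketch of how the factory is meant to be called; the lookup is case-insensitive because of lower(), and unknown identifiers raise ValueError:

from core.factory import LLMFactory

llm = LLMFactory.create("Mistral7B")  # returns a Mistral7B instance despite the capital letters

try:
    LLMFactory.create("gpt4")
except ValueError as exc:
    print(exc)  # Modèle LLM non supporté: gpt4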
37  core/mistral7b.py  Normal file
@@ -0,0 +1,37 @@
from core.base_llm import BaseLLM
import requests


class Mistral7B(BaseLLM):
    def __init__(self):
        # Specific model name
        model_name = "mistral:latest"
        # Engine used for inference
        engine = "Ollama"

        # Default parameters specific to Mistral7B
        default_params = {
            "temperature": 0.7,     # Controls creativity: 0 = deterministic, 1 = more creative
            "top_p": 0.9,           # Nucleus sampling: keeps tokens up to a cumulative probability of top_p
            "top_k": 50,            # Considers the top_k most probable tokens at each generation step
            "repeat_penalty": 1.1,  # Penalizes repetition: >1 reduces repetition, 1 means no penalty
            "num_predict": 512,     # Maximum number of tokens to generate in the response
            "stop": [],             # List of sequences that stop generation when encountered
            "seed": None,           # Seed for reproducibility: fix it to get identical results
            "stream": False,        # If True, the response is streamed
            "raw": False            # If True, disables the automatic system prompt
        }

        super().__init__(model_name=model_name, engine=engine, base_params=default_params)

    def generate(self, user_prompt):
        prompt = self._format_prompt(user_prompt)
        payload = self.params.copy()
        payload["prompt"] = prompt

        response = requests.post("http://217.182.105.173:11434/api/generate", json=payload)
        if not response.ok:
            raise Exception(f"Erreur API Ollama : {response.status_code} - {response.text}")

        result = response.json().get("response", "")
        self._log_result(user_prompt, result)
        return result
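Putting the three modules together, a minimal end-to-end sketch (it assumes the Ollama server behind the hardcoded URL above is reachable, since generate() performs a real HTTP call and writes a Markdown log under logs/):

from agents.roles import AGENTS
from core.factory import LLMFactory

llm = LLMFactory.create("mistral7b")
llm.set_role("juriste", AGENTS["juriste"])  # overrides temperature/top_p with the role's values

answer = llm.generate("Quelle est la durée légale du travail en France ?")
print(answer)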
247  llmlab/bin/Activate.ps1  Normal file
@@ -0,0 +1,247 @@
|
||||
<#
|
||||
.Synopsis
|
||||
Activate a Python virtual environment for the current PowerShell session.
|
||||
|
||||
.Description
|
||||
Pushes the python executable for a virtual environment to the front of the
|
||||
$Env:PATH environment variable and sets the prompt to signify that you are
|
||||
in a Python virtual environment. Makes use of the command line switches as
|
||||
well as the `pyvenv.cfg` file values present in the virtual environment.
|
||||
|
||||
.Parameter VenvDir
|
||||
Path to the directory that contains the virtual environment to activate. The
|
||||
default value for this is the parent of the directory that the Activate.ps1
|
||||
script is located within.
|
||||
|
||||
.Parameter Prompt
|
||||
The prompt prefix to display when this virtual environment is activated. By
|
||||
default, this prompt is the name of the virtual environment folder (VenvDir)
|
||||
surrounded by parentheses and followed by a single space (ie. '(.venv) ').
|
||||
|
||||
.Example
|
||||
Activate.ps1
|
||||
Activates the Python virtual environment that contains the Activate.ps1 script.
|
||||
|
||||
.Example
|
||||
Activate.ps1 -Verbose
|
||||
Activates the Python virtual environment that contains the Activate.ps1 script,
|
||||
and shows extra information about the activation as it executes.
|
||||
|
||||
.Example
|
||||
Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv
|
||||
Activates the Python virtual environment located in the specified location.
|
||||
|
||||
.Example
|
||||
Activate.ps1 -Prompt "MyPython"
|
||||
Activates the Python virtual environment that contains the Activate.ps1 script,
|
||||
and prefixes the current prompt with the specified string (surrounded in
|
||||
parentheses) while the virtual environment is active.
|
||||
|
||||
.Notes
|
||||
On Windows, it may be required to enable this Activate.ps1 script by setting the
|
||||
execution policy for the user. You can do this by issuing the following PowerShell
|
||||
command:
|
||||
|
||||
PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser
|
||||
|
||||
For more information on Execution Policies:
|
||||
https://go.microsoft.com/fwlink/?LinkID=135170
|
||||
|
||||
#>
|
||||
Param(
|
||||
[Parameter(Mandatory = $false)]
|
||||
[String]
|
||||
$VenvDir,
|
||||
[Parameter(Mandatory = $false)]
|
||||
[String]
|
||||
$Prompt
|
||||
)
|
||||
|
||||
<# Function declarations --------------------------------------------------- #>
|
||||
|
||||
<#
|
||||
.Synopsis
|
||||
Remove all shell session elements added by the Activate script, including the
|
||||
addition of the virtual environment's Python executable from the beginning of
|
||||
the PATH variable.
|
||||
|
||||
.Parameter NonDestructive
|
||||
If present, do not remove this function from the global namespace for the
|
||||
session.
|
||||
|
||||
#>
|
||||
function global:deactivate ([switch]$NonDestructive) {
|
||||
# Revert to original values
|
||||
|
||||
# The prior prompt:
|
||||
if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) {
|
||||
Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt
|
||||
Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT
|
||||
}
|
||||
|
||||
# The prior PYTHONHOME:
|
||||
if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) {
|
||||
Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME
|
||||
Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME
|
||||
}
|
||||
|
||||
# The prior PATH:
|
||||
if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) {
|
||||
Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH
|
||||
Remove-Item -Path Env:_OLD_VIRTUAL_PATH
|
||||
}
|
||||
|
||||
# Just remove the VIRTUAL_ENV altogether:
|
||||
if (Test-Path -Path Env:VIRTUAL_ENV) {
|
||||
Remove-Item -Path env:VIRTUAL_ENV
|
||||
}
|
||||
|
||||
# Just remove VIRTUAL_ENV_PROMPT altogether.
|
||||
if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) {
|
||||
Remove-Item -Path env:VIRTUAL_ENV_PROMPT
|
||||
}
|
||||
|
||||
# Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether:
|
||||
if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) {
|
||||
Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force
|
||||
}
|
||||
|
||||
# Leave deactivate function in the global namespace if requested:
|
||||
if (-not $NonDestructive) {
|
||||
Remove-Item -Path function:deactivate
|
||||
}
|
||||
}
|
||||
|
||||
<#
|
||||
.Description
|
||||
Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the
|
||||
given folder, and returns them in a map.
|
||||
|
||||
For each line in the pyvenv.cfg file, if that line can be parsed into exactly
|
||||
two strings separated by `=` (with any amount of whitespace surrounding the =)
|
||||
then it is considered a `key = value` line. The left hand string is the key,
|
||||
the right hand is the value.
|
||||
|
||||
If the value starts with a `'` or a `"` then the first and last character is
|
||||
stripped from the value before being captured.
|
||||
|
||||
.Parameter ConfigDir
|
||||
Path to the directory that contains the `pyvenv.cfg` file.
|
||||
#>
|
||||
function Get-PyVenvConfig(
|
||||
[String]
|
||||
$ConfigDir
|
||||
) {
|
||||
Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg"
|
||||
|
||||
# Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue).
|
||||
$pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue
|
||||
|
||||
# An empty map will be returned if no config file is found.
|
||||
$pyvenvConfig = @{ }
|
||||
|
||||
if ($pyvenvConfigPath) {
|
||||
|
||||
Write-Verbose "File exists, parse `key = value` lines"
|
||||
$pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath
|
||||
|
||||
$pyvenvConfigContent | ForEach-Object {
|
||||
$keyval = $PSItem -split "\s*=\s*", 2
|
||||
if ($keyval[0] -and $keyval[1]) {
|
||||
$val = $keyval[1]
|
||||
|
||||
# Remove extraneous quotations around a string value.
|
||||
if ("'""".Contains($val.Substring(0, 1))) {
|
||||
$val = $val.Substring(1, $val.Length - 2)
|
||||
}
|
||||
|
||||
$pyvenvConfig[$keyval[0]] = $val
|
||||
Write-Verbose "Adding Key: '$($keyval[0])'='$val'"
|
||||
}
|
||||
}
|
||||
}
|
||||
return $pyvenvConfig
|
||||
}
|
||||
|
||||
|
||||
<# Begin Activate script --------------------------------------------------- #>
|
||||
|
||||
# Determine the containing directory of this script
|
||||
$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition
|
||||
$VenvExecDir = Get-Item -Path $VenvExecPath
|
||||
|
||||
Write-Verbose "Activation script is located in path: '$VenvExecPath'"
|
||||
Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)"
|
||||
Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)"
|
||||
|
||||
# Set values required in priority: CmdLine, ConfigFile, Default
|
||||
# First, get the location of the virtual environment, it might not be
|
||||
# VenvExecDir if specified on the command line.
|
||||
if ($VenvDir) {
|
||||
Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values"
|
||||
}
|
||||
else {
|
||||
Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir."
|
||||
$VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/")
|
||||
Write-Verbose "VenvDir=$VenvDir"
|
||||
}
|
||||
|
||||
# Next, read the `pyvenv.cfg` file to determine any required value such
|
||||
# as `prompt`.
|
||||
$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir
|
||||
|
||||
# Next, set the prompt from the command line, or the config file, or
|
||||
# just use the name of the virtual environment folder.
|
||||
if ($Prompt) {
|
||||
Write-Verbose "Prompt specified as argument, using '$Prompt'"
|
||||
}
|
||||
else {
|
||||
Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value"
|
||||
if ($pyvenvCfg -and $pyvenvCfg['prompt']) {
|
||||
Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'"
|
||||
$Prompt = $pyvenvCfg['prompt'];
|
||||
}
|
||||
else {
|
||||
Write-Verbose " Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virtual environment)"
|
||||
Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'"
|
||||
$Prompt = Split-Path -Path $venvDir -Leaf
|
||||
}
|
||||
}
|
||||
|
||||
Write-Verbose "Prompt = '$Prompt'"
|
||||
Write-Verbose "VenvDir='$VenvDir'"
|
||||
|
||||
# Deactivate any currently active virtual environment, but leave the
|
||||
# deactivate function in place.
|
||||
deactivate -nondestructive
|
||||
|
||||
# Now set the environment variable VIRTUAL_ENV, used by many tools to determine
|
||||
# that there is an activated venv.
|
||||
$env:VIRTUAL_ENV = $VenvDir
|
||||
|
||||
if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) {
|
||||
|
||||
Write-Verbose "Setting prompt to '$Prompt'"
|
||||
|
||||
# Set the prompt to include the env name
|
||||
# Make sure _OLD_VIRTUAL_PROMPT is global
|
||||
function global:_OLD_VIRTUAL_PROMPT { "" }
|
||||
Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT
|
||||
New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt
|
||||
|
||||
function global:prompt {
|
||||
Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) "
|
||||
_OLD_VIRTUAL_PROMPT
|
||||
}
|
||||
$env:VIRTUAL_ENV_PROMPT = $Prompt
|
||||
}
|
||||
|
||||
# Clear PYTHONHOME
|
||||
if (Test-Path -Path Env:PYTHONHOME) {
|
||||
Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME
|
||||
Remove-Item -Path Env:PYTHONHOME
|
||||
}
|
||||
|
||||
# Add the venv to the PATH
|
||||
Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH
|
||||
$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH"
|
||||
70  llmlab/bin/activate  Normal file
@@ -0,0 +1,70 @@
|
||||
# This file must be used with "source bin/activate" *from bash*
|
||||
# You cannot run it directly
|
||||
|
||||
deactivate () {
|
||||
# reset old environment variables
|
||||
if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then
|
||||
PATH="${_OLD_VIRTUAL_PATH:-}"
|
||||
export PATH
|
||||
unset _OLD_VIRTUAL_PATH
|
||||
fi
|
||||
if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then
|
||||
PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}"
|
||||
export PYTHONHOME
|
||||
unset _OLD_VIRTUAL_PYTHONHOME
|
||||
fi
|
||||
|
||||
# Call hash to forget past commands. Without forgetting
|
||||
# past commands the $PATH changes we made may not be respected
|
||||
hash -r 2> /dev/null
|
||||
|
||||
if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then
|
||||
PS1="${_OLD_VIRTUAL_PS1:-}"
|
||||
export PS1
|
||||
unset _OLD_VIRTUAL_PS1
|
||||
fi
|
||||
|
||||
unset VIRTUAL_ENV
|
||||
unset VIRTUAL_ENV_PROMPT
|
||||
if [ ! "${1:-}" = "nondestructive" ] ; then
|
||||
# Self destruct!
|
||||
unset -f deactivate
|
||||
fi
|
||||
}
|
||||
|
||||
# unset irrelevant variables
|
||||
deactivate nondestructive
|
||||
|
||||
# on Windows, a path can contain colons and backslashes and has to be converted:
|
||||
if [ "${OSTYPE:-}" = "cygwin" ] || [ "${OSTYPE:-}" = "msys" ] ; then
|
||||
# transform D:\path\to\venv to /d/path/to/venv on MSYS
|
||||
# and to /cygdrive/d/path/to/venv on Cygwin
|
||||
export VIRTUAL_ENV=$(cygpath /home/fgras-ca/llm_lab-test/llmlab)
|
||||
else
|
||||
# use the path as-is
|
||||
export VIRTUAL_ENV=/home/fgras-ca/llm_lab-test/llmlab
|
||||
fi
|
||||
|
||||
_OLD_VIRTUAL_PATH="$PATH"
|
||||
PATH="$VIRTUAL_ENV/"bin":$PATH"
|
||||
export PATH
|
||||
|
||||
# unset PYTHONHOME if set
|
||||
# this will fail if PYTHONHOME is set to the empty string (which is bad anyway)
|
||||
# could use `if (set -u; : $PYTHONHOME) ;` in bash
|
||||
if [ -n "${PYTHONHOME:-}" ] ; then
|
||||
_OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}"
|
||||
unset PYTHONHOME
|
||||
fi
|
||||
|
||||
if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then
|
||||
_OLD_VIRTUAL_PS1="${PS1:-}"
|
||||
PS1='(llmlab) '"${PS1:-}"
|
||||
export PS1
|
||||
VIRTUAL_ENV_PROMPT='(llmlab) '
|
||||
export VIRTUAL_ENV_PROMPT
|
||||
fi
|
||||
|
||||
# Call hash to forget past commands. Without forgetting
|
||||
# past commands the $PATH changes we made may not be respected
|
||||
hash -r 2> /dev/null
|
||||
27  llmlab/bin/activate.csh  Normal file
@@ -0,0 +1,27 @@
|
||||
# This file must be used with "source bin/activate.csh" *from csh*.
|
||||
# You cannot run it directly.
|
||||
|
||||
# Created by Davide Di Blasi <davidedb@gmail.com>.
|
||||
# Ported to Python 3.3 venv by Andrew Svetlov <andrew.svetlov@gmail.com>
|
||||
|
||||
alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; unsetenv VIRTUAL_ENV_PROMPT; test "\!:*" != "nondestructive" && unalias deactivate'
|
||||
|
||||
# Unset irrelevant variables.
|
||||
deactivate nondestructive
|
||||
|
||||
setenv VIRTUAL_ENV /home/fgras-ca/llm_lab-test/llmlab
|
||||
|
||||
set _OLD_VIRTUAL_PATH="$PATH"
|
||||
setenv PATH "$VIRTUAL_ENV/"bin":$PATH"
|
||||
|
||||
|
||||
set _OLD_VIRTUAL_PROMPT="$prompt"
|
||||
|
||||
if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then
|
||||
set prompt = '(llmlab) '"$prompt"
|
||||
setenv VIRTUAL_ENV_PROMPT '(llmlab) '
|
||||
endif
|
||||
|
||||
alias pydoc python -m pydoc
|
||||
|
||||
rehash
|
||||
69  llmlab/bin/activate.fish  Normal file
@@ -0,0 +1,69 @@
|
||||
# This file must be used with "source <venv>/bin/activate.fish" *from fish*
|
||||
# (https://fishshell.com/). You cannot run it directly.
|
||||
|
||||
function deactivate -d "Exit virtual environment and return to normal shell environment"
|
||||
# reset old environment variables
|
||||
if test -n "$_OLD_VIRTUAL_PATH"
|
||||
set -gx PATH $_OLD_VIRTUAL_PATH
|
||||
set -e _OLD_VIRTUAL_PATH
|
||||
end
|
||||
if test -n "$_OLD_VIRTUAL_PYTHONHOME"
|
||||
set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME
|
||||
set -e _OLD_VIRTUAL_PYTHONHOME
|
||||
end
|
||||
|
||||
if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
|
||||
set -e _OLD_FISH_PROMPT_OVERRIDE
|
||||
# prevents error when using nested fish instances (Issue #93858)
|
||||
if functions -q _old_fish_prompt
|
||||
functions -e fish_prompt
|
||||
functions -c _old_fish_prompt fish_prompt
|
||||
functions -e _old_fish_prompt
|
||||
end
|
||||
end
|
||||
|
||||
set -e VIRTUAL_ENV
|
||||
set -e VIRTUAL_ENV_PROMPT
|
||||
if test "$argv[1]" != "nondestructive"
|
||||
# Self-destruct!
|
||||
functions -e deactivate
|
||||
end
|
||||
end
|
||||
|
||||
# Unset irrelevant variables.
|
||||
deactivate nondestructive
|
||||
|
||||
set -gx VIRTUAL_ENV /home/fgras-ca/llm_lab-test/llmlab
|
||||
|
||||
set -gx _OLD_VIRTUAL_PATH $PATH
|
||||
set -gx PATH "$VIRTUAL_ENV/"bin $PATH
|
||||
|
||||
# Unset PYTHONHOME if set.
|
||||
if set -q PYTHONHOME
|
||||
set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
|
||||
set -e PYTHONHOME
|
||||
end
|
||||
|
||||
if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
|
||||
# fish uses a function instead of an env var to generate the prompt.
|
||||
|
||||
# Save the current fish_prompt function as the function _old_fish_prompt.
|
||||
functions -c fish_prompt _old_fish_prompt
|
||||
|
||||
# With the original prompt function renamed, we can override with our own.
|
||||
function fish_prompt
|
||||
# Save the return status of the last command.
|
||||
set -l old_status $status
|
||||
|
||||
# Output the venv prompt; color taken from the blue of the Python logo.
|
||||
printf "%s%s%s" (set_color 4B8BBE) '(llmlab) ' (set_color normal)
|
||||
|
||||
# Restore the return status of the previous command.
|
||||
echo "exit $old_status" | .
|
||||
# Output the original/"old" prompt.
|
||||
_old_fish_prompt
|
||||
end
|
||||
|
||||
set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
|
||||
set -gx VIRTUAL_ENV_PROMPT '(llmlab) '
|
||||
end
|
||||
8  llmlab/bin/normalizer  Executable file
@@ -0,0 +1,8 @@
#!/home/fgras-ca/llm_lab-test/llmlab/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from charset_normalizer import cli
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(cli.cli_detect())
8  llmlab/bin/pip  Executable file
@@ -0,0 +1,8 @@
#!/home/fgras-ca/llm_lab-test/llmlab/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
8  llmlab/bin/pip3  Executable file
@@ -0,0 +1,8 @@
#!/home/fgras-ca/llm_lab-test/llmlab/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
8  llmlab/bin/pip3.12  Executable file
@@ -0,0 +1,8 @@
#!/home/fgras-ca/llm_lab-test/llmlab/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
1  llmlab/bin/python  Symbolic link
@@ -0,0 +1 @@
python3
1  llmlab/bin/python3  Symbolic link
@@ -0,0 +1 @@
/usr/bin/python3
1  llmlab/bin/python3.12  Symbolic link
@@ -0,0 +1 @@
python3
@ -0,0 +1 @@
|
||||
pip
|
||||
@ -0,0 +1,44 @@
|
||||
Zope Public License (ZPL) Version 2.1
|
||||
|
||||
A copyright notice accompanies this license document that identifies the
|
||||
copyright holders.
|
||||
|
||||
This license has been certified as open source. It has also been designated as
|
||||
GPL compatible by the Free Software Foundation (FSF).
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions in source code must retain the accompanying copyright
|
||||
notice, this list of conditions, and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the accompanying copyright
|
||||
notice, this list of conditions, and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
|
||||
3. Names of the copyright holders must not be used to endorse or promote
|
||||
products derived from this software without prior written permission from the
|
||||
copyright holders.
|
||||
|
||||
4. The right to distribute this software or to use it for any purpose does not
|
||||
give you the right to use Servicemarks (sm) or Trademarks (tm) of the
|
||||
copyright
|
||||
holders. Use of them is covered by separate agreement with the copyright
|
||||
holders.
|
||||
|
||||
5. If any files are modified, you must cause the modified files to carry
|
||||
prominent notices stating that you changed the files and the date of any
|
||||
change.
|
||||
|
||||
Disclaimer
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY EXPRESSED
|
||||
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
||||
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
|
||||
EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||||
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
1167  llmlab/lib/python3.12/site-packages/DateTime-5.5.dist-info/METADATA  Normal file
File diff suppressed because it is too large
@@ -0,0 +1,22 @@
|
||||
DateTime-5.5.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
||||
DateTime-5.5.dist-info/LICENSE.txt,sha256=PmcdsR32h1FswdtbPWXkqjg-rKPCDOo_r1Og9zNdCjw,2070
|
||||
DateTime-5.5.dist-info/METADATA,sha256=W1k0PqPJ6SU6QTJAu40JPtHK8XeQRL0GGEpfVGPjWGI,33735
|
||||
DateTime-5.5.dist-info/RECORD,,
|
||||
DateTime-5.5.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
||||
DateTime-5.5.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
|
||||
DateTime-5.5.dist-info/top_level.txt,sha256=iVdUvuV_RIkkMzsnPGNfwojRWvuonInryaK3hA5Hh0o,9
|
||||
DateTime/DateTime.py,sha256=65LbTcnrCSsDPGoGLVkk7NC3H8Kq-PjkC1fQVR33gE8,71364
|
||||
DateTime/DateTime.txt,sha256=KZFzxoQItLsar1ZDd2vZN74Y6L4a04H8jXMwqc8KjmY,22487
|
||||
DateTime/__init__.py,sha256=trlFzEmNkmUpxZT7krPSVDayDK1bRxToccg3CcCF8wg,714
|
||||
DateTime/__pycache__/DateTime.cpython-312.pyc,,
|
||||
DateTime/__pycache__/__init__.cpython-312.pyc,,
|
||||
DateTime/__pycache__/interfaces.cpython-312.pyc,,
|
||||
DateTime/__pycache__/pytz_support.cpython-312.pyc,,
|
||||
DateTime/interfaces.py,sha256=n47sexf1eQ6YMdYB_60PgHtSzYIj4FND-RmHFiNpm1E,12187
|
||||
DateTime/pytz.txt,sha256=9Phns9ESXs9MaOKxXztX6sJ09QczGxsbYoSRSllKUfk,5619
|
||||
DateTime/pytz_support.py,sha256=inR1SO0X17fp9C2GsRw99S_MhxKiEt5dOV3-TGsBxDI,11853
|
||||
DateTime/tests/__init__.py,sha256=H7Ixo1xp-8BlJ65u14hk5i_TKEmETyi2FmLMD6H-mpo,683
|
||||
DateTime/tests/__pycache__/__init__.cpython-312.pyc,,
|
||||
DateTime/tests/__pycache__/test_datetime.cpython-312.pyc,,
|
||||
DateTime/tests/julian_testdata.txt,sha256=qxvLvabVB9ayhh5UHBvPhuqW5mRL_lizzbUh6lc3d4I,1397
|
||||
DateTime/tests/test_datetime.py,sha256=dsrfAqQpz1od1bOVPvSYfZAlduJpJIpc2F_hdN7WRAU,30385
|
||||
@ -0,0 +1,5 @@
|
||||
Wheel-Version: 1.0
|
||||
Generator: bdist_wheel (0.42.0)
|
||||
Root-Is-Purelib: true
|
||||
Tag: py3-none-any
|
||||
|
||||
@ -0,0 +1 @@
|
||||
DateTime
|
||||
1948  llmlab/lib/python3.12/site-packages/DateTime/DateTime.py  Normal file
File diff suppressed because it is too large
785  llmlab/lib/python3.12/site-packages/DateTime/DateTime.txt  Normal file
@@ -0,0 +1,785 @@
|
||||
The DateTime package
|
||||
====================
|
||||
|
||||
Encapsulation of date/time values.
|
||||
|
||||
|
||||
Function Timezones()
|
||||
--------------------
|
||||
|
||||
Returns the list of recognized timezone names:
|
||||
|
||||
>>> from DateTime import Timezones
|
||||
>>> zones = set(Timezones())
|
||||
|
||||
Almost all of the standard pytz timezones are included, with the exception
|
||||
of some commonly-used but ambiguous abbreviations, where historical Zope
|
||||
usage conflicts with the name used by pytz:
|
||||
|
||||
>>> import pytz
|
||||
>>> [x for x in pytz.all_timezones if x not in zones]
|
||||
['CET', 'EET', 'EST', 'MET', 'MST', 'WET']
|
||||
|
||||
Class DateTime
|
||||
--------------
|
||||
|
||||
DateTime objects represent instants in time and provide interfaces for
|
||||
controlling its representation without affecting the absolute value of
|
||||
the object.
|
||||
|
||||
DateTime objects may be created from a wide variety of string or
|
||||
numeric data, or may be computed from other DateTime objects.
|
||||
DateTimes support the ability to convert their representations to many
|
||||
major timezones, as well as the ability to create a DateTime object
|
||||
in the context of a given timezone.
|
||||
|
||||
DateTime objects provide partial numerical behavior:
|
||||
|
||||
* Two date-time objects can be subtracted to obtain a time, in days
|
||||
between the two.
|
||||
|
||||
* A date-time object and a positive or negative number may be added to
|
||||
obtain a new date-time object that is the given number of days later
|
||||
than the input date-time object.
|
||||
|
||||
* A positive or negative number and a date-time object may be added to
|
||||
obtain a new date-time object that is the given number of days later
|
||||
than the input date-time object.
|
||||
|
||||
* A positive or negative number may be subtracted from a date-time
|
||||
object to obtain a new date-time object that is the given number of
|
||||
days earlier than the input date-time object.
|
||||
|
||||
DateTime objects may be converted to integer, long, or float numbers
|
||||
of days since January 1, 1901, using the standard int, long, and float
|
||||
functions (Compatibility Note: int, long and float return the number
|
||||
of days since 1901 in GMT rather than local machine timezone).
|
||||
DateTime objects also provide access to their value in a float format
|
||||
usable with the Python time module, provided that the value of the
|
||||
object falls in the range of the epoch-based time module.
|
||||
|
||||
A DateTime object should be considered immutable; all conversion and numeric
|
||||
operations return a new DateTime object rather than modify the current object.
|
||||
|
||||
A DateTime object always maintains its value as an absolute UTC time,
|
||||
and is represented in the context of some timezone based on the
|
||||
arguments used to create the object. A DateTime object's methods
|
||||
return values based on the timezone context.
|
||||
|
||||
Note that in all cases the local machine timezone is used for
|
||||
representation if no timezone is specified.
|
||||
|
||||
Constructor for DateTime
|
||||
------------------------
|
||||
|
||||
DateTime() returns a new date-time object. DateTimes may be created
|
||||
with from zero to seven arguments:
|
||||
|
||||
* If the function is called with no arguments, then the current date/
|
||||
time is returned, represented in the timezone of the local machine.
|
||||
|
||||
* If the function is invoked with a single string argument which is a
|
||||
recognized timezone name, an object representing the current time is
|
||||
returned, represented in the specified timezone.
|
||||
|
||||
* If the function is invoked with a single string argument
|
||||
representing a valid date/time, an object representing that date/
|
||||
time will be returned.
|
||||
|
||||
As a general rule, any date-time representation that is recognized
|
||||
and unambiguous to a resident of North America is acceptable. (The
|
||||
reason for this qualification is that in North America, a date like:
|
||||
2/1/1994 is interpreted as February 1, 1994, while in some parts of
|
||||
the world, it is interpreted as January 2, 1994.) A date/ time
|
||||
string consists of two components, a date component and an optional
|
||||
time component, separated by one or more spaces. If the time
|
||||
component is omitted, 12:00am is assumed.
|
||||
|
||||
Any recognized timezone name specified as the final element of the
|
||||
date/time string will be used for computing the date/time value.
|
||||
(If you create a DateTime with the string,
|
||||
"Mar 9, 1997 1:45pm US/Pacific", the value will essentially be the
|
||||
same as if you had captured time.time() at the specified date and
|
||||
time on a machine in that timezone). If no timezone is passed, then
|
||||
the timezone configured on the local machine will be used, **except**
|
||||
that if the date format matches ISO 8601 ('YYYY-MM-DD'), the instance
|
||||
will use UTC / GMT+0 as the timezone.
|
||||
|
||||
o Returns current date/time, represented in US/Eastern:
|
||||
|
||||
>>> from DateTime import DateTime
|
||||
>>> e = DateTime('US/Eastern')
|
||||
>>> e.timezone()
|
||||
'US/Eastern'
|
||||
|
||||
o Returns specified time, represented in local machine zone:
|
||||
|
||||
>>> x = DateTime('1997/3/9 1:45pm')
|
||||
>>> x.parts() # doctest: +ELLIPSIS
|
||||
(1997, 3, 9, 13, 45, ...)
|
||||
|
||||
o Specified time in local machine zone, verbose format:
|
||||
|
||||
>>> y = DateTime('Mar 9, 1997 13:45:00')
|
||||
>>> y.parts() # doctest: +ELLIPSIS
|
||||
(1997, 3, 9, 13, 45, ...)
|
||||
>>> y == x
|
||||
True
|
||||
|
||||
o Specified time in UTC via ISO 8601 rule:
|
||||
|
||||
>>> z = DateTime('2014-03-24')
|
||||
>>> z.parts() # doctest: +ELLIPSIS
|
||||
(2014, 3, 24, 0, 0, ...)
|
||||
>>> z.timezone()
|
||||
'GMT+0'
|
||||
|
||||
The date component consists of year, month, and day values. The
|
||||
year value must be a one-, two-, or four-digit integer. If a one-
|
||||
or two-digit year is used, the year is assumed to be in the
|
||||
twentieth century. The month may an integer, from 1 to 12, a month
|
||||
name, or a month abbreviation, where a period may optionally follow
|
||||
the abbreviation. The day must be an integer from 1 to the number of
|
||||
days in the month. The year, month, and day values may be separated
|
||||
by periods, hyphens, forward slashes, or spaces. Extra spaces are
|
||||
permitted around the delimiters. Year, month, and day values may be
|
||||
given in any order as long as it is possible to distinguish the
|
||||
components. If all three components are numbers that are less than
|
||||
13, then a month-day-year ordering is assumed.
|
||||
|
||||
The time component consists of hour, minute, and second values
|
||||
separated by colons. The hour value must be an integer between 0
|
||||
and 23 inclusively. The minute value must be an integer between 0
|
||||
and 59 inclusively. The second value may be an integer value
|
||||
between 0 and 59.999 inclusively. The second value or both the
|
||||
minute and second values may be omitted. The time may be followed
|
||||
by am or pm in upper or lower case, in which case a 12-hour clock is
|
||||
assumed.
|
||||
|
||||
* If the DateTime function is invoked with a single numeric argument,
|
||||
the number is assumed to be either a floating point value such as
|
||||
that returned by time.time(), or a number of days after January 1,
|
||||
1901 00:00:00 UTC.
|
||||
|
||||
A DateTime object is returned that represents either the GMT value
|
||||
of the time.time() float represented in the local machine's
|
||||
timezone, or that number of days after January 1, 1901. Note that
|
||||
the number of days after 1901 need to be expressed from the
|
||||
viewpoint of the local machine's timezone. A negative argument will
|
||||
yield a date-time value before 1901.
|
||||
|
||||
* If the function is invoked with two numeric arguments, then the
|
||||
first is taken to be an integer year and the second argument is
|
||||
taken to be an offset in days from the beginning of the year, in the
|
||||
context of the local machine timezone. The date-time value returned
|
||||
is the given offset number of days from the beginning of the given
|
||||
year, represented in the timezone of the local machine. The offset
|
||||
may be positive or negative. Two-digit years are assumed to be in
|
||||
the twentieth century.
|
||||
|
||||
* If the function is invoked with two arguments, the first a float
|
||||
representing a number of seconds past the epoch in GMT (such as
|
||||
those returned by time.time()) and the second a string naming a
|
||||
recognized timezone, a DateTime with a value of that GMT time will
|
||||
be returned, represented in the given timezone.
|
||||
|
||||
>>> import time
|
||||
>>> t = time.time()
|
||||
|
||||
Time t represented as US/Eastern:
|
||||
|
||||
>>> now_east = DateTime(t, 'US/Eastern')
|
||||
|
||||
Time t represented as US/Pacific:
|
||||
|
||||
>>> now_west = DateTime(t, 'US/Pacific')
|
||||
|
||||
Only their representations are different:
|
||||
|
||||
>>> now_east.equalTo(now_west)
|
||||
True
|
||||
|
||||
* If the function is invoked with three or more numeric arguments,
|
||||
then the first is taken to be an integer year, the second is taken
|
||||
to be an integer month, and the third is taken to be an integer day.
|
||||
If the combination of values is not valid, then a DateTimeError is
|
||||
raised. One- or two-digit years up to 69 are assumed to be in the
|
||||
21st century, whereas values 70-99 are assumed to be 20th century.
|
||||
The fourth, fifth, and sixth arguments are floating point, positive
|
||||
or negative offsets in units of hours, minutes, and days, and
|
||||
default to zero if not given. An optional string may be given as
|
||||
the final argument to indicate timezone (the effect of this is as if
|
||||
you had taken the value of time.time() at that time on a machine in
|
||||
the specified timezone).
|
||||
|
||||
If a string argument passed to the DateTime constructor cannot be
|
||||
parsed, it will raise SyntaxError. Invalid date, time, or
|
||||
timezone components will raise a DateTimeError.
|
||||
|
||||
The module function Timezones() will return a list of the timezones
|
||||
recognized by the DateTime module. Recognition of timezone names is
|
||||
case-insensitive.
|
||||
|
||||
Instance Methods for DateTime (IDateTime interface)
|
||||
---------------------------------------------------
|
||||
|
||||
Conversion and comparison methods
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
* ``timeTime()`` returns the date/time as a floating-point number in
|
||||
UTC, in the format used by the Python time module. Note that it is
|
||||
possible to create date /time values with DateTime that have no
|
||||
meaningful value to the time module, and in such cases a
|
||||
DateTimeError is raised. A DateTime object's value must generally
|
||||
be between Jan 1, 1970 (or your local machine epoch) and Jan 2038 to
|
||||
produce a valid time.time() style value.
|
||||
|
||||
>>> dt = DateTime('Mar 9, 1997 13:45:00 US/Eastern')
|
||||
>>> dt.timeTime()
|
||||
857933100.0
|
||||
|
||||
>>> DateTime('2040/01/01 UTC').timeTime()
|
||||
2208988800.0
|
||||
|
||||
>>> DateTime('1900/01/01 UTC').timeTime()
|
||||
-2208988800.0
|
||||
|
||||
* ``toZone(z)`` returns a DateTime with the value as the current
|
||||
object, represented in the indicated timezone:
|
||||
|
||||
>>> dt.toZone('UTC')
|
||||
DateTime('1997/03/09 18:45:00 UTC')
|
||||
|
||||
>>> dt.toZone('UTC').equalTo(dt)
|
||||
True
|
||||
|
||||
* ``isFuture()`` returns true if this object represents a date/time
|
||||
later than the time of the call:
|
||||
|
||||
>>> dt.isFuture()
|
||||
False
|
||||
>>> DateTime('Jan 1 3000').isFuture() # not time-machine safe!
|
||||
True
|
||||
|
||||
* ``isPast()`` returns true if this object represents a date/time
|
||||
earlier than the time of the call:
|
||||
|
||||
>>> dt.isPast()
|
||||
True
|
||||
>>> DateTime('Jan 1 3000').isPast() # not time-machine safe!
|
||||
False
|
||||
|
||||
* ``isCurrentYear()`` returns true if this object represents a
|
||||
date/time that falls within the current year, in the context of this
|
||||
object's timezone representation:
|
||||
|
||||
>>> dt.isCurrentYear()
|
||||
False
|
||||
>>> DateTime().isCurrentYear()
|
||||
True
|
||||
|
||||
* ``isCurrentMonth()`` returns true if this object represents a
|
||||
date/time that falls within the current month, in the context of
|
||||
this object's timezone representation:
|
||||
|
||||
>>> dt.isCurrentMonth()
|
||||
False
|
||||
>>> DateTime().isCurrentMonth()
|
||||
True
|
||||
|
||||
* ``isCurrentDay()`` returns true if this object represents a
|
||||
date/time that falls within the current day, in the context of this
|
||||
object's timezone representation:
|
||||
|
||||
>>> dt.isCurrentDay()
|
||||
False
|
||||
>>> DateTime().isCurrentDay()
|
||||
True
|
||||
|
||||
* ``isCurrentHour()`` returns true if this object represents a
|
||||
date/time that falls within the current hour, in the context of this
|
||||
object's timezone representation:
|
||||
|
||||
>>> dt.isCurrentHour()
|
||||
False
|
||||
|
||||
>>> DateTime().isCurrentHour()
|
||||
True
|
||||
|
||||
* ``isCurrentMinute()`` returns true if this object represents a
|
||||
date/time that falls within the current minute, in the context of
|
||||
this object's timezone representation:
|
||||
|
||||
>>> dt.isCurrentMinute()
|
||||
False
|
||||
>>> DateTime().isCurrentMinute()
|
||||
True
|
||||
|
||||
* ``isLeapYear()`` returns true if the current year (in the context of
|
||||
the object's timezone) is a leap year:
|
||||
|
||||
>>> dt.isLeapYear()
|
||||
False
|
||||
>>> DateTime('Mar 8 2004').isLeapYear()
|
||||
True
|
||||
|
||||
* ``earliestTime()`` returns a new DateTime object that represents the
|
||||
earliest possible time (in whole seconds) that still falls within
|
||||
the current object's day, in the object's timezone context:
|
||||
|
||||
>>> dt.earliestTime()
|
||||
DateTime('1997/03/09 00:00:00 US/Eastern')
|
||||
|
||||
* ``latestTime()`` return a new DateTime object that represents the
|
||||
latest possible time (in whole seconds) that still falls within the
|
||||
current object's day, in the object's timezone context
|
||||
|
||||
>>> dt.latestTime()
|
||||
DateTime('1997/03/09 23:59:59 US/Eastern')
|
||||
|
||||
Component access
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
* ``parts()`` returns a tuple containing the calendar year, month,
|
||||
day, hour, minute second and timezone of the object
|
||||
|
||||
>>> dt.parts() # doctest: +ELLIPSIS
|
||||
(1997, 3, 9, 13, 45, ... 'US/Eastern')
|
||||
|
||||
* ``timezone()`` returns the timezone in which the object is represented:
|
||||
|
||||
>>> dt.timezone() in Timezones()
|
||||
True
|
||||
|
||||
* ``tzoffset()`` returns the timezone offset for the objects timezone:
|
||||
|
||||
>>> dt.tzoffset()
|
||||
-18000
|
||||
|
||||
* ``year()`` returns the calendar year of the object:
|
||||
|
||||
>>> dt.year()
|
||||
1997
|
||||
|
||||
* ``month()`` returns the month of the object as an integer:
|
||||
|
||||
>>> dt.month()
|
||||
3
|
||||
|
||||
* ``Month()`` returns the full month name:
|
||||
|
||||
>>> dt.Month()
|
||||
'March'
|
||||
|
||||
* ``aMonth()`` returns the abbreviated month name:
|
||||
|
||||
>>> dt.aMonth()
|
||||
'Mar'
|
||||
|
||||
* ``pMonth()`` returns the abbreviated (with period) month name:
|
||||
|
||||
>>> dt.pMonth()
|
||||
'Mar.'
|
||||
|
||||
* ``day()`` returns the integer day:
|
||||
|
||||
>>> dt.day()
|
||||
9
|
||||
|
||||
* ``Day()`` returns the full name of the day of the week:
|
||||
|
||||
>>> dt.Day()
|
||||
'Sunday'
|
||||
|
||||
* ``dayOfYear()`` returns the day of the year, in context of the
|
||||
timezone representation of the object:
|
||||
|
||||
>>> dt.dayOfYear()
|
||||
68
|
||||
|
||||
* ``aDay()`` returns the abbreviated name of the day of the week:
|
||||
|
||||
>>> dt.aDay()
|
||||
'Sun'
|
||||
|
||||
* ``pDay()`` returns the abbreviated (with period) name of the day of
|
||||
the week:
|
||||
|
||||
>>> dt.pDay()
|
||||
'Sun.'
|
||||
|
||||
* ``dow()`` returns the integer day of the week, where Sunday is 0:
|
||||
|
||||
>>> dt.dow()
|
||||
0
|
||||
|
||||
* ``dow_1()`` returns the integer day of the week, where sunday is 1:
|
||||
|
||||
>>> dt.dow_1()
|
||||
1
|
||||
|
||||
* ``h_12()`` returns the 12-hour clock representation of the hour:
|
||||
|
||||
>>> dt.h_12()
|
||||
1
|
||||
|
||||
* ``h_24()`` returns the 24-hour clock representation of the hour:
|
||||
|
||||
>>> dt.h_24()
|
||||
13
|
||||
|
||||
* ``ampm()`` returns the appropriate time modifier (am or pm):
|
||||
|
||||
>>> dt.ampm()
|
||||
'pm'
|
||||
|
||||
* ``hour()`` returns the 24-hour clock representation of the hour:
|
||||
|
||||
>>> dt.hour()
|
||||
13
|
||||
|
||||
* ``minute()`` returns the minute:
|
||||
|
||||
>>> dt.minute()
|
||||
45
|
||||
|
||||
* ``second()`` returns the second:
|
||||
|
||||
>>> dt.second() == 0
|
||||
True
|
||||
|
||||
* ``millis()`` returns the milliseconds since the epoch in GMT.
|
||||
|
||||
>>> dt.millis() == 857933100000
|
||||
True
|
||||
|
||||
strftime()
|
||||
~~~~~~~~~~
|
||||
|
||||
See ``tests/test_datetime.py``.
|
||||
|
||||
General formats from previous DateTime
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
* ``Date()`` return the date string for the object:
|
||||
|
||||
>>> dt.Date()
|
||||
'1997/03/09'
|
||||
|
||||
* ``Time()`` returns the time string for an object to the nearest
|
||||
second:
|
||||
|
||||
>>> dt.Time()
|
||||
'13:45:00'
|
||||
|
||||
* ``TimeMinutes()`` returns the time string for an object not showing
|
||||
seconds:
|
||||
|
||||
>>> dt.TimeMinutes()
|
||||
'13:45'
|
||||
|
||||
* ``AMPM()`` returns the time string for an object to the nearest second:
|
||||
|
||||
>>> dt.AMPM()
|
||||
'01:45:00 pm'
|
||||
|
||||
* ``AMPMMinutes()`` returns the time string for an object not showing
|
||||
seconds:
|
||||
|
||||
>>> dt.AMPMMinutes()
|
||||
'01:45 pm'
|
||||
|
||||
* ``PreciseTime()`` returns the time string for the object:
|
||||
|
||||
>>> dt.PreciseTime()
|
||||
'13:45:00.000'
|
||||
|
||||
* ``PreciseAMPM()`` returns the time string for the object:
|
||||
|
||||
>>> dt.PreciseAMPM()
|
||||
'01:45:00.000 pm'
|
||||
|
||||
* ``yy()`` returns the calendar year as a 2 digit string
|
||||
|
||||
>>> dt.yy()
|
||||
'97'
|
||||
|
||||
* ``mm()`` returns the month as a 2 digit string
|
||||
|
||||
>>> dt.mm()
|
||||
'03'
|
||||
|
||||
* ``dd()`` returns the day as a 2 digit string:
|
||||
|
||||
>>> dt.dd()
|
||||
'09'
|
||||
|
||||
* ``rfc822()`` returns the date in RFC 822 format:
|
||||
|
||||
>>> dt.rfc822()
|
||||
'Sun, 09 Mar 1997 13:45:00 -0500'
|
||||
|
||||
New formats
|
||||
~~~~~~~~~~~
|
||||
|
||||
* ``fCommon()`` returns a string representing the object's value in
|
||||
the format: March 9, 1997 1:45 pm:
|
||||
|
||||
>>> dt.fCommon()
|
||||
'March 9, 1997 1:45 pm'
|
||||
|
||||
* ``fCommonZ()`` returns a string representing the object's value in
|
||||
the format: March 9, 1997 1:45 pm US/Eastern:
|
||||
|
||||
>>> dt.fCommonZ()
|
||||
'March 9, 1997 1:45 pm US/Eastern'
|
||||
|
||||
* ``aCommon()`` returns a string representing the object's value in
|
||||
the format: Mar 9, 1997 1:45 pm:
|
||||
|
||||
>>> dt.aCommon()
|
||||
'Mar 9, 1997 1:45 pm'
|
||||
|
||||
* ``aCommonZ()`` return a string representing the object's value in
|
||||
the format: Mar 9, 1997 1:45 pm US/Eastern:
|
||||
|
||||
>>> dt.aCommonZ()
|
||||
'Mar 9, 1997 1:45 pm US/Eastern'
|
||||
|
||||
* ``pCommon()`` returns a string representing the object's value in
|
||||
the format Mar. 9, 1997 1:45 pm:
|
||||
|
||||
>>> dt.pCommon()
|
||||
'Mar. 9, 1997 1:45 pm'
|
||||
|
||||
* ``pCommonZ()`` returns a string representing the object's value in
|
||||
the format: Mar. 9, 1997 1:45 pm US/Eastern:
|
||||
|
||||
>>> dt.pCommonZ()
|
||||
'Mar. 9, 1997 1:45 pm US/Eastern'
|
||||
|
||||
* ``ISO()`` returns a string with the date/time in ISO format. Note:
|
||||
this is not ISO 8601-format! See the ISO8601 and HTML4 methods below
|
||||
for ISO 8601-compliant output. Dates are output as: YYYY-MM-DD HH:MM:SS
|
||||
|
||||
>>> dt.ISO()
|
||||
'1997-03-09 13:45:00'
|
||||
|
||||
* ``ISO8601()`` returns the object in ISO 8601-compatible format
|
||||
containing the date, time with seconds-precision and the time zone
|
||||
identifier - see http://www.w3.org/TR/NOTE-datetime. Dates are
|
||||
output as: YYYY-MM-DDTHH:MM:SSTZD (T is a literal character, TZD is
|
||||
Time Zone Designator, format +HH:MM or -HH:MM).
|
||||
|
||||
The ``HTML4()`` method below offers the same formatting, but
|
||||
converts to UTC before returning the value and sets the TZD"Z"
|
||||
|
||||
>>> dt.ISO8601()
|
||||
'1997-03-09T13:45:00-05:00'
|
||||
|
||||
|
||||
* ``HTML4()`` returns the object in the format used in the HTML4.0
|
||||
specification, one of the standard forms in ISO8601. See
|
||||
http://www.w3.org/TR/NOTE-datetime. Dates are output as:
|
||||
YYYY-MM-DDTHH:MM:SSZ (T, Z are literal characters, the time is in
|
||||
UTC.):
|
||||
|
||||
>>> dt.HTML4()
|
||||
'1997-03-09T18:45:00Z'
|
||||
|
||||
* ``JulianDay()`` returns the Julian day according to
|
||||
http://www.tondering.dk/claus/cal/node3.html#sec-calcjd
|
||||
|
||||
>>> dt.JulianDay()
|
||||
2450517
|
||||
|
||||
* ``week()`` returns the week number according to ISO
|
||||
see http://www.tondering.dk/claus/cal/node6.html#SECTION00670000000000000000
|
||||
|
||||
>>> dt.week()
|
||||
10
|
||||
|
||||
Deprecated API
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
* DayOfWeek(): see Day()
|
||||
|
||||
* Day_(): see pDay()
|
||||
|
||||
* Mon(): see aMonth()
|
||||
|
||||
* Mon_(): see pMonth
|
||||
|
||||
General Services Provided by DateTime
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
DateTimes can be repr()'ed; the result will be a string indicating how
|
||||
to make a DateTime object like this:
|
||||
|
||||
>>> repr(dt)
|
||||
"DateTime('1997/03/09 13:45:00 US/Eastern')"
|
||||
|
||||
When we convert them into a string, we get a nicer string that could
|
||||
actually be shown to a user:
|
||||
|
||||
>>> str(dt)
|
||||
'1997/03/09 13:45:00 US/Eastern'
|
||||
|
||||
The hash value of a DateTime is based on the date and time and is
|
||||
equal for different representations of the DateTime:

>>> hash(dt)
3618678
>>> hash(dt.toZone('UTC'))
3618678

DateTime objects can be compared to other DateTime objects OR floating
point numbers such as the ones which are returned by the Python time
module by using the equalTo method. Using this API, True is returned if the
object represents a date/time equal to the specified DateTime or time module
style time:

>>> dt.equalTo(dt)
True
>>> dt.equalTo(dt.toZone('UTC'))
True
>>> dt.equalTo(dt.timeTime())
True
>>> dt.equalTo(DateTime())
False

Same goes for inequalities:

>>> dt.notEqualTo(dt)
False
>>> dt.notEqualTo(dt.toZone('UTC'))
False
>>> dt.notEqualTo(dt.timeTime())
False
>>> dt.notEqualTo(DateTime())
True

Normal equality operations only work with DateTime objects and take the
timezone setting into account:

>>> dt == dt
True
>>> dt == dt.toZone('UTC')
False
>>> dt == DateTime()
False

>>> dt != dt
False
>>> dt != dt.toZone('UTC')
True
>>> dt != DateTime()
True

But the other comparison operations compare the referenced moment in time and
not the representation itself:

>>> dt > dt
False
>>> DateTime() > dt
True
>>> dt > DateTime().timeTime()
False
>>> DateTime().timeTime() > dt
True

>>> dt.greaterThan(dt)
False
>>> DateTime().greaterThan(dt)
True
>>> dt.greaterThan(DateTime().timeTime())
False

>>> dt >= dt
True
>>> DateTime() >= dt
True
>>> dt >= DateTime().timeTime()
False
>>> DateTime().timeTime() >= dt
True

>>> dt.greaterThanEqualTo(dt)
True
>>> DateTime().greaterThanEqualTo(dt)
True
>>> dt.greaterThanEqualTo(DateTime().timeTime())
False

>>> dt < dt
False
>>> DateTime() < dt
False
>>> dt < DateTime().timeTime()
True
>>> DateTime().timeTime() < dt
False

>>> dt.lessThan(dt)
False
>>> DateTime().lessThan(dt)
False
>>> dt.lessThan(DateTime().timeTime())
True

>>> dt <= dt
True
>>> DateTime() <= dt
False
>>> dt <= DateTime().timeTime()
True
>>> DateTime().timeTime() <= dt
False

>>> dt.lessThanEqualTo(dt)
True
>>> DateTime().lessThanEqualTo(dt)
False
>>> dt.lessThanEqualTo(DateTime().timeTime())
True

Numeric Services Provided by DateTime
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

A DateTime may be added to a number and a number may be added to a
DateTime:

>>> dt + 5
DateTime('1997/03/14 13:45:00 US/Eastern')
>>> 5 + dt
DateTime('1997/03/14 13:45:00 US/Eastern')

Two DateTimes cannot be added:

>>> from DateTime.interfaces import DateTimeError
>>> try:
...     dt + dt
...     print('fail')
... except DateTimeError:
...     print('ok')
ok

Either a DateTime or a number may be subtracted from a DateTime,
however, a DateTime may not be subtracted from a number:

>>> DateTime('1997/03/10 13:45 US/Eastern') - dt
1.0
>>> dt - 1
DateTime('1997/03/08 13:45:00 US/Eastern')
>>> 1 - dt
Traceback (most recent call last):
...
TypeError: unsupported operand type(s) for -: 'int' and 'DateTime'

DateTimes can also be converted to integers (number of seconds since
the epoch) and floats:

>>> int(dt)
857933100
>>> float(dt)
857933100.0
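The comparison and numeric behaviour documented above can be tied together in one short sketch. This snippet is an added illustration, not part of the vendored DateTime.txt; it only restates facts shown in the doctests (== and != compare the representation, equalTo() and timeTime() work on the underlying moment, int() and float() give seconds since the epoch), and the construction of dt mirrors the value used above. The float-plus-timezone constructor is the one exercised in the test suite further below.

    from DateTime import DateTime

    dt = DateTime('1997/03/09 13:45:00 US/Eastern')
    utc = dt.toZone('UTC')

    assert dt != utc                 # ==/!= look at the representation
    assert dt.equalTo(utc)           # equalTo() looks at the moment in time
    assert hash(dt) == hash(utc)

    assert float(dt) == dt.timeTime()       # seconds since the epoch (UTC)
    assert int(dt) == int(dt.timeTime())
    assert dt.equalTo(DateTime(dt.timeTime(), 'US/Eastern'))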
18
llmlab/lib/python3.12/site-packages/DateTime/__init__.py
Normal file
@ -0,0 +1,18 @@
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################

from .DateTime import DateTime
from .DateTime import Timezones


__all__ = ('DateTime', 'Timezones')
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
375
llmlab/lib/python3.12/site-packages/DateTime/interfaces.py
Normal file
@ -0,0 +1,375 @@
|
||||
##############################################################################
|
||||
#
|
||||
# Copyright (c) 2005 Zope Foundation and Contributors.
|
||||
#
|
||||
# This software is subject to the provisions of the Zope Public License,
|
||||
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
|
||||
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
|
||||
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
|
||||
# FOR A PARTICULAR PURPOSE
|
||||
#
|
||||
##############################################################################
|
||||
from zope.interface import Interface
|
||||
|
||||
|
||||
class DateTimeError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class SyntaxError(DateTimeError):
|
||||
pass
|
||||
|
||||
|
||||
class DateError(DateTimeError):
|
||||
pass
|
||||
|
||||
|
||||
class TimeError(DateTimeError):
|
||||
pass
|
||||
|
||||
|
||||
class IDateTime(Interface):
|
||||
# Conversion and comparison methods
|
||||
|
||||
def localZone(ltm=None):
|
||||
"""Returns the time zone on the given date. The time zone
|
||||
can change according to daylight savings."""
|
||||
|
||||
def timeTime():
|
||||
"""Return the date/time as a floating-point number in UTC, in
|
||||
the format used by the Python time module. Note that it is
|
||||
possible to create date/time values with DateTime that have no
|
||||
meaningful value to the time module."""
|
||||
|
||||
def toZone(z):
|
||||
"""Return a DateTime with the value as the current object,
|
||||
represented in the indicated timezone."""
|
||||
|
||||
def isFuture():
|
||||
"""Return true if this object represents a date/time later
|
||||
than the time of the call"""
|
||||
|
||||
def isPast():
|
||||
"""Return true if this object represents a date/time earlier
|
||||
than the time of the call"""
|
||||
|
||||
def isCurrentYear():
|
||||
"""Return true if this object represents a date/time that
|
||||
falls within the current year, in the context of this
|
||||
object's timezone representation"""
|
||||
|
||||
def isCurrentMonth():
|
||||
"""Return true if this object represents a date/time that
|
||||
falls within the current month, in the context of this
|
||||
object's timezone representation"""
|
||||
|
||||
def isCurrentDay():
|
||||
"""Return true if this object represents a date/time that
|
||||
falls within the current day, in the context of this object's
|
||||
timezone representation"""
|
||||
|
||||
def isCurrentHour():
|
||||
"""Return true if this object represents a date/time that
|
||||
falls within the current hour, in the context of this object's
|
||||
timezone representation"""
|
||||
|
||||
def isCurrentMinute():
|
||||
"""Return true if this object represents a date/time that
|
||||
falls within the current minute, in the context of this
|
||||
object's timezone representation"""
|
||||
|
||||
def isLeapYear():
|
||||
"""Return true if the current year (in the context of the
|
||||
object's timezone) is a leap year"""
|
||||
|
||||
def earliestTime():
|
||||
"""Return a new DateTime object that represents the earliest
|
||||
possible time (in whole seconds) that still falls within the
|
||||
current object's day, in the object's timezone context"""
|
||||
|
||||
def latestTime():
|
||||
"""Return a new DateTime object that represents the latest
|
||||
possible time (in whole seconds) that still falls within the
|
||||
current object's day, in the object's timezone context"""
|
||||
|
||||
def greaterThan(t):
|
||||
"""Compare this DateTime object to another DateTime object OR
|
||||
a floating point number such as that which is returned by the
|
||||
Python time module. Returns true if the object represents a
|
||||
date/time greater than the specified DateTime or time module
|
||||
style time. Revised to give more correct results through
|
||||
comparison of long integer milliseconds."""
|
||||
|
||||
__gt__ = greaterThan
|
||||
|
||||
def greaterThanEqualTo(t):
|
||||
"""Compare this DateTime object to another DateTime object OR
|
||||
a floating point number such as that which is returned by the
|
||||
Python time module. Returns true if the object represents a
|
||||
date/time greater than or equal to the specified DateTime or
|
||||
time module style time. Revised to give more correct results
|
||||
through comparison of long integer milliseconds."""
|
||||
|
||||
__ge__ = greaterThanEqualTo
|
||||
|
||||
def equalTo(t):
|
||||
"""Compare this DateTime object to another DateTime object OR
|
||||
a floating point number such as that which is returned by the
|
||||
Python time module. Returns true if the object represents a
|
||||
date/time equal to the specified DateTime or time module style
|
||||
time. Revised to give more correct results through comparison
|
||||
of long integer milliseconds."""
|
||||
|
||||
__eq__ = equalTo
|
||||
|
||||
def notEqualTo(t):
|
||||
"""Compare this DateTime object to another DateTime object OR
|
||||
a floating point number such as that which is returned by the
|
||||
Python time module. Returns true if the object represents a
|
||||
date/time not equal to the specified DateTime or time module
|
||||
style time. Revised to give more correct results through
|
||||
comparison of long integer milliseconds."""
|
||||
|
||||
__ne__ = notEqualTo
|
||||
|
||||
def lessThan(t):
|
||||
"""Compare this DateTime object to another DateTime object OR
|
||||
a floating point number such as that which is returned by the
|
||||
Python time module. Returns true if the object represents a
|
||||
date/time less than the specified DateTime or time module
|
||||
style time. Revised to give more correct results through
|
||||
comparison of long integer milliseconds."""
|
||||
|
||||
__lt__ = lessThan
|
||||
|
||||
def lessThanEqualTo(t):
|
||||
"""Compare this DateTime object to another DateTime object OR
|
||||
a floating point number such as that which is returned by the
|
||||
Python time module. Returns true if the object represents a
|
||||
date/time less than or equal to the specified DateTime or time
|
||||
module style time. Revised to give more correct results
|
||||
through comparison of long integer milliseconds."""
|
||||
|
||||
__le__ = lessThanEqualTo
|
||||
|
||||
# Component access
|
||||
|
||||
def parts():
|
||||
"""Return a tuple containing the calendar year, month, day,
|
||||
hour, minute, second and timezone of the object"""
|
||||
|
||||
def timezone():
|
||||
"""Return the timezone in which the object is represented."""
|
||||
|
||||
def tzoffset():
|
||||
"""Return the timezone offset for the objects timezone."""
|
||||
|
||||
def year():
|
||||
"""Return the calendar year of the object"""
|
||||
|
||||
def month():
|
||||
"""Return the month of the object as an integer"""
|
||||
|
||||
def Month():
|
||||
"""Return the full month name"""
|
||||
|
||||
def aMonth():
|
||||
"""Return the abbreviated month name."""
|
||||
|
||||
def Mon():
|
||||
"""Compatibility: see aMonth"""
|
||||
|
||||
def pMonth():
|
||||
"""Return the abbreviated (with period) month name."""
|
||||
|
||||
def Mon_():
|
||||
"""Compatibility: see pMonth"""
|
||||
|
||||
def day():
|
||||
"""Return the integer day"""
|
||||
|
||||
def Day():
|
||||
"""Return the full name of the day of the week"""
|
||||
|
||||
def DayOfWeek():
|
||||
"""Compatibility: see Day"""
|
||||
|
||||
def dayOfYear():
|
||||
"""Return the day of the year, in context of the timezone
|
||||
representation of the object"""
|
||||
|
||||
def aDay():
|
||||
"""Return the abbreviated name of the day of the week"""
|
||||
|
||||
def pDay():
|
||||
"""Return the abbreviated (with period) name of the day of the
|
||||
week"""
|
||||
|
||||
def Day_():
|
||||
"""Compatibility: see pDay"""
|
||||
|
||||
def dow():
|
||||
"""Return the integer day of the week, where sunday is 0"""
|
||||
|
||||
def dow_1():
|
||||
"""Return the integer day of the week, where sunday is 1"""
|
||||
|
||||
def h_12():
|
||||
"""Return the 12-hour clock representation of the hour"""
|
||||
|
||||
def h_24():
|
||||
"""Return the 24-hour clock representation of the hour"""
|
||||
|
||||
def ampm():
|
||||
"""Return the appropriate time modifier (am or pm)"""
|
||||
|
||||
def hour():
|
||||
"""Return the 24-hour clock representation of the hour"""
|
||||
|
||||
def minute():
|
||||
"""Return the minute"""
|
||||
|
||||
def second():
|
||||
"""Return the second"""
|
||||
|
||||
def millis():
|
||||
"""Return the millisecond since the epoch in GMT."""
|
||||
|
||||
def strftime(format):
|
||||
"""Format the date/time using the *current timezone representation*."""
|
||||
|
||||
# General formats from previous DateTime
|
||||
|
||||
def Date():
|
||||
"""Return the date string for the object."""
|
||||
|
||||
def Time():
|
||||
"""Return the time string for an object to the nearest second."""
|
||||
|
||||
def TimeMinutes():
|
||||
"""Return the time string for an object not showing seconds."""
|
||||
|
||||
def AMPM():
|
||||
"""Return the time string for an object to the nearest second."""
|
||||
|
||||
def AMPMMinutes():
|
||||
"""Return the time string for an object not showing seconds."""
|
||||
|
||||
def PreciseTime():
|
||||
"""Return the time string for the object."""
|
||||
|
||||
def PreciseAMPM():
|
||||
"""Return the time string for the object."""
|
||||
|
||||
def yy():
|
||||
"""Return calendar year as a 2 digit string"""
|
||||
|
||||
def mm():
|
||||
"""Return month as a 2 digit string"""
|
||||
|
||||
def dd():
|
||||
"""Return day as a 2 digit string"""
|
||||
|
||||
def rfc822():
|
||||
"""Return the date in RFC 822 format"""
|
||||
|
||||
# New formats
|
||||
|
||||
def fCommon():
|
||||
"""Return a string representing the object's value in the
|
||||
format: March 1, 1997 1:45 pm"""
|
||||
|
||||
def fCommonZ():
|
||||
"""Return a string representing the object's value in the
|
||||
format: March 1, 1997 1:45 pm US/Eastern"""
|
||||
|
||||
def aCommon():
|
||||
"""Return a string representing the object's value in the
|
||||
format: Mar 1, 1997 1:45 pm"""
|
||||
|
||||
def aCommonZ():
|
||||
"""Return a string representing the object's value in the
|
||||
format: Mar 1, 1997 1:45 pm US/Eastern"""
|
||||
|
||||
def pCommon():
|
||||
"""Return a string representing the object's value in the
|
||||
format: Mar. 1, 1997 1:45 pm"""
|
||||
|
||||
def pCommonZ():
|
||||
"""Return a string representing the object's value
|
||||
in the format: Mar. 1, 1997 1:45 pm US/Eastern"""
|
||||
|
||||
def ISO():
|
||||
"""Return the object in ISO standard format. Note: this is
|
||||
*not* ISO 8601-format! See the ISO8601 and HTML4 methods below
|
||||
for ISO 8601-compliant output
|
||||
|
||||
Dates are output as: YYYY-MM-DD HH:MM:SS
|
||||
"""
|
||||
|
||||
def ISO8601():
|
||||
"""Return the object in ISO 8601-compatible format containing
|
||||
the date, time with seconds-precision and the time zone
|
||||
identifier - see http://www.w3.org/TR/NOTE-datetime
|
||||
|
||||
Dates are output as: YYYY-MM-DDTHH:MM:SSTZD
|
||||
T is a literal character.
|
||||
TZD is Time Zone Designator, format +HH:MM or -HH:MM
|
||||
|
||||
The HTML4 method below offers the same formatting, but
|
||||
converts to UTC before returning the value and sets the TZD to "Z"
|
||||
"""
|
||||
|
||||
def HTML4():
|
||||
"""Return the object in the format used in the HTML4.0
|
||||
specification, one of the standard forms in ISO8601. See
|
||||
http://www.w3.org/TR/NOTE-datetime
|
||||
|
||||
Dates are output as: YYYY-MM-DDTHH:MM:SSZ
|
||||
T, Z are literal characters.
|
||||
The time is in UTC.
|
||||
"""
|
||||
|
||||
def JulianDay():
|
||||
"""Return the Julian day according to
|
||||
https://www.tondering.dk/claus/cal/julperiod.php#formula
|
||||
"""
|
||||
|
||||
def week():
|
||||
"""Return the week number according to ISO.
|
||||
|
||||
See https://www.tondering.dk/claus/cal/week.php#weekno
|
||||
"""
|
||||
|
||||
# Python operator and conversion API
|
||||
|
||||
def __add__(other):
|
||||
"""A DateTime may be added to a number and a number may be
|
||||
added to a DateTime; two DateTimes cannot be added."""
|
||||
|
||||
__radd__ = __add__
|
||||
|
||||
def __sub__(other):
|
||||
"""Either a DateTime or a number may be subtracted from a
|
||||
DateTime, however, a DateTime may not be subtracted from a
|
||||
number."""
|
||||
|
||||
def __repr__():
|
||||
"""Convert a DateTime to a string that looks like a Python
|
||||
expression."""
|
||||
|
||||
def __str__():
|
||||
"""Convert a DateTime to a string."""
|
||||
|
||||
def __hash__():
|
||||
"""Compute a hash value for a DateTime"""
|
||||
|
||||
def __int__():
|
||||
"""Convert to an integer number of seconds since the epoch (gmt)"""
|
||||
|
||||
def __long__():
|
||||
"""Convert to a long-int number of seconds since the epoch (gmt)"""
|
||||
|
||||
def __float__():
|
||||
"""Convert to floating-point number of seconds since the epoch (gmt)"""
|
||||
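A small aside, added here for illustration and not part of the vendored interfaces.py: the exception classes defined at the top of the file form a hierarchy, with SyntaxError, DateError and TimeError all deriving from DateTimeError, so a caller that only cares about "could not be parsed" can catch the base class. The helper name below (parse_or_none) is hypothetical; the failing input string is the one used later in pytz.txt.

    from DateTime import DateTime
    from DateTime.interfaces import DateTimeError

    def parse_or_none(value):
        # SyntaxError, DateError and TimeError are subclasses of DateTimeError,
        # so this single except clause covers all parse failures.
        try:
            return DateTime(value)
        except DateTimeError:
            return None

    assert parse_or_none('2002-05-02T08:00:00') is not None
    assert parse_or_none('July 21, 1969 Moon/Eastern') is None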
192
llmlab/lib/python3.12/site-packages/DateTime/pytz.txt
Normal file
@ -0,0 +1,192 @@
|
||||
Pytz Support
|
||||
============
|
||||
|
||||
Allows the pytz package to be used for time zone information. The
|
||||
advantage of using pytz is that it has a more complete and up to date
|
||||
time zone and daylight savings time database.
|
||||
|
||||
Usage
|
||||
-----
|
||||
You don't have to do anything special to make it work.
|
||||
|
||||
>>> from DateTime import DateTime, Timezones
|
||||
>>> d = DateTime('March 11, 2007 US/Eastern')
|
||||
|
||||
Daylight Savings
|
||||
----------------
|
||||
In 2007 daylight savings time in the US was changed. The Energy Policy
|
||||
Act of 2005 mandates that DST will start on the second Sunday in March
|
||||
and end on the first Sunday in November.
|
||||
|
||||
In 2007, the start and stop dates are March 11 and November 4,
|
||||
respectively. These dates are different from previous DST start and
|
||||
stop dates. In 2006, the dates were the first Sunday in April (April
|
||||
2, 2006) and the last Sunday in October (October 29, 2006).
|
||||
|
||||
Let's make sure that DateTime can deal with this, since the primary
|
||||
motivation to use pytz for time zone information is the fact that it
|
||||
is kept up to date with daylight savings changes.
|
||||
|
||||
>>> DateTime('March 11, 2007 US/Eastern').tzoffset()
|
||||
-18000
|
||||
>>> DateTime('March 12, 2007 US/Eastern').tzoffset()
|
||||
-14400
|
||||
>>> DateTime('November 4, 2007 US/Eastern').tzoffset()
|
||||
-14400
|
||||
>>> DateTime('November 5, 2007 US/Eastern').tzoffset()
|
||||
-18000
|
||||
|
||||
Let's compare this to 2006.
|
||||
|
||||
>>> DateTime('April 2, 2006 US/Eastern').tzoffset()
|
||||
-18000
|
||||
>>> DateTime('April 3, 2006 US/Eastern').tzoffset()
|
||||
-14400
|
||||
>>> DateTime('October 29, 2006 US/Eastern').tzoffset()
|
||||
-14400
|
||||
>>> DateTime('October 30, 2006 US/Eastern').tzoffset()
|
||||
-18000
|
||||
|
||||
Time Zones
|
||||
----------
|
||||
DateTime can use pytz's large database of time zones. Here are some
|
||||
examples:
|
||||
|
||||
>>> d = DateTime('Pacific/Kwajalein')
|
||||
>>> d = DateTime('America/Shiprock')
|
||||
>>> d = DateTime('Africa/Ouagadougou')
|
||||
|
||||
Of course pytz doesn't know about everything.
|
||||
|
||||
>>> from DateTime.interfaces import SyntaxError
|
||||
>>> try:
|
||||
... d = DateTime('July 21, 1969 Moon/Eastern')
|
||||
... print('fail')
|
||||
... except SyntaxError:
|
||||
... print('ok')
|
||||
ok
|
||||
|
||||
You can still use zone names that DateTime defines that aren't part of
|
||||
the pytz database.
|
||||
|
||||
>>> d = DateTime('eet')
|
||||
>>> d = DateTime('iceland')
|
||||
|
||||
These time zones use DateTime's own database, so it's preferable to use the
|
||||
official time zone name.
|
||||
|
||||
One tricky point is that DateTime supports some zone name
|
||||
abbreviations. Some of these map to pytz names, so these abbreviations
|
||||
will give you time zone data from pytz. Notable among abbreviations
|
||||
that work this way are 'est', 'cst', 'mst', and 'pst'.
|
||||
|
||||
Let's verify that 'est' picks up the 2007 daylight savings time changes.
|
||||
|
||||
>>> DateTime('March 11, 2007 est').tzoffset()
|
||||
-18000
|
||||
>>> DateTime('March 12, 2007 est').tzoffset()
|
||||
-14400
|
||||
>>> DateTime('November 4, 2007 est').tzoffset()
|
||||
-14400
|
||||
>>> DateTime('November 5, 2007 est').tzoffset()
|
||||
-18000
|
||||
|
||||
You can get a list of time zones supported by calling the Timezones() function.
|
||||
|
||||
>>> Timezones() #doctest: +ELLIPSIS
|
||||
['Africa/Abidjan', 'Africa/Accra', 'Africa/Addis_Ababa', ...]
|
||||
|
||||
Note that you can mess with this list without hurting things.
|
||||
|
||||
>>> t = Timezones()
|
||||
>>> t.remove('US/Eastern')
|
||||
>>> d = DateTime('US/Eastern')
|
||||
|
||||
|
||||
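As a quick recap before the internal details, here is a hedged sketch (added for illustration, not part of the vendored pytz.txt) that only restates the behaviour shown above: Timezones() returns an informational list of supported names, and mutating that list does not disable the corresponding zone.

    from DateTime import DateTime, Timezones

    zones = Timezones()
    assert 'US/Eastern' in zones

    zones.remove('US/Eastern')      # mutating the list does not disable the zone
    d = DateTime('US/Eastern')      # ...so it still parses fine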
Internal Components
|
||||
-------------------
|
||||
|
||||
The following are tests of internal components.
|
||||
|
||||
Cache
|
||||
~~~~~
|
||||
|
||||
The DateTime class uses a new time zone cache.
|
||||
|
||||
>>> from DateTime.DateTime import _TZINFO
|
||||
>>> _TZINFO #doctest: +ELLIPSIS
|
||||
<DateTime.pytz_support.PytzCache ...>
|
||||
|
||||
The cache maps time zone names to time zone instances.
|
||||
|
||||
>>> cache = _TZINFO
|
||||
>>> tz = cache['GMT+730']
|
||||
>>> tz = cache['US/Mountain']
|
||||
|
||||
The cache also must provide a few attributes for use by the DateTime
|
||||
class.
|
||||
|
||||
The _zlst attribute is a list of supported time zone names.
|
||||
|
||||
>>> cache._zlst #doctest: +ELLIPSIS
|
||||
['Africa/Abidjan'... 'Africa/Accra'... 'IDLE'... 'NZST'... 'NZT'...]
|
||||
|
||||
The _zidx attribute is a list of lower-case and possibly abbreviated
|
||||
time zone names that can be mapped to official zone names.
|
||||
|
||||
>>> 'australia/yancowinna' in cache._zidx
|
||||
True
|
||||
>>> 'europe/isle_of_man' in cache._zidx
|
||||
True
|
||||
>>> 'gmt+0500' in cache._zidx
|
||||
True
|
||||
|
||||
Note that there are more items in _zidx than in _zlst since there are
|
||||
multiple names for some time zones.
|
||||
|
||||
>>> len(cache._zidx) > len(cache._zlst)
|
||||
True
|
||||
|
||||
Each entry in _zlst should also be present in _zidx in lower case form.
|
||||
|
||||
>>> for name in cache._zlst:
|
||||
... if not name.lower() in cache._zidx:
|
||||
... print("Error %s not in _zidx" % name.lower())
|
||||
|
||||
The _zmap attribute maps the names in _zidx to official names in _zlst.
|
||||
|
||||
>>> cache._zmap['africa/abidjan']
|
||||
'Africa/Abidjan'
|
||||
>>> cache._zmap['gmt+1']
|
||||
'GMT+1'
|
||||
>>> cache._zmap['gmt+0100']
|
||||
'GMT+1'
|
||||
>>> cache._zmap['utc']
|
||||
'UTC'
|
||||
|
||||
Let's make sure that _zmap and _zidx agree.
|
||||
|
||||
>>> idx = set(cache._zidx)
|
||||
>>> keys = set(cache._zmap.keys())
|
||||
>>> idx == keys
|
||||
True
|
||||
|
||||
Timezone objects
|
||||
~~~~~~~~~~~~~~~~
|
||||
The timezone instances have only one public method info(). It returns
|
||||
a tuple of (offset, is_dst, name). The method takes a timestamp, which
|
||||
is used to determine dst information.
|
||||
|
||||
>>> t1 = DateTime('November 4, 00:00 2007 US/Mountain').timeTime()
|
||||
>>> t2 = DateTime('November 4, 02:00 2007 US/Mountain').timeTime()
|
||||
>>> tz.info(t1)
|
||||
(-21600, 1, 'MDT')
|
||||
>>> tz.info(t2)
|
||||
(-25200, 0, 'MST')
|
||||
|
||||
If you don't pass any arguments to info it provides daylight savings
|
||||
time information as of today.
|
||||
|
||||
>>> tz.info() in ((-21600, 1, 'MDT'), (-25200, 0, 'MST'))
|
||||
True
|
||||
|
||||
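To make the cache/timezone relationship above concrete, here is a short sketch added for illustration; _TZINFO and Timezone.info() are internal names that appear in the doctests above and in pytz_support.py below, not a public API.

    from DateTime.DateTime import _TZINFO

    tz = _TZINFO['US/Mountain']        # PytzCache.__getitem__ returns a Timezone wrapper
    offset, is_dst, name = tz.info()   # DST-aware (offset_seconds, is_dst, abbreviation) for "now"
    assert name in ('MST', 'MDT')
    assert offset in (-25200, -21600)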
269
llmlab/lib/python3.12/site-packages/DateTime/pytz_support.py
Normal file
@ -0,0 +1,269 @@
|
||||
##############################################################################
|
||||
#
|
||||
# Copyright (c) 2007 Zope Foundation and Contributors.
|
||||
#
|
||||
# This software is subject to the provisions of the Zope Public License,
|
||||
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
|
||||
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
|
||||
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
|
||||
# FOR A PARTICULAR PURPOSE
|
||||
#
|
||||
##############################################################################
|
||||
|
||||
from datetime import datetime
|
||||
from datetime import timedelta
|
||||
|
||||
import pytz
|
||||
import pytz.reference
|
||||
from pytz.tzinfo import StaticTzInfo
|
||||
from pytz.tzinfo import memorized_timedelta
|
||||
|
||||
from .interfaces import DateTimeError
|
||||
|
||||
|
||||
EPOCH = datetime.fromtimestamp(0, tz=pytz.utc)
|
||||
|
||||
_numeric_timezone_data = {
|
||||
'GMT': ('GMT', 0, 1, [], '', [(0, 0, 0)], 'GMT\000'),
|
||||
'GMT+0': ('GMT+0', 0, 1, [], '', [(0, 0, 0)], 'GMT+0000\000'),
|
||||
'GMT+1': ('GMT+1', 0, 1, [], '', [(3600, 0, 0)], 'GMT+0100\000'),
|
||||
'GMT+2': ('GMT+2', 0, 1, [], '', [(7200, 0, 0)], 'GMT+0200\000'),
|
||||
'GMT+3': ('GMT+3', 0, 1, [], '', [(10800, 0, 0)], 'GMT+0300\000'),
|
||||
'GMT+4': ('GMT+4', 0, 1, [], '', [(14400, 0, 0)], 'GMT+0400\000'),
|
||||
'GMT+5': ('GMT+5', 0, 1, [], '', [(18000, 0, 0)], 'GMT+0500\000'),
|
||||
'GMT+6': ('GMT+6', 0, 1, [], '', [(21600, 0, 0)], 'GMT+0600\000'),
|
||||
'GMT+7': ('GMT+7', 0, 1, [], '', [(25200, 0, 0)], 'GMT+0700\000'),
|
||||
'GMT+8': ('GMT+8', 0, 1, [], '', [(28800, 0, 0)], 'GMT+0800\000'),
|
||||
'GMT+9': ('GMT+9', 0, 1, [], '', [(32400, 0, 0)], 'GMT+0900\000'),
|
||||
'GMT+10': ('GMT+10', 0, 1, [], '', [(36000, 0, 0)], 'GMT+1000\000'),
|
||||
'GMT+11': ('GMT+11', 0, 1, [], '', [(39600, 0, 0)], 'GMT+1100\000'),
|
||||
'GMT+12': ('GMT+12', 0, 1, [], '', [(43200, 0, 0)], 'GMT+1200\000'),
|
||||
'GMT+13': ('GMT+13', 0, 1, [], '', [(46800, 0, 0)], 'GMT+1300\000'),
|
||||
|
||||
'GMT-1': ('GMT-1', 0, 1, [], '', [(-3600, 0, 0)], 'GMT-0100\000'),
|
||||
'GMT-2': ('GMT-2', 0, 1, [], '', [(-7200, 0, 0)], 'GMT-0200\000'),
|
||||
'GMT-3': ('GMT-3', 0, 1, [], '', [(-10800, 0, 0)], 'GMT-0300\000'),
|
||||
'GMT-4': ('GMT-4', 0, 1, [], '', [(-14400, 0, 0)], 'GMT-0400\000'),
|
||||
'GMT-5': ('GMT-5', 0, 1, [], '', [(-18000, 0, 0)], 'GMT-0500\000'),
|
||||
'GMT-6': ('GMT-6', 0, 1, [], '', [(-21600, 0, 0)], 'GMT-0600\000'),
|
||||
'GMT-7': ('GMT-7', 0, 1, [], '', [(-25200, 0, 0)], 'GMT-0700\000'),
|
||||
'GMT-8': ('GMT-8', 0, 1, [], '', [(-28800, 0, 0)], 'GMT-0800\000'),
|
||||
'GMT-9': ('GMT-9', 0, 1, [], '', [(-32400, 0, 0)], 'GMT-0900\000'),
|
||||
'GMT-10': ('GMT-10', 0, 1, [], '', [(-36000, 0, 0)], 'GMT-1000\000'),
|
||||
'GMT-11': ('GMT-11', 0, 1, [], '', [(-39600, 0, 0)], 'GMT-1100\000'),
|
||||
'GMT-12': ('GMT-12', 0, 1, [], '', [(-43200, 0, 0)], 'GMT-1200\000'),
|
||||
|
||||
'GMT+0130': ('GMT+0130', 0, 1, [], '', [(5400, 0, 0)], 'GMT+0130\000'),
|
||||
'GMT+0230': ('GMT+0230', 0, 1, [], '', [(9000, 0, 0)], 'GMT+0230\000'),
|
||||
'GMT+0330': ('GMT+0330', 0, 1, [], '', [(12600, 0, 0)], 'GMT+0330\000'),
|
||||
'GMT+0430': ('GMT+0430', 0, 1, [], '', [(16200, 0, 0)], 'GMT+0430\000'),
|
||||
'GMT+0530': ('GMT+0530', 0, 1, [], '', [(19800, 0, 0)], 'GMT+0530\000'),
|
||||
'GMT+0630': ('GMT+0630', 0, 1, [], '', [(23400, 0, 0)], 'GMT+0630\000'),
|
||||
'GMT+0730': ('GMT+0730', 0, 1, [], '', [(27000, 0, 0)], 'GMT+0730\000'),
|
||||
'GMT+0830': ('GMT+0830', 0, 1, [], '', [(30600, 0, 0)], 'GMT+0830\000'),
|
||||
'GMT+0930': ('GMT+0930', 0, 1, [], '', [(34200, 0, 0)], 'GMT+0930\000'),
|
||||
'GMT+1030': ('GMT+1030', 0, 1, [], '', [(37800, 0, 0)], 'GMT+1030\000'),
|
||||
'GMT+1130': ('GMT+1130', 0, 1, [], '', [(41400, 0, 0)], 'GMT+1130\000'),
|
||||
'GMT+1230': ('GMT+1230', 0, 1, [], '', [(45000, 0, 0)], 'GMT+1230\000'),
|
||||
|
||||
'GMT-0130': ('GMT-0130', 0, 1, [], '', [(-5400, 0, 0)], 'GMT-0130\000'),
|
||||
'GMT-0230': ('GMT-0230', 0, 1, [], '', [(-9000, 0, 0)], 'GMT-0230\000'),
|
||||
'GMT-0330': ('GMT-0330', 0, 1, [], '', [(-12600, 0, 0)], 'GMT-0330\000'),
|
||||
'GMT-0430': ('GMT-0430', 0, 1, [], '', [(-16200, 0, 0)], 'GMT-0430\000'),
|
||||
'GMT-0530': ('GMT-0530', 0, 1, [], '', [(-19800, 0, 0)], 'GMT-0530\000'),
|
||||
'GMT-0630': ('GMT-0630', 0, 1, [], '', [(-23400, 0, 0)], 'GMT-0630\000'),
|
||||
'GMT-0730': ('GMT-0730', 0, 1, [], '', [(-27000, 0, 0)], 'GMT-0730\000'),
|
||||
'GMT-0830': ('GMT-0830', 0, 1, [], '', [(-30600, 0, 0)], 'GMT-0830\000'),
|
||||
'GMT-0930': ('GMT-0930', 0, 1, [], '', [(-34200, 0, 0)], 'GMT-0930\000'),
|
||||
'GMT-1030': ('GMT-1030', 0, 1, [], '', [(-37800, 0, 0)], 'GMT-1030\000'),
|
||||
'GMT-1130': ('GMT-1130', 0, 1, [], '', [(-41400, 0, 0)], 'GMT-1130\000'),
|
||||
'GMT-1230': ('GMT-1230', 0, 1, [], '', [(-45000, 0, 0)], 'GMT-1230\000'),
|
||||
}
|
||||
|
||||
# These are the timezones not in pytz.common_timezones
|
||||
_old_zlst = [
|
||||
'AST', 'AT', 'BST', 'BT', 'CCT',
|
||||
'CET', 'CST', 'Cuba', 'EADT', 'EAST',
|
||||
'EEST', 'EET', 'EST', 'Egypt', 'FST',
|
||||
'FWT', 'GB-Eire', 'GMT+0100', 'GMT+0130', 'GMT+0200',
|
||||
'GMT+0230', 'GMT+0300', 'GMT+0330', 'GMT+0400', 'GMT+0430',
|
||||
'GMT+0500', 'GMT+0530', 'GMT+0600', 'GMT+0630', 'GMT+0700',
|
||||
'GMT+0730', 'GMT+0800', 'GMT+0830', 'GMT+0900', 'GMT+0930',
|
||||
'GMT+1', 'GMT+1000', 'GMT+1030', 'GMT+1100', 'GMT+1130',
|
||||
'GMT+1200', 'GMT+1230', 'GMT+1300', 'GMT-0100', 'GMT-0130',
|
||||
'GMT-0200', 'GMT-0300', 'GMT-0400', 'GMT-0500', 'GMT-0600',
|
||||
'GMT-0630', 'GMT-0700', 'GMT-0730', 'GMT-0800', 'GMT-0830',
|
||||
'GMT-0900', 'GMT-0930', 'GMT-1000', 'GMT-1030', 'GMT-1100',
|
||||
'GMT-1130', 'GMT-1200', 'GMT-1230', 'GST', 'Greenwich',
|
||||
'Hongkong', 'IDLE', 'IDLW', 'Iceland', 'Iran',
|
||||
'Israel', 'JST', 'Jamaica', 'Japan', 'MEST',
|
||||
'MET', 'MEWT', 'MST', 'NT', 'NZDT',
|
||||
'NZST', 'NZT', 'PST', 'Poland', 'SST',
|
||||
'SWT', 'Singapore', 'Turkey', 'UCT', 'UT',
|
||||
'Universal', 'WADT', 'WAST', 'WAT', 'WET',
|
||||
'ZP4', 'ZP5', 'ZP6',
|
||||
]
|
||||
|
||||
_old_zmap = {
|
||||
'aest': 'GMT+10', 'aedt': 'GMT+11',
|
||||
'aus eastern standard time': 'GMT+10',
|
||||
'sydney standard time': 'GMT+10',
|
||||
'tasmania standard time': 'GMT+10',
|
||||
'e. australia standard time': 'GMT+10',
|
||||
'aus central standard time': 'GMT+0930',
|
||||
'cen. australia standard time': 'GMT+0930',
|
||||
'w. australia standard time': 'GMT+8',
|
||||
|
||||
'central europe standard time': 'GMT+1',
|
||||
'eastern standard time': 'US/Eastern',
|
||||
'us eastern standard time': 'US/Eastern',
|
||||
'central standard time': 'US/Central',
|
||||
'mountain standard time': 'US/Mountain',
|
||||
'pacific standard time': 'US/Pacific',
|
||||
'mst': 'US/Mountain', 'pst': 'US/Pacific',
|
||||
'cst': 'US/Central', 'est': 'US/Eastern',
|
||||
|
||||
'gmt+0000': 'GMT+0', 'gmt+0': 'GMT+0',
|
||||
|
||||
'gmt+0100': 'GMT+1', 'gmt+0200': 'GMT+2', 'gmt+0300': 'GMT+3',
|
||||
'gmt+0400': 'GMT+4', 'gmt+0500': 'GMT+5', 'gmt+0600': 'GMT+6',
|
||||
'gmt+0700': 'GMT+7', 'gmt+0800': 'GMT+8', 'gmt+0900': 'GMT+9',
|
||||
'gmt+1000': 'GMT+10', 'gmt+1100': 'GMT+11', 'gmt+1200': 'GMT+12',
|
||||
'gmt+1300': 'GMT+13',
|
||||
'gmt-0100': 'GMT-1', 'gmt-0200': 'GMT-2', 'gmt-0300': 'GMT-3',
|
||||
'gmt-0400': 'GMT-4', 'gmt-0500': 'GMT-5', 'gmt-0600': 'GMT-6',
|
||||
'gmt-0700': 'GMT-7', 'gmt-0800': 'GMT-8', 'gmt-0900': 'GMT-9',
|
||||
'gmt-1000': 'GMT-10', 'gmt-1100': 'GMT-11', 'gmt-1200': 'GMT-12',
|
||||
|
||||
'gmt+1': 'GMT+1', 'gmt+2': 'GMT+2', 'gmt+3': 'GMT+3',
|
||||
'gmt+4': 'GMT+4', 'gmt+5': 'GMT+5', 'gmt+6': 'GMT+6',
|
||||
'gmt+7': 'GMT+7', 'gmt+8': 'GMT+8', 'gmt+9': 'GMT+9',
|
||||
'gmt+10': 'GMT+10', 'gmt+11': 'GMT+11', 'gmt+12': 'GMT+12',
|
||||
'gmt+13': 'GMT+13',
|
||||
'gmt-1': 'GMT-1', 'gmt-2': 'GMT-2', 'gmt-3': 'GMT-3',
|
||||
'gmt-4': 'GMT-4', 'gmt-5': 'GMT-5', 'gmt-6': 'GMT-6',
|
||||
'gmt-7': 'GMT-7', 'gmt-8': 'GMT-8', 'gmt-9': 'GMT-9',
|
||||
'gmt-10': 'GMT-10', 'gmt-11': 'GMT-11', 'gmt-12': 'GMT-12',
|
||||
|
||||
'gmt+130': 'GMT+0130', 'gmt+0130': 'GMT+0130',
|
||||
'gmt+230': 'GMT+0230', 'gmt+0230': 'GMT+0230',
|
||||
'gmt+330': 'GMT+0330', 'gmt+0330': 'GMT+0330',
|
||||
'gmt+430': 'GMT+0430', 'gmt+0430': 'GMT+0430',
|
||||
'gmt+530': 'GMT+0530', 'gmt+0530': 'GMT+0530',
|
||||
'gmt+630': 'GMT+0630', 'gmt+0630': 'GMT+0630',
|
||||
'gmt+730': 'GMT+0730', 'gmt+0730': 'GMT+0730',
|
||||
'gmt+830': 'GMT+0830', 'gmt+0830': 'GMT+0830',
|
||||
'gmt+930': 'GMT+0930', 'gmt+0930': 'GMT+0930',
|
||||
'gmt+1030': 'GMT+1030',
|
||||
'gmt+1130': 'GMT+1130',
|
||||
'gmt+1230': 'GMT+1230',
|
||||
|
||||
'gmt-130': 'GMT-0130', 'gmt-0130': 'GMT-0130',
|
||||
'gmt-230': 'GMT-0230', 'gmt-0230': 'GMT-0230',
|
||||
'gmt-330': 'GMT-0330', 'gmt-0330': 'GMT-0330',
|
||||
'gmt-430': 'GMT-0430', 'gmt-0430': 'GMT-0430',
|
||||
'gmt-530': 'GMT-0530', 'gmt-0530': 'GMT-0530',
|
||||
'gmt-630': 'GMT-0630', 'gmt-0630': 'GMT-0630',
|
||||
'gmt-730': 'GMT-0730', 'gmt-0730': 'GMT-0730',
|
||||
'gmt-830': 'GMT-0830', 'gmt-0830': 'GMT-0830',
|
||||
'gmt-930': 'GMT-0930', 'gmt-0930': 'GMT-0930',
|
||||
'gmt-1030': 'GMT-1030',
|
||||
'gmt-1130': 'GMT-1130',
|
||||
'gmt-1230': 'GMT-1230',
|
||||
|
||||
'ut': 'Universal',
|
||||
'bst': 'GMT+1', 'mest': 'GMT+2', 'sst': 'GMT+2',
|
||||
'fst': 'GMT+2', 'wadt': 'GMT+8', 'eadt': 'GMT+11', 'nzdt': 'GMT+13',
|
||||
'wet': 'GMT', 'wat': 'GMT+1', 'at': 'GMT-2', 'ast': 'GMT-4',
|
||||
'nt': 'GMT-11', 'idlw': 'GMT-12', 'cet': 'GMT+1', 'cest': 'GMT+2',
|
||||
'met': 'GMT+1',
|
||||
'mewt': 'GMT+1', 'swt': 'GMT+1', 'fwt': 'GMT+1', 'eet': 'GMT+2',
|
||||
'eest': 'GMT+3',
|
||||
'bt': 'GMT+3', 'zp4': 'GMT+4', 'zp5': 'GMT+5', 'zp6': 'GMT+6',
|
||||
'wast': 'GMT+7', 'cct': 'GMT+8', 'jst': 'GMT+9', 'east': 'GMT+10',
|
||||
'gst': 'GMT+10', 'nzt': 'GMT+12', 'nzst': 'GMT+12', 'idle': 'GMT+12',
|
||||
'ret': 'GMT+4', 'ist': 'GMT+0530', 'edt': 'GMT-4',
|
||||
|
||||
}
|
||||
|
||||
|
||||
# some timezone definitions of the "-0400" are not working
|
||||
# when upgrading
|
||||
for hour in range(0, 13):
|
||||
hour = hour
|
||||
fhour = str(hour)
|
||||
if len(fhour) == 1:
|
||||
fhour = '0' + fhour
|
||||
_old_zmap['-%s00' % fhour] = 'GMT-%i' % hour
|
||||
_old_zmap['+%s00' % fhour] = 'GMT+%i' % hour
|
||||
|
||||
|
||||
def _p(zone):
|
||||
return _numeric_timezones[zone]
|
||||
|
||||
|
||||
def _static_timezone_factory(data):
|
||||
zone = data[0]
|
||||
cls = type(zone, (StaticTzInfo,), dict(
|
||||
__reduce__=lambda _: (_p, (zone, )),
|
||||
zone=zone,
|
||||
_utcoffset=memorized_timedelta(data[5][0][0]),
|
||||
_tzname=data[6][:-1])) # strip the trailing null
|
||||
return cls()
|
||||
|
||||
|
||||
_numeric_timezones = {key: _static_timezone_factory(data)
|
||||
for key, data in _numeric_timezone_data.items()}
|
||||
|
||||
|
||||
class Timezone:
|
||||
"""
|
||||
Timezone information returned by PytzCache.__getitem__
|
||||
Adapts datetime.tzinfo object to DateTime._timezone interface
|
||||
"""
|
||||
|
||||
def __init__(self, tzinfo):
|
||||
self.tzinfo = tzinfo
|
||||
|
||||
def info(self, t=None):
|
||||
if t is None:
|
||||
dt = datetime.now(tz=pytz.utc)
|
||||
else:
|
||||
# can't use utcfromtimestamp past 2038
|
||||
dt = EPOCH + timedelta(0, t)
|
||||
|
||||
# need to normalize tzinfo for the datetime to deal with
|
||||
# daylight savings time.
|
||||
normalized_dt = self.tzinfo.normalize(dt.astimezone(self.tzinfo))
|
||||
normalized_tzinfo = normalized_dt.tzinfo
|
||||
|
||||
offset = normalized_tzinfo.utcoffset(normalized_dt)
|
||||
secs = offset.days * 24 * 60 * 60 + offset.seconds
|
||||
dst = normalized_tzinfo.dst(normalized_dt)
|
||||
if dst == timedelta(0):
|
||||
is_dst = 0
|
||||
else:
|
||||
is_dst = 1
|
||||
return secs, is_dst, normalized_tzinfo.tzname(normalized_dt)
|
||||
|
||||
|
||||
class PytzCache:
|
||||
"""
|
||||
Reimplementation of the DateTime._cache class that uses pytz for timezone info
|
||||
"""
|
||||
|
||||
_zlst = pytz.common_timezones + _old_zlst # used by DateTime.TimeZones
|
||||
_zmap = {name.lower(): name for name in pytz.all_timezones}
|
||||
_zmap.update(_old_zmap) # These must take priority
|
||||
_zidx = _zmap.keys()
|
||||
|
||||
def __getitem__(self, key):
|
||||
name = self._zmap.get(key.lower(), key) # fallback to key
|
||||
try:
|
||||
return Timezone(pytz.timezone(name))
|
||||
except pytz.UnknownTimeZoneError:
|
||||
try:
|
||||
return Timezone(_numeric_timezones[name])
|
||||
except KeyError:
|
||||
raise DateTimeError('Unrecognized timezone: %s' % key)
|
||||
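A brief usage sketch of the class above, added here for illustration (PytzCache is an internal helper of the vendored package, so this is not an endorsed public API; the unknown zone name is made up): lookups first try pytz, then fall back to the static numeric zones, and finally raise DateTimeError.

    from DateTime.interfaces import DateTimeError
    from DateTime.pytz_support import PytzCache

    cache = PytzCache()
    print(cache['US/Eastern'].info())    # resolved through pytz
    print(cache['GMT+0530'].info())      # resolved through _numeric_timezones

    try:
        cache['Not/AZone']
    except DateTimeError:
        print('unrecognized zone names raise DateTimeError')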
@ -0,0 +1,15 @@
|
||||
##############################################################################
|
||||
#
|
||||
# Copyright (c) 2003 Zope Foundation and Contributors.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# This software is subject to the provisions of the Zope Public License,
|
||||
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
|
||||
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
|
||||
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
|
||||
# FOR A PARTICULAR PURPOSE.
|
||||
#
|
||||
##############################################################################
|
||||
|
||||
# This file is needed to make this a package.
|
||||
Binary file not shown.
Binary file not shown.
@ -0,0 +1,57 @@
|
||||
1970-01-01 (1970, 1, 4)
|
||||
1970-01-02 (1970, 1, 5)
|
||||
1970-01-30 (1970, 5, 5)
|
||||
1970-01-31 (1970, 5, 6)
|
||||
1970-02-01 (1970, 5, 7)
|
||||
1970-02-02 (1970, 6, 1)
|
||||
1970-02-28 (1970, 9, 6)
|
||||
1970-03-01 (1970, 9, 7)
|
||||
1970-03-30 (1970, 14, 1)
|
||||
1970-03-31 (1970, 14, 2)
|
||||
1970-04-01 (1970, 14, 3)
|
||||
1970-09-30 (1970, 40, 3)
|
||||
1970-10-01 (1970, 40, 4)
|
||||
1970-10-02 (1970, 40, 5)
|
||||
1970-10-03 (1970, 40, 6)
|
||||
1970-10-04 (1970, 40, 7)
|
||||
1970-10-05 (1970, 41, 1)
|
||||
1971-01-02 (1970, 53, 6)
|
||||
1971-01-03 (1970, 53, 7)
|
||||
1971-01-04 (1971, 1, 1)
|
||||
1971-01-05 (1971, 1, 2)
|
||||
1971-12-31 (1971, 52, 5)
|
||||
1972-01-01 (1971, 52, 6)
|
||||
1972-01-02 (1971, 52, 7)
|
||||
1972-01-03 (1972, 1, 1)
|
||||
1972-01-04 (1972, 1, 2)
|
||||
1972-12-30 (1972, 52, 6)
|
||||
1972-12-31 (1972, 52, 7)
|
||||
1973-01-01 (1973, 1, 1)
|
||||
1973-01-02 (1973, 1, 2)
|
||||
1973-12-29 (1973, 52, 6)
|
||||
1973-12-30 (1973, 52, 7)
|
||||
1973-12-31 (1974, 1, 1)
|
||||
1974-01-01 (1974, 1, 2)
|
||||
1998-12-30 (1998, 53, 3)
|
||||
1998-12-31 (1998, 53, 4)
|
||||
1999-01-01 (1998, 53, 5)
|
||||
1999-01-02 (1998, 53, 6)
|
||||
1999-01-03 (1998, 53, 7)
|
||||
1999-01-04 (1999, 1, 1)
|
||||
1999-01-05 (1999, 1, 2)
|
||||
1999-12-30 (1999, 52, 4)
|
||||
1999-12-31 (1999, 52, 5)
|
||||
2000-01-01 (1999, 52, 6)
|
||||
2000-01-02 (1999, 52, 7)
|
||||
2000-01-03 (2000, 1, 1)
|
||||
2000-01-04 (2000, 1, 2)
|
||||
2000-01-05 (2000, 1, 3)
|
||||
2000-01-06 (2000, 1, 4)
|
||||
2000-01-07 (2000, 1, 5)
|
||||
2000-01-08 (2000, 1, 6)
|
||||
2000-01-09 (2000, 1, 7)
|
||||
2000-01-10 (2000, 2, 1)
|
||||
2019-12-28 (2019, 52, 6)
|
||||
2019-12-29 (2019, 52, 7)
|
||||
2019-12-30 (2020, 1, 1)
|
||||
2019-12-31 (2020, 1, 2)
|
||||
@ -0,0 +1,764 @@
|
||||
##############################################################################
|
||||
#
|
||||
# Copyright (c) 2003 Zope Foundation and Contributors.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# This software is subject to the provisions of the Zope Public License,
|
||||
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
|
||||
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
|
||||
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
|
||||
# FOR A PARTICULAR PURPOSE.
|
||||
#
|
||||
##############################################################################
|
||||
|
||||
import math
|
||||
import os
|
||||
import pickle
|
||||
import platform
|
||||
import sys
|
||||
import time
|
||||
import unittest
|
||||
from datetime import date
|
||||
from datetime import datetime
|
||||
from datetime import timedelta
|
||||
from datetime import tzinfo
|
||||
|
||||
import pytz
|
||||
|
||||
from DateTime import DateTime
|
||||
from DateTime.DateTime import _findLocalTimeZoneName
|
||||
|
||||
|
||||
try:
|
||||
__file__
|
||||
except NameError: # pragma: no cover
|
||||
f = sys.argv[0]
|
||||
else:
|
||||
f = __file__
|
||||
|
||||
IS_PYPY = getattr(platform, 'python_implementation', lambda: None)() == 'PyPy'
|
||||
|
||||
DATADIR = os.path.dirname(os.path.abspath(f))
|
||||
del f
|
||||
|
||||
ZERO = timedelta(0)
|
||||
|
||||
|
||||
class FixedOffset(tzinfo):
|
||||
"""Fixed offset in minutes east from UTC."""
|
||||
|
||||
def __init__(self, offset, name):
|
||||
self.__offset = timedelta(minutes=offset)
|
||||
self.__name = name
|
||||
|
||||
def utcoffset(self, dt):
|
||||
return self.__offset
|
||||
|
||||
def tzname(self, dt):
|
||||
return self.__name
|
||||
|
||||
def dst(self, dt):
|
||||
return ZERO
|
||||
|
||||
|
||||
class DateTimeTests(unittest.TestCase):
|
||||
|
||||
def _compare(self, dt1, dt2):
|
||||
'''Compares the internal representation of dt1 with
|
||||
the representation in dt2. Allows sub-millisecond variations.
|
||||
Primarily for testing.'''
|
||||
self.assertEqual(round(dt1._t, 3), round(dt2._t, 3))
|
||||
self.assertEqual(round(dt1._d, 9), round(dt2._d, 9))
|
||||
self.assertEqual(round(dt1.time, 9), round(dt2.time, 9))
|
||||
self.assertEqual(dt1.millis(), dt2.millis())
|
||||
self.assertEqual(dt1._micros, dt2._micros)
|
||||
|
||||
def testBug1203(self):
|
||||
# 01:59:60 occurred in old DateTime
|
||||
dt = DateTime(7200, 'GMT')
|
||||
self.assertTrue(str(dt).find('60') < 0, dt)
|
||||
|
||||
def testDSTInEffect(self):
|
||||
# Checks GMT offset for a DST date in the US/Eastern time zone
|
||||
dt = DateTime(2000, 5, 9, 15, 0, 0, 'US/Eastern')
|
||||
self.assertEqual(dt.toZone('GMT').hour(), 19,
|
||||
(dt, dt.toZone('GMT')))
|
||||
|
||||
def testDSTNotInEffect(self):
|
||||
# Checks GMT offset for a non-DST date in the US/Eastern time zone
|
||||
dt = DateTime(2000, 11, 9, 15, 0, 0, 'US/Eastern')
|
||||
self.assertEqual(dt.toZone('GMT').hour(), 20,
|
||||
(dt, dt.toZone('GMT')))
|
||||
|
||||
def testAddPrecision(self):
|
||||
# Precision of serial additions
|
||||
dt = DateTime()
|
||||
self.assertEqual(str(dt + 0.10 + 3.14 + 6.76 - 10), str(dt),
|
||||
dt)
|
||||
# checks problem reported in
|
||||
# https://github.com/zopefoundation/DateTime/issues/41
|
||||
dt = DateTime(2038, 10, 7, 8, 52, 44.959840, "UTC")
|
||||
self.assertEqual(str(dt + 0.10 + 3.14 + 6.76 - 10), str(dt),
|
||||
dt)
|
||||
|
||||
def testConsistentSecondMicroRounding(self):
|
||||
dt = DateTime(2038, 10, 7, 8, 52, 44.9598398, "UTC")
|
||||
self.assertEqual(int(dt.second() * 1000000),
|
||||
dt.micros() % 60000000)
|
||||
|
||||
def testConstructor3(self):
|
||||
# Constructor from date/time string
|
||||
dt = DateTime()
|
||||
dt1s = '%d/%d/%d %d:%d:%f %s' % (
|
||||
dt.year(),
|
||||
dt.month(),
|
||||
dt.day(),
|
||||
dt.hour(),
|
||||
dt.minute(),
|
||||
dt.second(),
|
||||
dt.timezone())
|
||||
dt1 = DateTime(dt1s)
|
||||
# Compare representations as it's the
|
||||
# only way to compare the dates to the same accuracy
|
||||
self.assertEqual(repr(dt), repr(dt1))
|
||||
|
||||
def testConstructor4(self):
|
||||
# Constructor from time float
|
||||
dt = DateTime()
|
||||
dt1 = DateTime(float(dt))
|
||||
self._compare(dt, dt1)
|
||||
|
||||
def testConstructor5(self):
|
||||
# Constructor from time float and timezone
|
||||
dt = DateTime()
|
||||
dt1 = DateTime(float(dt), dt.timezone())
|
||||
self.assertEqual(str(dt), str(dt1), (dt, dt1))
|
||||
dt1 = DateTime(float(dt), str(dt.timezone()))
|
||||
self.assertEqual(str(dt), str(dt1), (dt, dt1))
|
||||
|
||||
def testConstructor6(self):
|
||||
# Constructor from year and julian date
|
||||
# This test must normalize the time zone, or it *will* break when
|
||||
# DST changes!
|
||||
dt1 = DateTime(2000, 5.500000578705)
|
||||
dt = DateTime('2000/1/5 12:00:00.050 pm %s' % dt1.localZone())
|
||||
self._compare(dt, dt1)
|
||||
|
||||
def testConstructor7(self):
|
||||
# Constructor from parts
|
||||
dt = DateTime()
|
||||
dt1 = DateTime(
|
||||
dt.year(),
|
||||
dt.month(),
|
||||
dt.day(),
|
||||
dt.hour(),
|
||||
dt.minute(),
|
||||
dt.second(),
|
||||
dt.timezone())
|
||||
# Compare representations as it's the
|
||||
# only way to compare the dates to the same accuracy
|
||||
self.assertEqual(repr(dt), repr(dt1))
|
||||
|
||||
def testDayOfWeek(self):
|
||||
# Compare to the datetime.date value to make it locale independent
|
||||
expected = date(2000, 6, 16).strftime('%A')
|
||||
# strftime() used to always be passed a day of week of 0
|
||||
dt = DateTime('2000/6/16')
|
||||
s = dt.strftime('%A')
|
||||
self.assertEqual(s, expected, (dt, s))
|
||||
|
||||
def testOldDate(self):
|
||||
# Fails when an 1800 date is displayed with negative signs
|
||||
dt = DateTime('1830/5/6 12:31:46.213 pm')
|
||||
dt1 = dt.toZone('GMT+6')
|
||||
self.assertTrue(str(dt1).find('-') < 0, (dt, dt1))
|
||||
|
||||
def testSubtraction(self):
|
||||
# Reconstruction of a DateTime from its parts, with subtraction
|
||||
# this also tests the accuracy of addition and reconstruction
|
||||
dt = DateTime()
|
||||
dt1 = dt - 3.141592653
|
||||
dt2 = DateTime(
|
||||
dt.year(),
|
||||
dt.month(),
|
||||
dt.day(),
|
||||
dt.hour(),
|
||||
dt.minute(),
|
||||
dt.second())
|
||||
dt3 = dt2 - 3.141592653
|
||||
self.assertEqual(dt1, dt3, (dt, dt1, dt2, dt3))
|
||||
|
||||
def testTZ1add(self):
|
||||
# Time zone manipulation: add to a date
|
||||
dt = DateTime('1997/3/8 1:45am GMT-4')
|
||||
dt1 = DateTime('1997/3/9 1:45pm GMT+8')
|
||||
self.assertTrue((dt + 1.0).equalTo(dt1))
|
||||
|
||||
def testTZ1sub(self):
|
||||
# Time zone manipulation: subtract from a date
|
||||
dt = DateTime('1997/3/8 1:45am GMT-4')
|
||||
dt1 = DateTime('1997/3/9 1:45pm GMT+8')
|
||||
self.assertTrue((dt1 - 1.0).equalTo(dt))
|
||||
|
||||
def testTZ1diff(self):
|
||||
# Time zone manipulation: diff two dates
|
||||
dt = DateTime('1997/3/8 1:45am GMT-4')
|
||||
dt1 = DateTime('1997/3/9 1:45pm GMT+8')
|
||||
self.assertEqual(dt1 - dt, 1.0, (dt, dt1))
|
||||
|
||||
def test_compare_methods(self):
|
||||
# Compare two dates using several methods
|
||||
dt = DateTime('1997/1/1')
|
||||
dt1 = DateTime('1997/2/2')
|
||||
self.assertTrue(dt1.greaterThan(dt))
|
||||
self.assertTrue(dt1.greaterThanEqualTo(dt))
|
||||
self.assertTrue(dt.lessThan(dt1))
|
||||
self.assertTrue(dt.lessThanEqualTo(dt1))
|
||||
self.assertTrue(dt.notEqualTo(dt1))
|
||||
self.assertFalse(dt.equalTo(dt1))
|
||||
# Compare a date to float
|
||||
dt = DateTime(1.0)
|
||||
self.assertTrue(dt == DateTime(1.0)) # testing __eq__
|
||||
self.assertFalse(dt != DateTime(1.0)) # testing __ne__
|
||||
self.assertFalse(dt.greaterThan(1.0))
|
||||
self.assertTrue(dt.greaterThanEqualTo(1.0))
|
||||
self.assertFalse(dt.lessThan(1.0))
|
||||
self.assertTrue(dt.lessThanEqualTo(1.0))
|
||||
self.assertFalse(dt.notEqualTo(1.0))
|
||||
self.assertTrue(dt.equalTo(1.0))
|
||||
# Compare a date to int
|
||||
dt = DateTime(1)
|
||||
self.assertEqual(dt, DateTime(1.0))
|
||||
self.assertTrue(dt == DateTime(1)) # testing __eq__
|
||||
self.assertFalse(dt != DateTime(1)) # testing __ne__
|
||||
self.assertFalse(dt.greaterThan(1))
|
||||
self.assertTrue(dt.greaterThanEqualTo(1))
|
||||
self.assertFalse(dt.lessThan(1))
|
||||
self.assertTrue(dt.lessThanEqualTo(1))
|
||||
self.assertFalse(dt.notEqualTo(1))
|
||||
self.assertTrue(dt.equalTo(1))
|
||||
# Compare a date to string; there is no implicit type conversion
|
||||
# but behavior is consistent with comparing, for example, an int
|
||||
# and a string.
|
||||
dt = DateTime("2023")
|
||||
self.assertFalse(dt == "2023") # testing __eq__
|
||||
self.assertTrue(dt != "2023") # testing __ne__
|
||||
self.assertRaises(TypeError, dt.greaterThan, "2023")
|
||||
self.assertRaises(TypeError, dt.greaterThanEqualTo, "2023")
|
||||
self.assertRaises(TypeError, dt.lessThan, "2023")
|
||||
self.assertRaises(TypeError, dt.lessThanEqualTo, "2023")
|
||||
self.assertTrue(dt.notEqualTo("2023"))
|
||||
self.assertFalse(dt.equalTo("2023"))
|
||||
|
||||
def test_compare_methods_none(self):
|
||||
# Compare a date to None
|
||||
for dt in (DateTime('1997/1/1'), DateTime(0)):
|
||||
self.assertTrue(dt.greaterThan(None))
|
||||
self.assertTrue(dt.greaterThanEqualTo(None))
|
||||
self.assertFalse(dt.lessThan(None))
|
||||
self.assertFalse(dt.lessThanEqualTo(None))
|
||||
self.assertTrue(dt.notEqualTo(None))
|
||||
self.assertFalse(dt.equalTo(None))
|
||||
|
||||
def test_pickle(self):
|
||||
dt = DateTime()
|
||||
data = pickle.dumps(dt, 1)
|
||||
new = pickle.loads(data)
|
||||
for key in DateTime.__slots__:
|
||||
self.assertEqual(getattr(dt, key), getattr(new, key))
|
||||
|
||||
def test_pickle_with_tz(self):
|
||||
dt = DateTime('2002/5/2 8:00am GMT+8')
|
||||
data = pickle.dumps(dt, 1)
|
||||
new = pickle.loads(data)
|
||||
for key in DateTime.__slots__:
|
||||
self.assertEqual(getattr(dt, key), getattr(new, key))
|
||||
|
||||
def test_pickle_asdatetime_with_tz(self):
|
||||
dt = DateTime('2002/5/2 8:00am GMT+8')
|
||||
data = pickle.dumps(dt.asdatetime(), 1)
|
||||
new = DateTime(pickle.loads(data))
|
||||
for key in DateTime.__slots__:
|
||||
self.assertEqual(getattr(dt, key), getattr(new, key))
|
||||
|
||||
def test_pickle_with_numerical_tz(self):
|
||||
for dt_str in ('2007/01/02 12:34:56.789 +0300',
|
||||
'2007/01/02 12:34:56.789 +0430',
|
||||
'2007/01/02 12:34:56.789 -1234'):
|
||||
dt = DateTime(dt_str)
|
||||
data = pickle.dumps(dt, 1)
|
||||
new = pickle.loads(data)
|
||||
for key in DateTime.__slots__:
|
||||
self.assertEqual(getattr(dt, key), getattr(new, key))
|
||||
|
||||
def test_pickle_with_micros(self):
|
||||
dt = DateTime('2002/5/2 8:00:14.123 GMT+8')
|
||||
data = pickle.dumps(dt, 1)
|
||||
new = pickle.loads(data)
|
||||
for key in DateTime.__slots__:
|
||||
self.assertEqual(getattr(dt, key), getattr(new, key))
|
||||
|
||||
def test_pickle_old(self):
|
||||
dt = DateTime('2002/5/2 8:00am GMT+0')
|
||||
data = (
|
||||
'(cDateTime.DateTime\nDateTime\nq\x01Noq\x02}q\x03(U\x05'
|
||||
'_amonq\x04U\x03Mayq\x05U\x05_adayq\x06U\x03Thuq\x07U\x05_pmonq'
|
||||
'\x08h\x05U\x05_hourq\tK\x08U\x05_fmonq\nh\x05U\x05_pdayq\x0bU'
|
||||
'\x04Thu.q\x0cU\x05_fdayq\rU\x08Thursdayq\x0eU\x03_pmq\x0fU\x02amq'
|
||||
'\x10U\x02_tq\x11GA\xcehy\x00\x00\x00\x00U\x07_minuteq\x12K\x00U'
|
||||
'\x07_microsq\x13L1020326400000000L\nU\x02_dq\x14G@\xe2\x12j\xaa'
|
||||
'\xaa\xaa\xabU\x07_secondq\x15G\x00\x00\x00\x00\x00\x00\x00\x00U'
|
||||
'\x03_tzq\x16U\x05GMT+0q\x17U\x06_monthq\x18K\x05U'
|
||||
'\x0f_timezone_naiveq\x19I00\nU\x04_dayq\x1aK\x02U\x05_yearq'
|
||||
'\x1bM\xd2\x07U\x08_nearsecq\x1cG\x00\x00\x00\x00\x00\x00\x00'
|
||||
'\x00U\x07_pmhourq\x1dK\x08U\n_dayoffsetq\x1eK\x04U\x04timeq'
|
||||
'\x1fG?\xd5UUUV\x00\x00ub.')
|
||||
data = data.encode('latin-1')
|
||||
new = pickle.loads(data)
|
||||
for key in DateTime.__slots__:
|
||||
self.assertEqual(getattr(dt, key), getattr(new, key))
|
||||
|
||||
def test_pickle_old_without_micros(self):
|
||||
dt = DateTime('2002/5/2 8:00am GMT+0')
|
||||
data = (
|
||||
'(cDateTime.DateTime\nDateTime\nq\x01Noq\x02}q\x03(U\x05'
|
||||
'_amonq\x04U\x03Mayq\x05U\x05_adayq\x06U\x03Thuq\x07U\x05_pmonq'
|
||||
'\x08h\x05U\x05_hourq\tK\x08U\x05_fmonq\nh\x05U\x05_pdayq\x0bU'
|
||||
'\x04Thu.q\x0cU\x05_fdayq\rU\x08Thursdayq\x0eU\x03_pmq\x0fU'
|
||||
'\x02amq\x10U\x02_tq\x11GA\xcehy\x00\x00\x00\x00U\x07_minuteq'
|
||||
'\x12K\x00U\x02_dq\x13G@\xe2\x12j\xaa\xaa\xaa\xabU\x07_secondq'
|
||||
'\x14G\x00\x00\x00\x00\x00\x00\x00\x00U\x03_tzq\x15U\x05GMT+0q'
|
||||
'\x16U\x06_monthq\x17K\x05U\x0f_timezone_naiveq\x18I00\nU'
|
||||
'\x04_dayq\x19K\x02U\x05_yearq\x1aM\xd2\x07U\x08_nearsecq'
|
||||
'\x1bG\x00\x00\x00\x00\x00\x00\x00\x00U\x07_pmhourq\x1cK\x08U'
|
||||
'\n_dayoffsetq\x1dK\x04U\x04timeq\x1eG?\xd5UUUV\x00\x00ub.')
|
||||
data = data.encode('latin-1')
|
||||
new = pickle.loads(data)
|
||||
for key in DateTime.__slots__:
|
||||
self.assertEqual(getattr(dt, key), getattr(new, key))
|
||||
|
||||
def test_pickle_dates_after_2038(self):
|
||||
dt = DateTime('2039/09/02 07:07:6.235027 GMT+1')
|
||||
data = pickle.dumps(dt, 1)
|
||||
new = pickle.loads(data)
|
||||
for key in DateTime.__slots__:
|
||||
self.assertEqual(getattr(dt, key), getattr(new, key))
|
||||
|
||||
def test_pickle_old_with_micros_as_float(self):
|
||||
dt = DateTime('2002/5/2 8:00am GMT+0')
|
||||
data = (
|
||||
'ccopy_reg\n_reconstructor\nq\x00(cDateTime.DateTime\nDateTime'
|
||||
'\nq\x01c__builtin__\nobject\nq\x02Ntq\x03Rq\x04(GA\xcehy\x00\x00'
|
||||
'\x00\x00I00\nX\x05\x00\x00\x00GMT+0q\x05tq\x06b.')
|
||||
data = data.encode('latin-1')
|
||||
new = pickle.loads(data)
|
||||
for key in DateTime.__slots__:
|
||||
self.assertEqual(getattr(dt, key), getattr(new, key))
|
||||
|
||||
def testTZ2(self):
|
||||
# Time zone manipulation test 2
|
||||
dt = DateTime()
|
||||
dt1 = dt.toZone('GMT')
|
||||
s = dt.second()
|
||||
s1 = dt1.second()
|
||||
self.assertEqual(s, s1, (dt, dt1, s, s1))
|
||||
|
||||
def testTZDiffDaylight(self):
|
||||
# Diff dates across daylight savings dates
|
||||
dt = DateTime('2000/6/8 1:45am US/Eastern')
|
||||
dt1 = DateTime('2000/12/8 12:45am US/Eastern')
|
||||
self.assertEqual(dt1 - dt, 183, (dt, dt1, dt1 - dt))
|
||||
|
||||
def testY10KDate(self):
|
||||
# Comparison of a Y10K date and a Y2K date
|
||||
dt = DateTime('10213/09/21')
|
||||
dt1 = DateTime(2000, 1, 1)
|
||||
|
||||
dsec = (dt.millis() - dt1.millis()) / 1000.0
|
||||
ddays = math.floor((dsec / 86400.0) + 0.5)
|
||||
|
||||
self.assertEqual(ddays, 3000000, ddays)
|
||||
|
||||
def test_tzoffset(self):
|
||||
# Test time-zone given as an offset
|
||||
|
||||
# GMT
|
||||
dt = DateTime('Tue, 10 Sep 2001 09:41:03 GMT')
|
||||
self.assertEqual(dt.tzoffset(), 0)
|
||||
|
||||
# Timezone by name, a timezone that has no daylight saving time.
|
||||
dt = DateTime('Tue, 2 Mar 2001 09:41:03 GMT+3')
|
||||
self.assertEqual(dt.tzoffset(), 10800)
|
||||
|
||||
# Timezone by name, has daylight saving time but it is not in effect.
|
||||
dt = DateTime('Tue, 21 Jan 2001 09:41:03 PST')
|
||||
self.assertEqual(dt.tzoffset(), -28800)
|
||||
|
||||
# Timezone by name, with daylight saving time in effect
|
||||
dt = DateTime('Tue, 24 Aug 2001 09:41:03 PST')
|
||||
self.assertEqual(dt.tzoffset(), -25200)
|
||||
|
||||
# A negative numerical timezone
|
||||
dt = DateTime('Tue, 24 Jul 2001 09:41:03 -0400')
|
||||
self.assertEqual(dt.tzoffset(), -14400)
|
||||
|
||||
# A positive numerical timezone
|
||||
dt = DateTime('Tue, 6 Dec 1966 01:41:03 +0200')
|
||||
self.assertEqual(dt.tzoffset(), 7200)
|
||||
|
||||
# A negative numerical timezone with minutes.
|
||||
dt = DateTime('Tue, 24 Jul 2001 09:41:03 -0637')
|
||||
self.assertEqual(dt.tzoffset(), -23820)
|
||||
|
||||
# A positive numerical timezone with minutes.
|
||||
dt = DateTime('Tue, 24 Jul 2001 09:41:03 +0425')
|
||||
self.assertEqual(dt.tzoffset(), 15900)
|
||||
|
||||
def testISO8601(self):
|
||||
# ISO8601 reference dates
|
||||
ref0 = DateTime('2002/5/2 8:00am GMT')
|
||||
ref1 = DateTime('2002/5/2 8:00am US/Eastern')
|
||||
ref2 = DateTime('2006/11/6 10:30 GMT')
|
||||
ref3 = DateTime('2004/06/14 14:30:15 GMT-3')
|
||||
ref4 = DateTime('2006/01/01 GMT')
|
||||
|
||||
# Basic tests
|
||||
# Though this is timezone naive and according to specification should
|
||||
# be interpreted in the local timezone, to preserve backwards
|
||||
# compatibility with previously expected behaviour.
|
||||
isoDt = DateTime('2002-05-02T08:00:00')
|
||||
self.assertTrue(ref0.equalTo(isoDt))
|
||||
isoDt = DateTime('2002-05-02T08:00:00Z')
|
||||
self.assertTrue(ref0.equalTo(isoDt))
|
||||
isoDt = DateTime('2002-05-02T08:00:00+00:00')
|
||||
self.assertTrue(ref0.equalTo(isoDt))
|
||||
isoDt = DateTime('2002-05-02T08:00:00-04:00')
|
||||
self.assertTrue(ref1.equalTo(isoDt))
|
||||
isoDt = DateTime('2002-05-02 08:00:00-04:00')
|
||||
self.assertTrue(ref1.equalTo(isoDt))
|
||||
|
||||
# Bug 1386: the colon in the timezone offset is optional
|
||||
isoDt = DateTime('2002-05-02T08:00:00-0400')
|
||||
self.assertTrue(ref1.equalTo(isoDt))
|
||||
|
||||
# Bug 2191: date reduced formats
|
||||
isoDt = DateTime('2006-01-01')
|
||||
self.assertTrue(ref4.equalTo(isoDt))
|
||||
isoDt = DateTime('200601-01')
|
||||
self.assertTrue(ref4.equalTo(isoDt))
|
||||
isoDt = DateTime('20060101')
|
||||
self.assertTrue(ref4.equalTo(isoDt))
|
||||
isoDt = DateTime('2006-01')
|
||||
self.assertTrue(ref4.equalTo(isoDt))
|
||||
isoDt = DateTime('200601')
|
||||
self.assertTrue(ref4.equalTo(isoDt))
|
||||
isoDt = DateTime('2006')
|
||||
self.assertTrue(ref4.equalTo(isoDt))
|
||||
|
||||
# Bug 2191: date/time separators are also optional
|
||||
isoDt = DateTime('20020502T08:00:00')
|
||||
self.assertTrue(ref0.equalTo(isoDt))
|
||||
isoDt = DateTime('2002-05-02T080000')
|
||||
self.assertTrue(ref0.equalTo(isoDt))
|
||||
isoDt = DateTime('20020502T080000')
|
||||
self.assertTrue(ref0.equalTo(isoDt))
|
||||
|
||||
# Bug 2191: timezones with only one digit for hour
|
||||
isoDt = DateTime('20020502T080000+0')
|
||||
self.assertTrue(ref0.equalTo(isoDt))
|
||||
isoDt = DateTime('20020502 080000-4')
|
||||
self.assertTrue(ref1.equalTo(isoDt))
|
||||
isoDt = DateTime('20020502T080000-400')
|
||||
self.assertTrue(ref1.equalTo(isoDt))
|
||||
isoDt = DateTime('20020502T080000-4:00')
|
||||
self.assertTrue(ref1.equalTo(isoDt))
|
||||
|
||||
# Bug 2191: optional seconds/minutes
|
||||
isoDt = DateTime('2002-05-02T0800')
|
||||
self.assertTrue(ref0.equalTo(isoDt))
|
||||
isoDt = DateTime('2002-05-02T08')
|
||||
self.assertTrue(ref0.equalTo(isoDt))
|
||||
|
||||
# Bug 2191: week format
|
||||
isoDt = DateTime('2002-W18-4T0800')
|
||||
self.assertTrue(ref0.equalTo(isoDt))
|
||||
isoDt = DateTime('2002-W184T0800')
|
||||
self.assertTrue(ref0.equalTo(isoDt))
|
||||
isoDt = DateTime('2002W18-4T0800')
|
||||
self.assertTrue(ref0.equalTo(isoDt))
|
||||
isoDt = DateTime('2002W184T08')
|
||||
self.assertTrue(ref0.equalTo(isoDt))
|
||||
isoDt = DateTime('2004-W25-1T14:30:15-03:00')
|
||||
self.assertTrue(ref3.equalTo(isoDt))
|
||||
isoDt = DateTime('2004-W25T14:30:15-03:00')
|
||||
self.assertTrue(ref3.equalTo(isoDt))
|
||||
|
||||
# Bug 2191: day of year format
|
||||
isoDt = DateTime('2002-122T0800')
|
||||
self.assertTrue(ref0.equalTo(isoDt))
|
||||
isoDt = DateTime('2002122T0800')
|
||||
self.assertTrue(ref0.equalTo(isoDt))
|
||||
|
||||
# Bug 2191: hours/minutes fractions
|
||||
isoDt = DateTime('2006-11-06T10.5')
|
||||
self.assertTrue(ref2.equalTo(isoDt))
|
||||
isoDt = DateTime('2006-11-06T10,5')
|
||||
self.assertTrue(ref2.equalTo(isoDt))
|
||||
isoDt = DateTime('20040614T1430.25-3')
|
||||
self.assertTrue(ref3.equalTo(isoDt))
|
||||
isoDt = DateTime('2004-06-14T1430,25-3')
|
||||
self.assertTrue(ref3.equalTo(isoDt))
|
||||
isoDt = DateTime('2004-06-14T14:30.25-3')
|
||||
self.assertTrue(ref3.equalTo(isoDt))
|
||||
isoDt = DateTime('20040614T14:30,25-3')
|
||||
self.assertTrue(ref3.equalTo(isoDt))
|
||||
|
||||
# ISO8601 standard format
|
||||
iso8601_string = '2002-05-02T08:00:00-04:00'
|
||||
iso8601DT = DateTime(iso8601_string)
|
||||
self.assertEqual(iso8601_string, iso8601DT.ISO8601())
|
||||
|
||||
# ISO format with no timezone
|
||||
isoDt = DateTime('2006-01-01 00:00:00')
|
||||
self.assertTrue(ref4.equalTo(isoDt))
|
||||
|
||||
def testJulianWeek(self):
|
||||
# Check JulianDayWeek function
|
||||
fn = os.path.join(DATADIR, 'julian_testdata.txt')
|
||||
with open(fn) as fd:
|
||||
lines = fd.readlines()
|
||||
for line in lines:
|
||||
d = DateTime(line[:10])
|
||||
result_from_mx = tuple(map(int, line[12:-2].split(',')))
|
||||
self.assertEqual(result_from_mx[1], d.week())
|
||||
|
||||
def testCopyConstructor(self):
|
||||
d = DateTime('2004/04/04')
|
||||
self.assertEqual(DateTime(d), d)
|
||||
self.assertEqual(str(DateTime(d)), str(d))
|
||||
d2 = DateTime('1999/04/12 01:00:00')
|
||||
self.assertEqual(DateTime(d2), d2)
|
||||
self.assertEqual(str(DateTime(d2)), str(d2))
|
||||
|
||||
def testCopyConstructorPreservesTimezone(self):
|
||||
# test for https://bugs.launchpad.net/zope2/+bug/200007
|
||||
# This always worked in the local timezone, so we need at least
|
||||
# two tests with different zones to be sure at least one of them
|
||||
# is not local.
|
||||
d = DateTime('2004/04/04')
|
||||
self.assertEqual(DateTime(d).timezone(), d.timezone())
|
||||
d2 = DateTime('2008/04/25 12:00:00 EST')
|
||||
self.assertEqual(DateTime(d2).timezone(), d2.timezone())
|
||||
self.assertEqual(str(DateTime(d2)), str(d2))
|
||||
d3 = DateTime('2008/04/25 12:00:00 PST')
|
||||
self.assertEqual(DateTime(d3).timezone(), d3.timezone())
|
||||
self.assertEqual(str(DateTime(d3)), str(d3))
|
||||
|
||||
def testRFC822(self):
|
||||
# rfc822 conversion
|
||||
dt = DateTime('2002-05-02T08:00:00+00:00')
|
||||
self.assertEqual(dt.rfc822(), 'Thu, 02 May 2002 08:00:00 +0000')
|
||||
|
||||
dt = DateTime('2002-05-02T08:00:00+02:00')
|
||||
self.assertEqual(dt.rfc822(), 'Thu, 02 May 2002 08:00:00 +0200')
|
||||
|
||||
dt = DateTime('2002-05-02T08:00:00-02:00')
|
||||
self.assertEqual(dt.rfc822(), 'Thu, 02 May 2002 08:00:00 -0200')
|
||||
|
||||
# Checking that conversion from local time is working.
|
||||
dt = DateTime()
|
||||
dts = dt.rfc822().split(' ')
|
||||
times = dts[4].split(':')
|
||||
_isDST = time.localtime(time.time())[8]
|
||||
if _isDST:
|
||||
offset = time.altzone
|
||||
else:
|
||||
offset = time.timezone
|
||||
self.assertEqual(dts[0], dt.aDay() + ',')
|
||||
self.assertEqual(int(dts[1]), dt.day())
|
||||
self.assertEqual(dts[2], dt.aMonth())
|
||||
self.assertEqual(int(dts[3]), dt.year())
|
||||
self.assertEqual(int(times[0]), dt.h_24())
|
||||
self.assertEqual(int(times[1]), dt.minute())
|
||||
self.assertEqual(int(times[2]), int(dt.second()))
|
||||
self.assertEqual(dts[5], "%+03d%02d" % divmod((-offset / 60), 60))
|
||||
|
||||
def testInternationalDateformat(self):
|
||||
for year in (1990, 2001, 2020):
|
||||
for month in (1, 12):
|
||||
for day in (1, 12, 28, 31):
|
||||
try:
|
||||
d_us = DateTime("%d/%d/%d" % (year, month, day))
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
d_int = DateTime("%d.%d.%d" % (day, month, year),
|
||||
datefmt="international")
|
||||
self.assertEqual(d_us, d_int)
|
||||
|
||||
d_int = DateTime("%d/%d/%d" % (day, month, year),
|
||||
datefmt="international")
|
||||
self.assertEqual(d_us, d_int)
|
||||
|
||||
def test_intl_format_hyphen(self):
|
||||
d_jan = DateTime('2011-01-11 GMT')
|
||||
d_nov = DateTime('2011-11-01 GMT')
|
||||
d_us = DateTime('11-01-2011 GMT')
|
||||
d_int = DateTime('11-01-2011 GMT', datefmt="international")
|
||||
self.assertNotEqual(d_us, d_int)
|
||||
self.assertEqual(d_us, d_nov)
|
||||
self.assertEqual(d_int, d_jan)
|
||||
|
||||
def test_calcTimezoneName(self):
|
||||
from DateTime.interfaces import TimeError
|
||||
timezone_dependent_epoch = 2177452800
|
||||
try:
|
||||
DateTime()._calcTimezoneName(timezone_dependent_epoch, 0)
|
||||
except TimeError:
|
||||
self.fail('Zope Collector issue #484 (negative time bug): '
|
||||
'TimeError raised')
|
||||
|
||||
def testStrftimeTZhandling(self):
|
||||
# strftime timezone testing
|
||||
# This is a test for collector issue #1127
|
||||
format = '%Y-%m-%d %H:%M %Z'
|
||||
dt = DateTime('Wed, 19 Nov 2003 18:32:07 -0215')
|
||||
dt_string = dt.strftime(format)
|
||||
dt_local = dt.toZone(_findLocalTimeZoneName(0))
|
||||
dt_localstring = dt_local.strftime(format)
|
||||
self.assertEqual(dt_string, dt_localstring)
|
||||
|
||||
def testStrftimeFarDates(self):
|
||||
# Checks strftime in dates <= 1900 or >= 2038
|
||||
dt = DateTime('1900/01/30')
|
||||
self.assertEqual(dt.strftime('%d/%m/%Y'), '30/01/1900')
|
||||
dt = DateTime('2040/01/30')
|
||||
self.assertEqual(dt.strftime('%d/%m/%Y'), '30/01/2040')
|
||||
|
||||
def testZoneInFarDates(self):
|
||||
# Checks time zone in dates <= 1900 or >= 2038
|
||||
dt1 = DateTime('2040/01/30 14:33 GMT+1')
|
||||
dt2 = DateTime('2040/01/30 11:33 GMT-2')
|
||||
self.assertEqual(dt1.strftime('%d/%m/%Y %H:%M'),
|
||||
dt2.strftime('%d/%m/%Y %H:%M'))
|
||||
|
||||
@unittest.skipIf(
|
||||
IS_PYPY,
|
||||
"Using Non-Ascii characters for strftime doesn't work in PyPy"
|
||||
"https://bitbucket.org/pypy/pypy/issues/2161/pypy3-strftime-does-not-accept-unicode" # noqa: E501 line too long
|
||||
)
|
||||
def testStrftimeStr(self):
|
||||
dt = DateTime('2002-05-02T08:00:00+00:00')
|
||||
uchar = b'\xc3\xa0'.decode('utf-8')
|
||||
ok = dt.strftime('Le %d/%m/%Y a %Hh%M').replace('a', uchar)
|
||||
ustr = b'Le %d/%m/%Y \xc3\xa0 %Hh%M'.decode('utf-8')
|
||||
self.assertEqual(dt.strftime(ustr), ok)
|
||||
|
||||
def testTimezoneNaiveHandling(self):
|
||||
# checks that we assign timezone naivity correctly
|
||||
dt = DateTime('2007-10-04T08:00:00+00:00')
|
||||
self.assertFalse(dt.timezoneNaive(),
|
||||
'error with naivity handling in __parse_iso8601')
|
||||
dt = DateTime('2007-10-04T08:00:00Z')
|
||||
self.assertFalse(dt.timezoneNaive(),
|
||||
'error with naivity handling in __parse_iso8601')
|
||||
dt = DateTime('2007-10-04T08:00:00')
|
||||
self.assertTrue(dt.timezoneNaive(),
|
||||
'error with naivity handling in __parse_iso8601')
|
||||
dt = DateTime('2007/10/04 15:12:33.487618 GMT+1')
|
||||
self.assertFalse(dt.timezoneNaive(),
|
||||
'error with naivity handling in _parse')
|
||||
dt = DateTime('2007/10/04 15:12:33.487618')
|
||||
self.assertTrue(dt.timezoneNaive(),
|
||||
'error with naivity handling in _parse')
|
||||
dt = DateTime()
|
||||
self.assertFalse(dt.timezoneNaive(),
|
||||
'error with naivity for current time')
|
||||
s = '2007-10-04T08:00:00'
|
||||
dt = DateTime(s)
|
||||
self.assertEqual(s, dt.ISO8601())
|
||||
s = '2007-10-04T08:00:00+00:00'
|
||||
dt = DateTime(s)
|
||||
self.assertEqual(s, dt.ISO8601())
|
||||
|
||||
def testConversions(self):
|
||||
sdt0 = datetime.now() # this is a timezone naive datetime
|
||||
dt0 = DateTime(sdt0)
|
||||
self.assertTrue(dt0.timezoneNaive(), (sdt0, dt0))
|
||||
sdt1 = datetime(2007, 10, 4, 18, 14, 42, 580, pytz.utc)
|
||||
dt1 = DateTime(sdt1)
|
||||
self.assertFalse(dt1.timezoneNaive(), (sdt1, dt1))
|
||||
|
||||
# convert back
|
||||
sdt2 = dt0.asdatetime()
|
||||
self.assertEqual(sdt0, sdt2)
|
||||
sdt3 = dt1.utcdatetime() # this returns a timezone naive datetime
|
||||
self.assertEqual(sdt1.hour, sdt3.hour)
|
||||
|
||||
dt4 = DateTime('2007-10-04T10:00:00+05:00')
|
||||
sdt4 = datetime(2007, 10, 4, 5, 0)
|
||||
self.assertEqual(dt4.utcdatetime(), sdt4)
|
||||
self.assertEqual(dt4.asdatetime(), sdt4.replace(tzinfo=pytz.utc))
|
||||
|
||||
dt5 = DateTime('2007-10-23 10:00:00 US/Eastern')
|
||||
tz = pytz.timezone('US/Eastern')
|
||||
sdt5 = datetime(2007, 10, 23, 10, 0, tzinfo=tz)
|
||||
dt6 = DateTime(sdt5)
|
||||
self.assertEqual(dt5.asdatetime(), sdt5)
|
||||
self.assertEqual(dt6.asdatetime(), sdt5)
|
||||
self.assertEqual(dt5, dt6)
|
||||
self.assertEqual(dt5.asdatetime().tzinfo, tz)
|
||||
self.assertEqual(dt6.asdatetime().tzinfo, tz)
|
||||
|
||||
    def testBasicTZ(self):
        # psycopg2 supplies its own tzinfo instances, with no `zone` attribute
        tz = FixedOffset(60, 'GMT+1')
        dt1 = datetime(2008, 8, 5, 12, 0, tzinfo=tz)
        DT = DateTime(dt1)
        dt2 = DT.asdatetime()
        offset1 = dt1.tzinfo.utcoffset(dt1)
        offset2 = dt2.tzinfo.utcoffset(dt2)
        self.assertEqual(offset1, offset2)
|
||||
|
||||
def testEDTTimezone(self):
|
||||
# should be able to parse EDT timezones: see lp:599856.
|
||||
dt = DateTime("Mon, 28 Jun 2010 10:12:25 EDT")
|
||||
self.assertEqual(dt.Day(), 'Monday')
|
||||
self.assertEqual(dt.day(), 28)
|
||||
self.assertEqual(dt.Month(), 'June')
|
||||
self.assertEqual(dt.timezone(), 'GMT-4')
|
||||
|
||||
    def testParseISO8601(self):
        parsed = DateTime()._parse_iso8601('2010-10-10')
        self.assertEqual(parsed, (2010, 10, 10, 0, 0, 0, 'GMT+0000'))
|
||||
|
||||
def test_interface(self):
|
||||
from DateTime.interfaces import IDateTime
|
||||
self.assertTrue(IDateTime.providedBy(DateTime()))
|
||||
|
||||
def test_security(self):
|
||||
dt = DateTime()
|
||||
self.assertEqual(dt.__roles__, None)
|
||||
self.assertEqual(dt.__allow_access_to_unprotected_subobjects__, 1)
|
||||
|
||||
def test_format(self):
|
||||
dt = DateTime(1968, 3, 10, 23, 45, 0, 'Europe/Vienna')
|
||||
fmt = '%d.%m.%Y %H:%M'
|
||||
result = dt.strftime(fmt)
|
||||
unformatted_result = '1968/03/10 23:45:00 Europe/Vienna'
|
||||
self.assertEqual(result, f'{dt:%d.%m.%Y %H:%M}')
|
||||
self.assertEqual(unformatted_result, f'{dt}')
|
||||
self.assertEqual(unformatted_result, f'{dt}')
|
||||
self.assertEqual(result, f'{dt:{fmt}}')
|
||||
self.assertEqual(unformatted_result, f'{dt:}')
|
||||
self.assertEqual(unformatted_result, f'{dt}')
|
||||
|
||||
|
||||
def test_suite():
    import doctest
    return unittest.TestSuite([
        unittest.defaultTestLoader.loadTestsFromTestCase(DateTimeTests),
        doctest.DocFileSuite('DateTime.txt', package='DateTime'),
        doctest.DocFileSuite('pytz.txt', package='DateTime'),
    ])
|
||||
239
llmlab/lib/python3.12/site-packages/_distutils_hack/__init__.py
Normal file
@ -0,0 +1,239 @@
|
||||
# don't import any costly modules
|
||||
import os
|
||||
import sys
|
||||
|
||||
report_url = (
|
||||
"https://github.com/pypa/setuptools/issues/new?template=distutils-deprecation.yml"
|
||||
)
|
||||
|
||||
|
||||
def warn_distutils_present():
|
||||
if 'distutils' not in sys.modules:
|
||||
return
|
||||
import warnings
|
||||
|
||||
warnings.warn(
|
||||
"Distutils was imported before Setuptools, but importing Setuptools "
|
||||
"also replaces the `distutils` module in `sys.modules`. This may lead "
|
||||
"to undesirable behaviors or errors. To avoid these issues, avoid "
|
||||
"using distutils directly, ensure that setuptools is installed in the "
|
||||
"traditional way (e.g. not an editable install), and/or make sure "
|
||||
"that setuptools is always imported before distutils."
|
||||
)
|
||||
|
||||
|
||||
def clear_distutils():
|
||||
if 'distutils' not in sys.modules:
|
||||
return
|
||||
import warnings
|
||||
|
||||
warnings.warn(
|
||||
"Setuptools is replacing distutils. Support for replacing "
|
||||
"an already imported distutils is deprecated. In the future, "
|
||||
"this condition will fail. "
|
||||
f"Register concerns at {report_url}"
|
||||
)
|
||||
mods = [
|
||||
name
|
||||
for name in sys.modules
|
||||
if name == "distutils" or name.startswith("distutils.")
|
||||
]
|
||||
for name in mods:
|
||||
del sys.modules[name]
|
||||
|
||||
|
||||
def enabled():
|
||||
"""
|
||||
Allow selection of distutils by environment variable.
|
||||
"""
|
||||
which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local')
|
||||
if which == 'stdlib':
|
||||
import warnings
|
||||
|
||||
warnings.warn(
|
||||
"Reliance on distutils from stdlib is deprecated. Users "
|
||||
"must rely on setuptools to provide the distutils module. "
|
||||
"Avoid importing distutils or import setuptools first, "
|
||||
"and avoid setting SETUPTOOLS_USE_DISTUTILS=stdlib. "
|
||||
f"Register concerns at {report_url}"
|
||||
)
|
||||
return which == 'local'
|
||||
|
||||
|
||||
def ensure_local_distutils():
|
||||
import importlib
|
||||
|
||||
clear_distutils()
|
||||
|
||||
# With the DistutilsMetaFinder in place,
|
||||
# perform an import to cause distutils to be
|
||||
# loaded from setuptools._distutils. Ref #2906.
|
||||
with shim():
|
||||
importlib.import_module('distutils')
|
||||
|
||||
# check that submodules load as expected
|
||||
core = importlib.import_module('distutils.core')
|
||||
assert '_distutils' in core.__file__, core.__file__
|
||||
assert 'setuptools._distutils.log' not in sys.modules
|
||||
|
||||
|
||||
def do_override():
|
||||
"""
|
||||
Ensure that the local copy of distutils is preferred over stdlib.
|
||||
|
||||
See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
|
||||
for more motivation.
|
||||
"""
|
||||
if enabled():
|
||||
warn_distutils_present()
|
||||
ensure_local_distutils()
|
||||
|
||||
|
||||
class _TrivialRe:
|
||||
def __init__(self, *patterns) -> None:
|
||||
self._patterns = patterns
|
||||
|
||||
def match(self, string):
|
||||
return all(pat in string for pat in self._patterns)
|
||||
|
||||
|
||||
class DistutilsMetaFinder:
|
||||
def find_spec(self, fullname, path, target=None):
|
||||
# optimization: only consider top level modules and those
|
||||
# found in the CPython test suite.
|
||||
if path is not None and not fullname.startswith('test.'):
|
||||
return None
|
||||
|
||||
method_name = 'spec_for_{fullname}'.format(**locals())
|
||||
method = getattr(self, method_name, lambda: None)
|
||||
return method()
|
||||
|
||||
def spec_for_distutils(self):
|
||||
if self.is_cpython():
|
||||
return None
|
||||
|
||||
import importlib
|
||||
import importlib.abc
|
||||
import importlib.util
|
||||
|
||||
try:
|
||||
mod = importlib.import_module('setuptools._distutils')
|
||||
except Exception:
|
||||
# There are a couple of cases where setuptools._distutils
|
||||
# may not be present:
|
||||
# - An older Setuptools without a local distutils is
|
||||
# taking precedence. Ref #2957.
|
||||
# - Path manipulation during sitecustomize removes
|
||||
# setuptools from the path but only after the hook
|
||||
# has been loaded. Ref #2980.
|
||||
# In either case, fall back to stdlib behavior.
|
||||
return None
|
||||
|
||||
class DistutilsLoader(importlib.abc.Loader):
|
||||
def create_module(self, spec):
|
||||
mod.__name__ = 'distutils'
|
||||
return mod
|
||||
|
||||
def exec_module(self, module):
|
||||
pass
|
||||
|
||||
return importlib.util.spec_from_loader(
|
||||
'distutils', DistutilsLoader(), origin=mod.__file__
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def is_cpython():
|
||||
"""
|
||||
Suppress supplying distutils for CPython (build and tests).
|
||||
Ref #2965 and #3007.
|
||||
"""
|
||||
return os.path.isfile('pybuilddir.txt')
|
||||
|
||||
def spec_for_pip(self):
|
||||
"""
|
||||
Ensure stdlib distutils when running under pip.
|
||||
See pypa/pip#8761 for rationale.
|
||||
"""
|
||||
if sys.version_info >= (3, 12) or self.pip_imported_during_build():
|
||||
return
|
||||
clear_distutils()
|
||||
self.spec_for_distutils = lambda: None
|
||||
|
||||
@classmethod
|
||||
def pip_imported_during_build(cls):
|
||||
"""
|
||||
Detect if pip is being imported in a build script. Ref #2355.
|
||||
"""
|
||||
import traceback
|
||||
|
||||
return any(
|
||||
cls.frame_file_is_setup(frame) for frame, line in traceback.walk_stack(None)
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def frame_file_is_setup(frame):
|
||||
"""
|
||||
Return True if the indicated frame suggests a setup.py file.
|
||||
"""
|
||||
# some frames may not have __file__ (#2940)
|
||||
return frame.f_globals.get('__file__', '').endswith('setup.py')
|
||||
|
||||
def spec_for_sensitive_tests(self):
|
||||
"""
|
||||
Ensure stdlib distutils when running select tests under CPython.
|
||||
|
||||
python/cpython#91169
|
||||
"""
|
||||
clear_distutils()
|
||||
self.spec_for_distutils = lambda: None
|
||||
|
||||
sensitive_tests = (
|
||||
[
|
||||
'test.test_distutils',
|
||||
'test.test_peg_generator',
|
||||
'test.test_importlib',
|
||||
]
|
||||
if sys.version_info < (3, 10)
|
||||
else [
|
||||
'test.test_distutils',
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
for name in DistutilsMetaFinder.sensitive_tests:
|
||||
setattr(
|
||||
DistutilsMetaFinder,
|
||||
f'spec_for_{name}',
|
||||
DistutilsMetaFinder.spec_for_sensitive_tests,
|
||||
)
|
||||
|
||||
|
||||
DISTUTILS_FINDER = DistutilsMetaFinder()


def add_shim():
    DISTUTILS_FINDER in sys.meta_path or insert_shim()


class shim:
    def __enter__(self) -> None:
        insert_shim()

    def __exit__(self, exc: object, value: object, tb: object) -> None:
        _remove_shim()


def insert_shim():
    sys.meta_path.insert(0, DISTUTILS_FINDER)


def _remove_shim():
    try:
        sys.meta_path.remove(DISTUTILS_FINDER)
    except ValueError:
        pass


if sys.version_info < (3, 12):
    # DistutilsMetaFinder can only be disabled in Python < 3.12 (PEP 632)
    remove_shim = _remove_shim
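A quick, illustrative way to confirm the shim is doing its job once the `.pth` hook below has run at interpreter startup; this snippet is not part of the committed files and leans on the check already made in `ensure_local_distutils()`, namely that the resolved module path contains `_distutils`:

```python
# Illustrative only: with DISTUTILS_FINDER on sys.meta_path, importing
# distutils resolves to the copy bundled with setuptools.
import distutils.core

print(distutils.core.__file__)  # expected to contain '_distutils' when the shim is active
```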
Binary file not shown.
Binary file not shown.
@ -0,0 +1 @@
__import__('_distutils_hack').do_override()
@ -0,0 +1 @@
pip
@ -0,0 +1,20 @@
This package contains a modified version of ca-bundle.crt:

ca-bundle.crt -- Bundle of CA Root Certificates

This is a bundle of X.509 certificates of public Certificate Authorities
(CA). These were automatically extracted from Mozilla's root certificates
file (certdata.txt). This file can be found in the mozilla source tree:
https://hg.mozilla.org/mozilla-central/file/tip/security/nss/lib/ckfw/builtins/certdata.txt
It contains the certificates in PEM format and therefore
can be directly used with curl / libcurl / php_curl, or with
an Apache+mod_ssl webserver for SSL client authentication.
Just configure this file as the SSLCACertificateFile.#

***** BEGIN LICENSE BLOCK *****
This Source Code Form is subject to the terms of the Mozilla Public License,
v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain
one at http://mozilla.org/MPL/2.0/.

***** END LICENSE BLOCK *****
@(#) $RCSfile: certdata.txt,v $ $Revision: 1.80 $ $Date: 2011/11/03 15:11:58 $
@ -0,0 +1,77 @@
Metadata-Version: 2.2
Name: certifi
Version: 2025.1.31
Summary: Python package for providing Mozilla's CA Bundle.
Home-page: https://github.com/certifi/python-certifi
Author: Kenneth Reitz
Author-email: me@kennethreitz.com
License: MPL-2.0
Project-URL: Source, https://github.com/certifi/python-certifi
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)
Classifier: Natural Language :: English
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3.13
Requires-Python: >=3.6
License-File: LICENSE
Dynamic: author
Dynamic: author-email
Dynamic: classifier
Dynamic: description
Dynamic: home-page
Dynamic: license
Dynamic: project-url
Dynamic: requires-python
Dynamic: summary

Certifi: Python SSL Certificates
================================

Certifi provides Mozilla's carefully curated collection of Root Certificates for
validating the trustworthiness of SSL certificates while verifying the identity
of TLS hosts. It has been extracted from the `Requests`_ project.

Installation
------------

``certifi`` is available on PyPI. Simply install it with ``pip``::

    $ pip install certifi

Usage
-----

To reference the installed certificate authority (CA) bundle, you can use the
built-in function::

    >>> import certifi

    >>> certifi.where()
    '/usr/local/lib/python3.7/site-packages/certifi/cacert.pem'

Or from the command line::

    $ python -m certifi
    /usr/local/lib/python3.7/site-packages/certifi/cacert.pem

Enjoy!

.. _`Requests`: https://requests.readthedocs.io/en/master/
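For example, the returned path can be handed to the standard library ``ssl`` module to
build a client context that trusts this bundle (a minimal sketch)::

    >>> import ssl
    >>> import certifi

    >>> context = ssl.create_default_context(cafile=certifi.where())

The same path can also be passed to HTTP clients that accept a ``cafile``/``verify``-style
argument.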
Addition/Removal of Certificates
--------------------------------

Certifi does not support any addition/removal or other modification of the
CA trust store content. This project is intended to provide a reliable and
highly portable root of trust to python deployments. Look to upstream projects
for methods to use alternate trust.
@ -0,0 +1,14 @@
certifi-2025.1.31.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
certifi-2025.1.31.dist-info/LICENSE,sha256=6TcW2mucDVpKHfYP5pWzcPBpVgPSH2-D8FPkLPwQyvc,989
certifi-2025.1.31.dist-info/METADATA,sha256=t5kcT5aGu0dQ6_psUNZYTqnC0uCRnponewm3uYjeHbg,2451
certifi-2025.1.31.dist-info/RECORD,,
certifi-2025.1.31.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
certifi-2025.1.31.dist-info/top_level.txt,sha256=KMu4vUCfsjLrkPbSNdgdekS-pVJzBAJFO__nI8NF6-U,8
certifi/__init__.py,sha256=neIaAf7BM36ygmQCmy-ZsSyjnvjWghFeu13wwEAnjj0,94
certifi/__main__.py,sha256=xBBoj905TUWBLRGANOcf7oi6e-3dMP4cEoG9OyMs11g,243
certifi/__pycache__/__init__.cpython-312.pyc,,
certifi/__pycache__/__main__.cpython-312.pyc,,
certifi/__pycache__/core.cpython-312.pyc,,
certifi/cacert.pem,sha256=xVsh-Qf3-G1IrdCTVS-1ZRdJ_1-GBQjMu0I9bB-9gMc,297255
certifi/core.py,sha256=qRDDFyXVJwTB_EmoGppaXU_R9qCZvhl-EzxPMuV3nTA,4426
certifi/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@ -0,0 +1,5 @@
Wheel-Version: 1.0
Generator: setuptools (75.8.0)
Root-Is-Purelib: true
Tag: py3-none-any

@ -0,0 +1 @@
certifi
4
llmlab/lib/python3.12/site-packages/certifi/__init__.py
Normal file
@ -0,0 +1,4 @@
from .core import contents, where

__all__ = ["contents", "where"]
__version__ = "2025.01.31"
12
llmlab/lib/python3.12/site-packages/certifi/__main__.py
Normal file
@ -0,0 +1,12 @@
import argparse

from certifi import contents, where

parser = argparse.ArgumentParser()
parser.add_argument("-c", "--contents", action="store_true")
args = parser.parse_args()

if args.contents:
    print(contents())
else:
    print(where())
Binary file not shown.
Binary file not shown.
Binary file not shown.
4897
llmlab/lib/python3.12/site-packages/certifi/cacert.pem
Normal file
File diff suppressed because it is too large.
114
llmlab/lib/python3.12/site-packages/certifi/core.py
Normal file
@ -0,0 +1,114 @@
"""
certifi.py
~~~~~~~~~~

This module returns the installation location of cacert.pem or its contents.
"""
import sys
import atexit

def exit_cacert_ctx() -> None:
    _CACERT_CTX.__exit__(None, None, None)  # type: ignore[union-attr]


if sys.version_info >= (3, 11):

    from importlib.resources import as_file, files

    _CACERT_CTX = None
    _CACERT_PATH = None

    def where() -> str:
        # This is slightly terrible, but we want to delay extracting the file
        # in cases where we're inside of a zipimport situation until someone
        # actually calls where(), but we don't want to re-extract the file
        # on every call of where(), so we'll do it once then store it in a
        # global variable.
        global _CACERT_CTX
        global _CACERT_PATH
        if _CACERT_PATH is None:
            # This is slightly janky, the importlib.resources API wants you to
            # manage the cleanup of this file, so it doesn't actually return a
            # path, it returns a context manager that will give you the path
            # when you enter it and will do any cleanup when you leave it. In
            # the common case of not needing a temporary file, it will just
            # return the file system location and the __exit__() is a no-op.
            #
            # We also have to hold onto the actual context manager, because
            # it will do the cleanup whenever it gets garbage collected, so
            # we will also store that at the global level as well.
            _CACERT_CTX = as_file(files("certifi").joinpath("cacert.pem"))
            _CACERT_PATH = str(_CACERT_CTX.__enter__())
            atexit.register(exit_cacert_ctx)

        return _CACERT_PATH

    def contents() -> str:
        return files("certifi").joinpath("cacert.pem").read_text(encoding="ascii")

elif sys.version_info >= (3, 7):

    from importlib.resources import path as get_path, read_text

    _CACERT_CTX = None
    _CACERT_PATH = None

    def where() -> str:
        # This is slightly terrible, but we want to delay extracting the
        # file in cases where we're inside of a zipimport situation until
        # someone actually calls where(), but we don't want to re-extract
        # the file on every call of where(), so we'll do it once then store
        # it in a global variable.
        global _CACERT_CTX
        global _CACERT_PATH
        if _CACERT_PATH is None:
            # This is slightly janky, the importlib.resources API wants you
            # to manage the cleanup of this file, so it doesn't actually
            # return a path, it returns a context manager that will give
            # you the path when you enter it and will do any cleanup when
            # you leave it. In the common case of not needing a temporary
            # file, it will just return the file system location and the
            # __exit__() is a no-op.
            #
            # We also have to hold onto the actual context manager, because
            # it will do the cleanup whenever it gets garbage collected, so
            # we will also store that at the global level as well.
            _CACERT_CTX = get_path("certifi", "cacert.pem")
            _CACERT_PATH = str(_CACERT_CTX.__enter__())
            atexit.register(exit_cacert_ctx)

        return _CACERT_PATH

    def contents() -> str:
        return read_text("certifi", "cacert.pem", encoding="ascii")

else:
    import os
    import types
    from typing import Union

    Package = Union[types.ModuleType, str]
    Resource = Union[str, "os.PathLike"]

    # This fallback will work for Python versions prior to 3.7 that lack the
    # importlib.resources module but relies on the existing `where` function
    # so won't address issues with environments like PyOxidizer that don't set
    # __file__ on modules.
    def read_text(
        package: Package,
        resource: Resource,
        encoding: str = 'utf-8',
        errors: str = 'strict'
    ) -> str:
        with open(where(), encoding=encoding) as data:
            return data.read()

    # If we don't have importlib.resources, then we will just do the old logic
    # of assuming we're on the filesystem and munge the path directly.
    def where() -> str:
        f = os.path.dirname(__file__)

        return os.path.join(f, "cacert.pem")

    def contents() -> str:
        return read_text("certifi", "cacert.pem", encoding="ascii")
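For reference, a minimal usage sketch of the module above (illustrative only, not part of the committed files): `where()` yields a filesystem path to the bundled `cacert.pem`, and `contents()` yields the PEM text itself.

```python
# Illustrative only: exercise the two public helpers defined above.
import certifi

bundle_path = certifi.where()   # e.g. .../site-packages/certifi/cacert.pem
pem_text = certifi.contents()   # the PEM-encoded certificates as a string

print(bundle_path)
print(pem_text.count("BEGIN CERTIFICATE"))  # number of certificates in the bundle
```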
@ -0,0 +1 @@
pip
@ -0,0 +1,21 @@
MIT License

Copyright (c) 2025 TAHRI Ahmed R.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@ -0,0 +1,721 @@
|
||||
Metadata-Version: 2.1
|
||||
Name: charset-normalizer
|
||||
Version: 3.4.1
|
||||
Summary: The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet.
|
||||
Author-email: "Ahmed R. TAHRI" <tahri.ahmed@proton.me>
|
||||
Maintainer-email: "Ahmed R. TAHRI" <tahri.ahmed@proton.me>
|
||||
License: MIT
|
||||
Project-URL: Changelog, https://github.com/jawah/charset_normalizer/blob/master/CHANGELOG.md
|
||||
Project-URL: Documentation, https://charset-normalizer.readthedocs.io/
|
||||
Project-URL: Code, https://github.com/jawah/charset_normalizer
|
||||
Project-URL: Issue tracker, https://github.com/jawah/charset_normalizer/issues
|
||||
Keywords: encoding,charset,charset-detector,detector,normalization,unicode,chardet,detect
|
||||
Classifier: Development Status :: 5 - Production/Stable
|
||||
Classifier: Intended Audience :: Developers
|
||||
Classifier: License :: OSI Approved :: MIT License
|
||||
Classifier: Operating System :: OS Independent
|
||||
Classifier: Programming Language :: Python
|
||||
Classifier: Programming Language :: Python :: 3
|
||||
Classifier: Programming Language :: Python :: 3.7
|
||||
Classifier: Programming Language :: Python :: 3.8
|
||||
Classifier: Programming Language :: Python :: 3.9
|
||||
Classifier: Programming Language :: Python :: 3.10
|
||||
Classifier: Programming Language :: Python :: 3.11
|
||||
Classifier: Programming Language :: Python :: 3.12
|
||||
Classifier: Programming Language :: Python :: 3.13
|
||||
Classifier: Programming Language :: Python :: 3 :: Only
|
||||
Classifier: Programming Language :: Python :: Implementation :: CPython
|
||||
Classifier: Programming Language :: Python :: Implementation :: PyPy
|
||||
Classifier: Topic :: Text Processing :: Linguistic
|
||||
Classifier: Topic :: Utilities
|
||||
Classifier: Typing :: Typed
|
||||
Requires-Python: >=3.7
|
||||
Description-Content-Type: text/markdown
|
||||
License-File: LICENSE
|
||||
Provides-Extra: unicode-backport
|
||||
|
||||
<h1 align="center">Charset Detection, for Everyone 👋</h1>
|
||||
|
||||
<p align="center">
|
||||
<sup>The Real First Universal Charset Detector</sup><br>
|
||||
<a href="https://pypi.org/project/charset-normalizer">
|
||||
<img src="https://img.shields.io/pypi/pyversions/charset_normalizer.svg?orange=blue" />
|
||||
</a>
|
||||
<a href="https://pepy.tech/project/charset-normalizer/">
|
||||
<img alt="Download Count Total" src="https://static.pepy.tech/badge/charset-normalizer/month" />
|
||||
</a>
|
||||
<a href="https://bestpractices.coreinfrastructure.org/projects/7297">
|
||||
<img src="https://bestpractices.coreinfrastructure.org/projects/7297/badge">
|
||||
</a>
|
||||
</p>
|
||||
<p align="center">
|
||||
<sup><i>Featured Packages</i></sup><br>
|
||||
<a href="https://github.com/jawah/niquests">
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/Niquests-Best_HTTP_Client-cyan">
|
||||
</a>
|
||||
<a href="https://github.com/jawah/wassima">
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/Wassima-Certifi_Killer-cyan">
|
||||
</a>
|
||||
</p>
|
||||
<p align="center">
|
||||
<sup><i>In other language (unofficial port - by the community)</i></sup><br>
|
||||
<a href="https://github.com/nickspring/charset-normalizer-rs">
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/Rust-red">
|
||||
</a>
|
||||
</p>
|
||||
|
||||
> A library that helps you read text from an unknown charset encoding.<br /> Motivated by `chardet`,
|
||||
> I'm trying to resolve the issue by taking a new approach.
|
||||
> All IANA character set names for which the Python core library provides codecs are supported.
|
||||
|
||||
<p align="center">
|
||||
>>>>> <a href="https://charsetnormalizerweb.ousret.now.sh" target="_blank">👉 Try Me Online Now, Then Adopt Me 👈 </a> <<<<<
|
||||
</p>
|
||||
|
||||
This project offers you an alternative to **Universal Charset Encoding Detector**, also known as **Chardet**.
|
||||
|
||||
| Feature | [Chardet](https://github.com/chardet/chardet) | Charset Normalizer | [cChardet](https://github.com/PyYoshi/cChardet) |
|
||||
|--------------------------------------------------|:---------------------------------------------:|:--------------------------------------------------------------------------------------------------:|:-----------------------------------------------:|
|
||||
| `Fast` | ❌ | ✅ | ✅ |
|
||||
| `Universal**` | ❌ | ✅ | ❌ |
|
||||
| `Reliable` **without** distinguishable standards | ❌ | ✅ | ✅ |
|
||||
| `Reliable` **with** distinguishable standards | ✅ | ✅ | ✅ |
|
||||
| `License` | LGPL-2.1<br>_restrictive_ | MIT | MPL-1.1<br>_restrictive_ |
|
||||
| `Native Python` | ✅ | ✅ | ❌ |
|
||||
| `Detect spoken language` | ❌ | ✅ | N/A |
|
||||
| `UnicodeDecodeError Safety` | ❌ | ✅ | ❌ |
|
||||
| `Whl Size (min)` | 193.6 kB | 42 kB | ~200 kB |
|
||||
| `Supported Encoding` | 33 | 🎉 [99](https://charset-normalizer.readthedocs.io/en/latest/user/support.html#supported-encodings) | 40 |
|
||||
|
||||
<p align="center">
|
||||
<img src="https://i.imgflip.com/373iay.gif" alt="Reading Normalized Text" width="226"/><img src="https://media.tenor.com/images/c0180f70732a18b4965448d33adba3d0/tenor.gif" alt="Cat Reading Text" width="200"/>
|
||||
</p>
|
||||
|
||||
*\*\* : They are clearly using specific code for a specific encoding even if covering most of used one*<br>
|
||||
|
||||
## ⚡ Performance
|
||||
|
||||
This package offer better performance than its counterpart Chardet. Here are some numbers.
|
||||
|
||||
| Package | Accuracy | Mean per file (ms) | File per sec (est) |
|
||||
|-----------------------------------------------|:--------:|:------------------:|:------------------:|
|
||||
| [chardet](https://github.com/chardet/chardet) | 86 % | 63 ms | 16 file/sec |
|
||||
| charset-normalizer | **98 %** | **10 ms** | 100 file/sec |
|
||||
|
||||
| Package | 99th percentile | 95th percentile | 50th percentile |
|
||||
|-----------------------------------------------|:---------------:|:---------------:|:---------------:|
|
||||
| [chardet](https://github.com/chardet/chardet) | 265 ms | 71 ms | 7 ms |
|
||||
| charset-normalizer | 100 ms | 50 ms | 5 ms |
|
||||
|
||||
_updated as of december 2024 using CPython 3.12_
|
||||
|
||||
Chardet's performance on larger file (1MB+) are very poor. Expect huge difference on large payload.
|
||||
|
||||
> Stats are generated using 400+ files using default parameters. More details on used files, see GHA workflows.
|
||||
> And yes, these results might change at any time. The dataset can be updated to include more files.
|
||||
> The actual delays heavily depends on your CPU capabilities. The factors should remain the same.
|
||||
> Keep in mind that the stats are generous and that Chardet accuracy vs our is measured using Chardet initial capability
|
||||
> (e.g. Supported Encoding) Challenge-them if you want.
|
||||
|
||||
## ✨ Installation
|
||||
|
||||
Using pip:
|
||||
|
||||
```sh
|
||||
pip install charset-normalizer -U
|
||||
```
|
||||
|
||||
## 🚀 Basic Usage
|
||||
|
||||
### CLI
|
||||
This package comes with a CLI.
|
||||
|
||||
```
|
||||
usage: normalizer [-h] [-v] [-a] [-n] [-m] [-r] [-f] [-t THRESHOLD]
|
||||
file [file ...]
|
||||
|
||||
The Real First Universal Charset Detector. Discover originating encoding used
|
||||
on text file. Normalize text to unicode.
|
||||
|
||||
positional arguments:
|
||||
files File(s) to be analysed
|
||||
|
||||
optional arguments:
|
||||
-h, --help show this help message and exit
|
||||
-v, --verbose Display complementary information about file if any.
|
||||
Stdout will contain logs about the detection process.
|
||||
-a, --with-alternative
|
||||
Output complementary possibilities if any. Top-level
|
||||
JSON WILL be a list.
|
||||
-n, --normalize Permit to normalize input file. If not set, program
|
||||
does not write anything.
|
||||
-m, --minimal Only output the charset detected to STDOUT. Disabling
|
||||
JSON output.
|
||||
-r, --replace Replace file when trying to normalize it instead of
|
||||
creating a new one.
|
||||
-f, --force Replace file without asking if you are sure, use this
|
||||
flag with caution.
|
||||
-t THRESHOLD, --threshold THRESHOLD
|
||||
Define a custom maximum amount of chaos allowed in
|
||||
decoded content. 0. <= chaos <= 1.
|
||||
--version Show version information and exit.
|
||||
```
|
||||
|
||||
```bash
|
||||
normalizer ./data/sample.1.fr.srt
|
||||
```
|
||||
|
||||
or
|
||||
|
||||
```bash
|
||||
python -m charset_normalizer ./data/sample.1.fr.srt
|
||||
```
|
||||
|
||||
🎉 Since version 1.4.0 the CLI produce easily usable stdout result in JSON format.
|
||||
|
||||
```json
|
||||
{
|
||||
"path": "/home/default/projects/charset_normalizer/data/sample.1.fr.srt",
|
||||
"encoding": "cp1252",
|
||||
"encoding_aliases": [
|
||||
"1252",
|
||||
"windows_1252"
|
||||
],
|
||||
"alternative_encodings": [
|
||||
"cp1254",
|
||||
"cp1256",
|
||||
"cp1258",
|
||||
"iso8859_14",
|
||||
"iso8859_15",
|
||||
"iso8859_16",
|
||||
"iso8859_3",
|
||||
"iso8859_9",
|
||||
"latin_1",
|
||||
"mbcs"
|
||||
],
|
||||
"language": "French",
|
||||
"alphabets": [
|
||||
"Basic Latin",
|
||||
"Latin-1 Supplement"
|
||||
],
|
||||
"has_sig_or_bom": false,
|
||||
"chaos": 0.149,
|
||||
"coherence": 97.152,
|
||||
"unicode_path": null,
|
||||
"is_preferred": true
|
||||
}
|
||||
```
|
||||
|
||||
### Python
*Just print out normalized text*
```python
from charset_normalizer import from_path

results = from_path('./my_subtitle.srt')

print(str(results.best()))
```

*Upgrade your code without effort*
```python
from charset_normalizer import detect
```

The above code will behave the same as **chardet**. We ensure that we offer the best (reasonable) BC result possible.
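If you need more than the normalized text, here is a minimal sketch along the same lines (note that `best()` returns `None` when nothing plausible is found; `encoding` and `language` are properties of the returned `CharsetMatch`):

```python
from charset_normalizer import from_path

best_guess = from_path('./my_subtitle.srt').best()

if best_guess is not None:
    print(best_guess.encoding)   # e.g. 'cp1252'
    print(best_guess.language)   # e.g. 'French'
    print(str(best_guess))       # the decoded, normalized text
```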
See the docs for advanced usage : [readthedocs.io](https://charset-normalizer.readthedocs.io/en/latest/)
|
||||
|
||||
## 😇 Why

When I started using Chardet, I noticed that it was not suited to my expectations, and I wanted to propose a
reliable alternative using a completely different method. Also! I never back down on a good challenge!

I **don't care** about the **originating charset** encoding, because **two different tables** can
produce **two identical rendered strings.**
What I want is to get readable text, the best I can.

In a way, **I'm brute forcing text decoding.** How cool is that? 😎

Don't confuse the package **ftfy** with charset-normalizer or chardet. ftfy's goal is to repair Unicode strings, whereas charset-normalizer converts a raw file in an unknown encoding to Unicode.

## 🍰 How

- Discard all charset encoding tables that could not fit the binary content.
- Measure the noise, i.e. the mess, once the content is opened (in chunks) with a corresponding charset encoding.
- Extract the matches with the lowest mess detected.
- Additionally, we measure coherence / probe for a language (a simplified sketch of the overall idea follows below).

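As a rough illustration of that idea only (this is **not** the library's actual implementation; the candidate list and the mess metric below are invented for the sketch):

```python
from typing import Optional

CANDIDATES = ["utf_8", "cp1252", "latin_1"]  # deliberately tiny list, for the sketch only

def naive_guess(payload: bytes) -> Optional[str]:
    best: Optional[tuple] = None
    for encoding in CANDIDATES:
        try:
            text = payload.decode(encoding)  # discard tables that cannot fit the content
        except UnicodeDecodeError:
            continue
        # crude "mess" score: share of characters that do not look like readable text
        mess = sum(not ch.isprintable() and ch not in "\r\n\t" for ch in text) / max(len(text), 1)
        if best is None or mess < best[0]:
            best = (mess, encoding)
    return best[1] if best is not None else None

print(naive_guess("Vous êtes là ?".encode("cp1252")))  # -> cp1252
```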
**Wait a minute**, what are noise/mess and coherence according to **YOU?**

*Noise:* I opened hundreds of text files, **written by humans**, with the wrong encoding table. **I observed**, then
**I established** some ground rules about **what is obvious** when **it seems like** a mess (i.e. defining noise in rendered text).
I know that my interpretation of what counts as noise is probably incomplete; feel free to contribute in order to
improve or rewrite it.

*Coherence:* For each language on Earth, we have computed ranked letter-occurrence frequencies (as best we can). So I thought
that intel is worth something here. So I use those records against the decoded text to check whether I can detect intelligent design.

## ⚡ Known limitations

- Language detection is unreliable when the text contains two or more languages sharing identical letters (e.g. HTML with English tags plus Turkish content, both using Latin characters).
- Every charset detector heavily depends on having sufficient content. In common cases, do not bother running detection on very tiny content.

## ⚠️ About Python EOLs
|
||||
|
||||
**If you are running:**
|
||||
|
||||
- Python >=2.7,<3.5: Unsupported
|
||||
- Python 3.5: charset-normalizer < 2.1
|
||||
- Python 3.6: charset-normalizer < 3.1
|
||||
- Python 3.7: charset-normalizer < 4.0
|
||||
|
||||
Upgrade your Python interpreter as soon as possible.
|
||||
|
||||
## 👤 Contributing
|
||||
|
||||
Contributions, issues and feature requests are very much welcome.<br />
|
||||
Feel free to check [issues page](https://github.com/ousret/charset_normalizer/issues) if you want to contribute.
|
||||
|
||||
## 📝 License
|
||||
|
||||
Copyright © [Ahmed TAHRI @Ousret](https://github.com/Ousret).<br />
|
||||
This project is [MIT](https://github.com/Ousret/charset_normalizer/blob/master/LICENSE) licensed.
|
||||
|
||||
Characters frequencies used in this project © 2012 [Denny Vrandečić](http://simia.net/letters/)
|
||||
|
||||
## 💼 For Enterprise
|
||||
|
||||
Professional support for charset-normalizer is available as part of the [Tidelift
|
||||
Subscription][1]. Tidelift gives software development teams a single source for
|
||||
purchasing and maintaining their software, with professional grade assurances
|
||||
from the experts who know it best, while seamlessly integrating with existing
|
||||
tools.
|
||||
|
||||
[1]: https://tidelift.com/subscription/pkg/pypi-charset-normalizer?utm_source=pypi-charset-normalizer&utm_medium=readme
|
||||
|
||||
[](https://www.bestpractices.dev/projects/7297)
|
||||
|
||||
# Changelog
|
||||
All notable changes to charset-normalizer will be documented in this file. This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
||||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
|
||||
|
||||
## [3.4.1](https://github.com/Ousret/charset_normalizer/compare/3.4.0...3.4.1) (2024-12-24)
|
||||
|
||||
### Changed
|
||||
- Project metadata are now stored using `pyproject.toml` instead of `setup.cfg` using setuptools as the build backend.
|
||||
- Enforce annotation delayed loading for a simpler and consistent types in the project.
|
||||
- Optional mypyc compilation upgraded to version 1.14 for Python >= 3.8
|
||||
|
||||
### Added
|
||||
- pre-commit configuration.
|
||||
- noxfile.
|
||||
|
||||
### Removed
|
||||
- `build-requirements.txt` as per using `pyproject.toml` native build configuration.
|
||||
- `bin/integration.py` and `bin/serve.py` in favor of downstream integration test (see noxfile).
|
||||
- `setup.cfg` in favor of `pyproject.toml` metadata configuration.
|
||||
- Unused `utils.range_scan` function.
|
||||
|
||||
### Fixed
|
||||
- Converting content to Unicode bytes may insert `utf_8` instead of preferred `utf-8`. (#572)
|
||||
- Deprecation warning "'count' is passed as positional argument" when converting to Unicode bytes on Python 3.13+
|
||||
|
||||
## [3.4.0](https://github.com/Ousret/charset_normalizer/compare/3.3.2...3.4.0) (2024-10-08)
|
||||
|
||||
### Added
|
||||
- Argument `--no-preemptive` in the CLI to prevent the detector to search for hints.
|
||||
- Support for Python 3.13 (#512)
|
||||
|
||||
### Fixed
|
||||
- Relax the TypeError exception thrown when trying to compare a CharsetMatch with anything else than a CharsetMatch.
|
||||
- Improved the general reliability of the detector based on user feedbacks. (#520) (#509) (#498) (#407) (#537)
|
||||
- Declared charset in content (preemptive detection) not changed when converting to utf-8 bytes. (#381)
|
||||
|
||||
## [3.3.2](https://github.com/Ousret/charset_normalizer/compare/3.3.1...3.3.2) (2023-10-31)
|
||||
|
||||
### Fixed
|
||||
- Unintentional memory usage regression when using large payload that match several encoding (#376)
|
||||
- Regression on some detection case showcased in the documentation (#371)
|
||||
|
||||
### Added
|
||||
- Noise (md) probe that identify malformed arabic representation due to the presence of letters in isolated form (credit to my wife)
|
||||
|
||||
## [3.3.1](https://github.com/Ousret/charset_normalizer/compare/3.3.0...3.3.1) (2023-10-22)
|
||||
|
||||
### Changed
|
||||
- Optional mypyc compilation upgraded to version 1.6.1 for Python >= 3.8
|
||||
- Improved the general detection reliability based on reports from the community
|
||||
|
||||
## [3.3.0](https://github.com/Ousret/charset_normalizer/compare/3.2.0...3.3.0) (2023-09-30)
|
||||
|
||||
### Added
|
||||
- Allow to execute the CLI (e.g. normalizer) through `python -m charset_normalizer.cli` or `python -m charset_normalizer`
|
||||
- Support for 9 forgotten encoding that are supported by Python but unlisted in `encoding.aliases` as they have no alias (#323)
|
||||
|
||||
### Removed
|
||||
- (internal) Redundant utils.is_ascii function and unused function is_private_use_only
|
||||
- (internal) charset_normalizer.assets is moved inside charset_normalizer.constant
|
||||
|
||||
### Changed
|
||||
- (internal) Unicode code blocks in constants are updated using the latest v15.0.0 definition to improve detection
|
||||
- Optional mypyc compilation upgraded to version 1.5.1 for Python >= 3.8
|
||||
|
||||
### Fixed
|
||||
- Unable to properly sort CharsetMatch when both chaos/noise and coherence were close due to an unreachable condition in \_\_lt\_\_ (#350)
|
||||
|
||||
## [3.2.0](https://github.com/Ousret/charset_normalizer/compare/3.1.0...3.2.0) (2023-06-07)
|
||||
|
||||
### Changed
|
||||
- Typehint for function `from_path` no longer enforce `PathLike` as its first argument
|
||||
- Minor improvement over the global detection reliability
|
||||
|
||||
### Added
|
||||
- Introduce function `is_binary` that relies on main capabilities, and optimized to detect binaries
|
||||
- Propagate `enable_fallback` argument throughout `from_bytes`, `from_path`, and `from_fp` that allow a deeper control over the detection (default True)
|
||||
- Explicit support for Python 3.12
|
||||
|
||||
### Fixed
|
||||
- Edge case detection failure where a file would contain 'very-long' camel cased word (Issue #289)
|
||||
|
||||
## [3.1.0](https://github.com/Ousret/charset_normalizer/compare/3.0.1...3.1.0) (2023-03-06)
|
||||
|
||||
### Added
|
||||
- Argument `should_rename_legacy` for legacy function `detect` and disregard any new arguments without errors (PR #262)
|
||||
|
||||
### Removed
|
||||
- Support for Python 3.6 (PR #260)
|
||||
|
||||
### Changed
|
||||
- Optional speedup provided by mypy/c 1.0.1
|
||||
|
||||
## [3.0.1](https://github.com/Ousret/charset_normalizer/compare/3.0.0...3.0.1) (2022-11-18)
|
||||
|
||||
### Fixed
|
||||
- Multi-bytes cutter/chunk generator did not always cut correctly (PR #233)
|
||||
|
||||
### Changed
|
||||
- Speedup provided by mypy/c 0.990 on Python >= 3.7
|
||||
|
||||
## [3.0.0](https://github.com/Ousret/charset_normalizer/compare/2.1.1...3.0.0) (2022-10-20)
|
||||
|
||||
### Added
|
||||
- Extend the capability of explain=True when cp_isolation contains at most two entries (min one), will log in details of the Mess-detector results
|
||||
- Support for alternative language frequency set in charset_normalizer.assets.FREQUENCIES
|
||||
- Add parameter `language_threshold` in `from_bytes`, `from_path` and `from_fp` to adjust the minimum expected coherence ratio
|
||||
- `normalizer --version` now specify if current version provide extra speedup (meaning mypyc compilation whl)
|
||||
|
||||
### Changed
|
||||
- Build with static metadata using 'build' frontend
|
||||
- Make the language detection stricter
|
||||
- Optional: Module `md.py` can be compiled using Mypyc to provide an extra speedup up to 4x faster than v2.1
|
||||
|
||||
### Fixed
|
||||
- CLI with opt --normalize fail when using full path for files
|
||||
- TooManyAccentuatedPlugin induce false positive on the mess detection when too few alpha character have been fed to it
|
||||
- Sphinx warnings when generating the documentation
|
||||
|
||||
### Removed
|
||||
- Coherence detector no longer return 'Simple English' instead return 'English'
|
||||
- Coherence detector no longer return 'Classical Chinese' instead return 'Chinese'
|
||||
- Breaking: Method `first()` and `best()` from CharsetMatch
|
||||
- UTF-7 will no longer appear as "detected" without a recognized SIG/mark (is unreliable/conflict with ASCII)
|
||||
- Breaking: Class aliases CharsetDetector, CharsetDoctor, CharsetNormalizerMatch and CharsetNormalizerMatches
|
||||
- Breaking: Top-level function `normalize`
|
||||
- Breaking: Properties `chaos_secondary_pass`, `coherence_non_latin` and `w_counter` from CharsetMatch
|
||||
- Support for the backport `unicodedata2`
|
||||
|
||||
## [3.0.0rc1](https://github.com/Ousret/charset_normalizer/compare/3.0.0b2...3.0.0rc1) (2022-10-18)
|
||||
|
||||
### Added
|
||||
- Extend the capability of explain=True when cp_isolation contains at most two entries (min one), will log in details of the Mess-detector results
|
||||
- Support for alternative language frequency set in charset_normalizer.assets.FREQUENCIES
|
||||
- Add parameter `language_threshold` in `from_bytes`, `from_path` and `from_fp` to adjust the minimum expected coherence ratio
|
||||
|
||||
### Changed
|
||||
- Build with static metadata using 'build' frontend
|
||||
- Make the language detection stricter
|
||||
|
||||
### Fixed
|
||||
- CLI with opt --normalize fail when using full path for files
|
||||
- TooManyAccentuatedPlugin induce false positive on the mess detection when too few alpha character have been fed to it
|
||||
|
||||
### Removed
|
||||
- Coherence detector no longer return 'Simple English' instead return 'English'
|
||||
- Coherence detector no longer return 'Classical Chinese' instead return 'Chinese'
|
||||
|
||||
## [3.0.0b2](https://github.com/Ousret/charset_normalizer/compare/3.0.0b1...3.0.0b2) (2022-08-21)
|
||||
|
||||
### Added
|
||||
- `normalizer --version` now specify if current version provide extra speedup (meaning mypyc compilation whl)
|
||||
|
||||
### Removed
|
||||
- Breaking: Method `first()` and `best()` from CharsetMatch
|
||||
- UTF-7 will no longer appear as "detected" without a recognized SIG/mark (is unreliable/conflict with ASCII)
|
||||
|
||||
### Fixed
|
||||
- Sphinx warnings when generating the documentation
|
||||
|
||||
## [3.0.0b1](https://github.com/Ousret/charset_normalizer/compare/2.1.0...3.0.0b1) (2022-08-15)
|
||||
|
||||
### Changed
|
||||
- Optional: Module `md.py` can be compiled using Mypyc to provide an extra speedup up to 4x faster than v2.1
|
||||
|
||||
### Removed
|
||||
- Breaking: Class aliases CharsetDetector, CharsetDoctor, CharsetNormalizerMatch and CharsetNormalizerMatches
|
||||
- Breaking: Top-level function `normalize`
|
||||
- Breaking: Properties `chaos_secondary_pass`, `coherence_non_latin` and `w_counter` from CharsetMatch
|
||||
- Support for the backport `unicodedata2`
|
||||
|
||||
## [2.1.1](https://github.com/Ousret/charset_normalizer/compare/2.1.0...2.1.1) (2022-08-19)
|
||||
|
||||
### Deprecated
|
||||
- Function `normalize` scheduled for removal in 3.0
|
||||
|
||||
### Changed
|
||||
- Removed useless call to decode in fn is_unprintable (#206)
|
||||
|
||||
### Fixed
|
||||
- Third-party library (i18n xgettext) crashing not recognizing utf_8 (PEP 263) with underscore from [@aleksandernovikov](https://github.com/aleksandernovikov) (#204)
|
||||
|
||||
## [2.1.0](https://github.com/Ousret/charset_normalizer/compare/2.0.12...2.1.0) (2022-06-19)
|
||||
|
||||
### Added
|
||||
- Output the Unicode table version when running the CLI with `--version` (PR #194)
|
||||
|
||||
### Changed
|
||||
- Re-use decoded buffer for single byte character sets from [@nijel](https://github.com/nijel) (PR #175)
|
||||
- Fixing some performance bottlenecks from [@deedy5](https://github.com/deedy5) (PR #183)
|
||||
|
||||
### Fixed
|
||||
- Workaround potential bug in cpython with Zero Width No-Break Space located in Arabic Presentation Forms-B, Unicode 1.1 not acknowledged as space (PR #175)
|
||||
- CLI default threshold aligned with the API threshold from [@oleksandr-kuzmenko](https://github.com/oleksandr-kuzmenko) (PR #181)
|
||||
|
||||
### Removed
|
||||
- Support for Python 3.5 (PR #192)
|
||||
|
||||
### Deprecated
|
||||
- Use of backport unicodedata from `unicodedata2` as Python is quickly catching up, scheduled for removal in 3.0 (PR #194)
|
||||
|
||||
## [2.0.12](https://github.com/Ousret/charset_normalizer/compare/2.0.11...2.0.12) (2022-02-12)
|
||||
|
||||
### Fixed
|
||||
- ASCII miss-detection on rare cases (PR #170)
|
||||
|
||||
## [2.0.11](https://github.com/Ousret/charset_normalizer/compare/2.0.10...2.0.11) (2022-01-30)
|
||||
|
||||
### Added
|
||||
- Explicit support for Python 3.11 (PR #164)
|
||||
|
||||
### Changed
|
||||
- The logging behavior have been completely reviewed, now using only TRACE and DEBUG levels (PR #163 #165)
|
||||
|
||||
## [2.0.10](https://github.com/Ousret/charset_normalizer/compare/2.0.9...2.0.10) (2022-01-04)
|
||||
|
||||
### Fixed
|
||||
- Fallback match entries might lead to UnicodeDecodeError for large bytes sequence (PR #154)
|
||||
|
||||
### Changed
|
||||
- Skipping the language-detection (CD) on ASCII (PR #155)
|
||||
|
||||
## [2.0.9](https://github.com/Ousret/charset_normalizer/compare/2.0.8...2.0.9) (2021-12-03)
|
||||
|
||||
### Changed
|
||||
- Moderating the logging impact (since 2.0.8) for specific environments (PR #147)
|
||||
|
||||
### Fixed
|
||||
- Wrong logging level applied when setting kwarg `explain` to True (PR #146)
|
||||
|
||||
## [2.0.8](https://github.com/Ousret/charset_normalizer/compare/2.0.7...2.0.8) (2021-11-24)
|
||||
### Changed
|
||||
- Improvement over Vietnamese detection (PR #126)
|
||||
- MD improvement on trailing data and long foreign (non-pure latin) data (PR #124)
|
||||
- Efficiency improvements in cd/alphabet_languages from [@adbar](https://github.com/adbar) (PR #122)
|
||||
- call sum() without an intermediary list following PEP 289 recommendations from [@adbar](https://github.com/adbar) (PR #129)
|
||||
- Code style as refactored by Sourcery-AI (PR #131)
|
||||
- Minor adjustment on the MD around european words (PR #133)
|
||||
- Remove and replace SRTs from assets / tests (PR #139)
|
||||
- Initialize the library logger with a `NullHandler` by default from [@nmaynes](https://github.com/nmaynes) (PR #135)
|
||||
- Setting kwarg `explain` to True will add provisionally (bounded to function lifespan) a specific stream handler (PR #135)
|
||||
|
||||
### Fixed
|
||||
- Fix large (misleading) sequence giving UnicodeDecodeError (PR #137)
|
||||
- Avoid using too insignificant chunk (PR #137)
|
||||
|
||||
### Added
|
||||
- Add and expose function `set_logging_handler` to configure a specific StreamHandler from [@nmaynes](https://github.com/nmaynes) (PR #135)
|
||||
- Add `CHANGELOG.md` entries, format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) (PR #141)
|
||||
|
||||
## [2.0.7](https://github.com/Ousret/charset_normalizer/compare/2.0.6...2.0.7) (2021-10-11)
|
||||
### Added
|
||||
- Add support for Kazakh (Cyrillic) language detection (PR #109)
|
||||
|
||||
### Changed
|
||||
- Further improve inferring the language from a given single-byte code page (PR #112)
|
||||
- Vainly trying to leverage PEP263 when PEP3120 is not supported (PR #116)
|
||||
- Refactoring for potential performance improvements in loops from [@adbar](https://github.com/adbar) (PR #113)
|
||||
- Various detection improvement (MD+CD) (PR #117)
|
||||
|
||||
### Removed
|
||||
- Remove redundant logging entry about detected language(s) (PR #115)
|
||||
|
||||
### Fixed
|
||||
- Fix a minor inconsistency between Python 3.5 and other versions regarding language detection (PR #117 #102)
|
||||
|
||||
## [2.0.6](https://github.com/Ousret/charset_normalizer/compare/2.0.5...2.0.6) (2021-09-18)
|
||||
### Fixed
|
||||
- Unforeseen regression causing the loss of backward compatibility with some older minor versions of Python 3.5.x (PR #100)
|
||||
- Fix CLI crash when using --minimal output in certain cases (PR #103)
|
||||
|
||||
### Changed
|
||||
- Minor improvement to the detection efficiency (less than 1%) (PR #106 #101)
|
||||
|
||||
## [2.0.5](https://github.com/Ousret/charset_normalizer/compare/2.0.4...2.0.5) (2021-09-14)
|
||||
### Changed
|
||||
- The project now complies with: flake8, mypy, isort and black to ensure a better overall quality (PR #81)
|
||||
- The BC-support with v1.x was improved, the old staticmethods are restored (PR #82)
|
||||
- The Unicode detection is slightly improved (PR #93)
|
||||
- Add syntax sugar \_\_bool\_\_ for results CharsetMatches list-container (PR #91)
|
||||
|
||||
### Removed
|
||||
- The project no longer raises a warning on tiny content given for detection; it is simply logged as a warning instead (PR #92)
|
||||
|
||||
### Fixed
|
||||
- In some rare cases, the chunks extractor could cut in the middle of a multi-byte character and could mislead the mess detection (PR #95)
|
||||
- Some rare 'space' characters could trip up the UnprintablePlugin/Mess detection (PR #96)
|
||||
- The MANIFEST.in was not exhaustive (PR #78)
|
||||
|
||||
## [2.0.4](https://github.com/Ousret/charset_normalizer/compare/2.0.3...2.0.4) (2021-07-30)
|
||||
### Fixed
|
||||
- The CLI no longer raises an unexpected exception when no encoding has been found (PR #70)
|
||||
- Fix accessing the 'alphabets' property when the payload contains surrogate characters (PR #68)
|
||||
- The logger could mislead (explain=True) on detected languages and the impact of one MBCS match (PR #72)
|
||||
- Submatch factoring could be wrong in rare edge cases (PR #72)
|
||||
- Multiple files given to the CLI were ignored when publishing results to STDOUT. (After the first path) (PR #72)
|
||||
- Fix line endings from CRLF to LF for certain project files (PR #67)
|
||||
|
||||
### Changed
|
||||
- Adjust the MD to lower the sensitivity, thus improving the global detection reliability (PR #69 #76)
|
||||
- Allow fallback on specified encoding if any (PR #71)
|
||||
|
||||
## [2.0.3](https://github.com/Ousret/charset_normalizer/compare/2.0.2...2.0.3) (2021-07-16)
|
||||
### Changed
|
||||
- Part of the detection mechanism has been improved to be less sensitive, resulting in more accurate detection results. Especially ASCII. (PR #63)
|
||||
- According to the community wishes, the detection will fall back on ASCII or UTF-8 in a last-resort case. (PR #64)
|
||||
|
||||
## [2.0.2](https://github.com/Ousret/charset_normalizer/compare/2.0.1...2.0.2) (2021-07-15)
|
||||
### Fixed
|
||||
- Empty/Too small JSON payload misdetection fixed. Report from [@tseaver](https://github.com/tseaver) (PR #59)
|
||||
|
||||
### Changed
|
||||
- Don't inject unicodedata2 into sys.modules from [@akx](https://github.com/akx) (PR #57)
|
||||
|
||||
## [2.0.1](https://github.com/Ousret/charset_normalizer/compare/2.0.0...2.0.1) (2021-07-13)
|
||||
### Fixed
|
||||
- Make it work where there isn't a filesystem available, dropping assets frequencies.json. Report from [@sethmlarson](https://github.com/sethmlarson). (PR #55)
|
||||
- Using explain=False permanently disabled the verbose output in the current runtime (PR #47)
|
||||
- One log entry (language target preemptive) was not shown in logs when using explain=True (PR #47)
|
||||
- Fix undesired exception (ValueError) on getitem of instance CharsetMatches (PR #52)
|
||||
|
||||
### Changed
|
||||
- Public function normalize default args values were not aligned with from_bytes (PR #53)
|
||||
|
||||
### Added
|
||||
- You may now use charset aliases in cp_isolation and cp_exclusion arguments (PR #47)
|
||||
|
||||
## [2.0.0](https://github.com/Ousret/charset_normalizer/compare/1.4.1...2.0.0) (2021-07-02)
|
||||
### Changed
|
||||
- 4 to 5 times faster than the previous 1.4.0 release. At least 2x faster than Chardet.
|
||||
- Emphasis has been placed on UTF-8 detection, which should now be near-instantaneous.
|
||||
- The backward compatibility with Chardet has been greatly improved. The legacy detect function returns an identical charset name whenever possible.
|
||||
- The detection mechanism has been slightly improved, now Turkish content is detected correctly (most of the time)
|
||||
- The program has been rewritten to ease readability and maintainability (now using static typing).
|
||||
- utf_7 detection has been reinstated.
|
||||
|
||||
### Removed
|
||||
- This package no longer requires anything when used with Python 3.5 (Dropped cached_property)
|
||||
- Removed support for these languages: Catalan, Esperanto, Kazakh, Basque, Volapük, Azeri, Galician, Nynorsk, Macedonian, and Serbo-Croatian.
|
||||
- The exception hook on UnicodeDecodeError has been removed.
|
||||
|
||||
### Deprecated
|
||||
- Methods coherence_non_latin, w_counter, chaos_secondary_pass of the class CharsetMatch are now deprecated and scheduled for removal in v3.0
|
||||
|
||||
### Fixed
|
||||
- The CLI output used the relative path of the file(s); it now uses the absolute path.
|
||||
|
||||
## [1.4.1](https://github.com/Ousret/charset_normalizer/compare/1.4.0...1.4.1) (2021-05-28)
|
||||
### Fixed
|
||||
- Logger configuration/usage no longer conflict with others (PR #44)
|
||||
|
||||
## [1.4.0](https://github.com/Ousret/charset_normalizer/compare/1.3.9...1.4.0) (2021-05-21)
|
||||
### Removed
|
||||
- Using standard logging instead of using the package loguru.
|
||||
- Dropping nose test framework in favor of the maintained pytest.
|
||||
- Choose to not use dragonmapper package to help with gibberish Chinese/CJK text.
|
||||
- Require cached_property only for Python 3.5 due to constraint. Dropping for every other interpreter version.
|
||||
- Stop support for UTF-7 that does not contain a SIG.
|
||||
- Dropping PrettyTable, replaced with pure JSON output in CLI.
|
||||
|
||||
### Fixed
|
||||
- BOM marker in a CharsetNormalizerMatch instance could be False in rare cases even if obviously present, due to the sub-match factoring process.
|
||||
- Not searching properly for the BOM when trying utf32/16 parent codec.
|
||||
|
||||
### Changed
|
||||
- Improving the package final size by compressing frequencies.json.
|
||||
- Huge improvement on the largest payloads.
|
||||
|
||||
### Added
|
||||
- CLI now produces JSON consumable output.
|
||||
- Return ASCII if the given sequences fit, given reasonable confidence.
|
||||
|
||||
## [1.3.9](https://github.com/Ousret/charset_normalizer/compare/1.3.8...1.3.9) (2021-05-13)
|
||||
|
||||
### Fixed
|
||||
- In some very rare cases, you may end up getting encode/decode errors due to a bad bytes payload (PR #40)
|
||||
|
||||
## [1.3.8](https://github.com/Ousret/charset_normalizer/compare/1.3.7...1.3.8) (2021-05-12)
|
||||
|
||||
### Fixed
|
||||
- Empty given payload for detection may cause an exception if trying to access the `alphabets` property. (PR #39)
|
||||
|
||||
## [1.3.7](https://github.com/Ousret/charset_normalizer/compare/1.3.6...1.3.7) (2021-05-12)
|
||||
|
||||
### Fixed
|
||||
- The legacy detect function should return UTF-8-SIG if sig is present in the payload. (PR #38)
|
||||
|
||||
## [1.3.6](https://github.com/Ousret/charset_normalizer/compare/1.3.5...1.3.6) (2021-02-09)
|
||||
|
||||
### Changed
|
||||
- Amend the previous release to allow prettytable 2.0 (PR #35)
|
||||
|
||||
## [1.3.5](https://github.com/Ousret/charset_normalizer/compare/1.3.4...1.3.5) (2021-02-08)
|
||||
|
||||
### Fixed
|
||||
- Fix error while using the package with a python pre-release interpreter (PR #33)
|
||||
|
||||
### Changed
|
||||
- Dependencies refactoring, constraints revised.
|
||||
|
||||
### Added
|
||||
- Add python 3.9 and 3.10 to the supported interpreters
|
||||
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2025 TAHRI Ahmed R.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
@ -0,0 +1,35 @@
|
||||
../../../bin/normalizer,sha256=9WdyCIqYgSbUxt46Njhg9YwQf8x-EAzNrJo206alMfY,256
|
||||
charset_normalizer-3.4.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
||||
charset_normalizer-3.4.1.dist-info/LICENSE,sha256=bQ1Bv-FwrGx9wkjJpj4lTQ-0WmDVCoJX0K-SxuJJuIc,1071
|
||||
charset_normalizer-3.4.1.dist-info/METADATA,sha256=JbyHzhmqZh_ugEn1Y7TY7CDYZA9FoU6BP25hrCNDf50,35313
|
||||
charset_normalizer-3.4.1.dist-info/RECORD,,
|
||||
charset_normalizer-3.4.1.dist-info/WHEEL,sha256=tRzqFuK6eFjpbf2xTNvU7E3xL2y00S_NWJvyqxej3BA,151
|
||||
charset_normalizer-3.4.1.dist-info/entry_points.txt,sha256=8C-Y3iXIfyXQ83Tpir2B8t-XLJYpxF5xbb38d_js-h4,65
|
||||
charset_normalizer-3.4.1.dist-info/top_level.txt,sha256=7ASyzePr8_xuZWJsnqJjIBtyV8vhEo0wBCv1MPRRi3Q,19
|
||||
charset_normalizer/__init__.py,sha256=OKRxRv2Zhnqk00tqkN0c1BtJjm165fWXLydE52IKuHc,1590
|
||||
charset_normalizer/__main__.py,sha256=yzYxMR-IhKRHYwcSlavEv8oGdwxsR89mr2X09qXGdps,109
|
||||
charset_normalizer/__pycache__/__init__.cpython-312.pyc,,
|
||||
charset_normalizer/__pycache__/__main__.cpython-312.pyc,,
|
||||
charset_normalizer/__pycache__/api.cpython-312.pyc,,
|
||||
charset_normalizer/__pycache__/cd.cpython-312.pyc,,
|
||||
charset_normalizer/__pycache__/constant.cpython-312.pyc,,
|
||||
charset_normalizer/__pycache__/legacy.cpython-312.pyc,,
|
||||
charset_normalizer/__pycache__/md.cpython-312.pyc,,
|
||||
charset_normalizer/__pycache__/models.cpython-312.pyc,,
|
||||
charset_normalizer/__pycache__/utils.cpython-312.pyc,,
|
||||
charset_normalizer/__pycache__/version.cpython-312.pyc,,
|
||||
charset_normalizer/api.py,sha256=qBRz8mJ_R5E713R6TOyqHEdnmyxbEDnCSHvx32ubDGg,22617
|
||||
charset_normalizer/cd.py,sha256=WKTo1HDb-H9HfCDc3Bfwq5jzS25Ziy9SE2a74SgTq88,12522
|
||||
charset_normalizer/cli/__init__.py,sha256=D8I86lFk2-py45JvqxniTirSj_sFyE6sjaY_0-G1shc,136
|
||||
charset_normalizer/cli/__main__.py,sha256=VGC9klOoi6_R2z8rmyrc936kv7u2A1udjjHtlmNPDTM,10410
|
||||
charset_normalizer/cli/__pycache__/__init__.cpython-312.pyc,,
|
||||
charset_normalizer/cli/__pycache__/__main__.cpython-312.pyc,,
|
||||
charset_normalizer/constant.py,sha256=4VuTcZNLew1j_8ixA-Rt_VVqNWD4pwgHOHMCMlr0964,40477
|
||||
charset_normalizer/legacy.py,sha256=yhNXsPHkBfqPXKRb-sPXNj3Bscp9-mFGcYOkJ62tg9c,2328
|
||||
charset_normalizer/md.cpython-312-x86_64-linux-gnu.so,sha256=W654QTU3QZI6eWJ0fanScAr0_O6sL0I61fyRSdC-39Y,16064
|
||||
charset_normalizer/md.py,sha256=iyXXQGWl54nnLQLueMWTmUtlivO0-rTBgVkmJxIIAGU,20036
|
||||
charset_normalizer/md__mypyc.cpython-312-x86_64-linux-gnu.so,sha256=02IBduHhrAfIJteTWMlJulQK2gKMGP64dy8bVubEw3M,280904
|
||||
charset_normalizer/models.py,sha256=lKXhOnIPtiakbK3i__J9wpOfzx3JDTKj7Dn3Rg0VaRI,12394
|
||||
charset_normalizer/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
||||
charset_normalizer/utils.py,sha256=T5UHo8AS7NVMmgruWoZyqEf0WrZVcQpgUNetRoborSk,12002
|
||||
charset_normalizer/version.py,sha256=Ambcj3O8FfvdLfDLc8dkaxZx97O1IM_R4_aKGD_TDdE,115
|
||||
@ -0,0 +1,6 @@
|
||||
Wheel-Version: 1.0
|
||||
Generator: setuptools (75.6.0)
|
||||
Root-Is-Purelib: false
|
||||
Tag: cp312-cp312-manylinux_2_17_x86_64
|
||||
Tag: cp312-cp312-manylinux2014_x86_64
|
||||
|
||||
@ -0,0 +1,2 @@
|
||||
[console_scripts]
|
||||
normalizer = charset_normalizer:cli.cli_detect
|
||||
@ -0,0 +1 @@
|
||||
charset_normalizer
|
||||
@ -0,0 +1,48 @@
|
||||
"""
|
||||
Charset-Normalizer
|
||||
~~~~~~~~~~~~~~
|
||||
The Real First Universal Charset Detector.
|
||||
A library that helps you read text from an unknown charset encoding.
|
||||
Motivated by chardet, this package tries to resolve the issue by taking a new approach.
|
||||
All IANA character set names for which the Python core library provides codecs are supported.
|
||||
|
||||
Basic usage:
|
||||
>>> from charset_normalizer import from_bytes
|
||||
>>> results = from_bytes('Bсеки човек има право на образование. Oбразованието!'.encode('utf_8'))
|
||||
>>> best_guess = results.best()
|
||||
>>> str(best_guess)
|
||||
'Bсеки човек има право на образование. Oбразованието!'
|
||||
|
||||
Other methods and usages are available - see the full documentation
|
||||
at <https://github.com/Ousret/charset_normalizer>.
|
||||
:copyright: (c) 2021 by Ahmed TAHRI
|
||||
:license: MIT, see LICENSE for more details.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
|
||||
from .api import from_bytes, from_fp, from_path, is_binary
|
||||
from .legacy import detect
|
||||
from .models import CharsetMatch, CharsetMatches
|
||||
from .utils import set_logging_handler
|
||||
from .version import VERSION, __version__
|
||||
|
||||
__all__ = (
|
||||
"from_fp",
|
||||
"from_path",
|
||||
"from_bytes",
|
||||
"is_binary",
|
||||
"detect",
|
||||
"CharsetMatch",
|
||||
"CharsetMatches",
|
||||
"__version__",
|
||||
"VERSION",
|
||||
"set_logging_handler",
|
||||
)
|
||||
|
||||
# Attach a NullHandler to the top level logger by default
|
||||
# https://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library
|
||||
|
||||
logging.getLogger("charset_normalizer").addHandler(logging.NullHandler())
|
||||
@ -0,0 +1,6 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from .cli import cli_detect
|
||||
|
||||
if __name__ == "__main__":
|
||||
cli_detect()
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
668
llmlab/lib/python3.12/site-packages/charset_normalizer/api.py
Normal file
@ -0,0 +1,668 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from os import PathLike
|
||||
from typing import BinaryIO
|
||||
|
||||
from .cd import (
|
||||
coherence_ratio,
|
||||
encoding_languages,
|
||||
mb_encoding_languages,
|
||||
merge_coherence_ratios,
|
||||
)
|
||||
from .constant import IANA_SUPPORTED, TOO_BIG_SEQUENCE, TOO_SMALL_SEQUENCE, TRACE
|
||||
from .md import mess_ratio
|
||||
from .models import CharsetMatch, CharsetMatches
|
||||
from .utils import (
|
||||
any_specified_encoding,
|
||||
cut_sequence_chunks,
|
||||
iana_name,
|
||||
identify_sig_or_bom,
|
||||
is_cp_similar,
|
||||
is_multi_byte_encoding,
|
||||
should_strip_sig_or_bom,
|
||||
)
|
||||
|
||||
logger = logging.getLogger("charset_normalizer")
|
||||
explain_handler = logging.StreamHandler()
|
||||
explain_handler.setFormatter(
|
||||
logging.Formatter("%(asctime)s | %(levelname)s | %(message)s")
|
||||
)
|
||||
|
||||
|
||||
def from_bytes(
|
||||
sequences: bytes | bytearray,
|
||||
steps: int = 5,
|
||||
chunk_size: int = 512,
|
||||
threshold: float = 0.2,
|
||||
cp_isolation: list[str] | None = None,
|
||||
cp_exclusion: list[str] | None = None,
|
||||
preemptive_behaviour: bool = True,
|
||||
explain: bool = False,
|
||||
language_threshold: float = 0.1,
|
||||
enable_fallback: bool = True,
|
||||
) -> CharsetMatches:
|
||||
"""
|
||||
Given a raw bytes sequence, return the best possible charsets usable to render str objects.
|
||||
If there are no results, it is a strong indicator that the source is binary/not text.
|
||||
By default, the process will extract 5 blocks of 512 bytes each to assess the mess and coherence of a given sequence.
|
||||
It will give up on a particular code page after 20% of measured mess. Those criteria are customizable at will.
|
||||
|
||||
The preemptive behavior DOES NOT replace the traditional detection workflow; it prioritizes a particular code page
|
||||
but never takes it for granted. It can improve performance.
|
||||
|
||||
You may want to focus your attention on some code pages and/or exclude others; use cp_isolation and cp_exclusion for that
|
||||
purpose.
|
||||
|
||||
This function will strip the SIG in the payload/sequence every time except on UTF-16, UTF-32.
|
||||
By default the library does not set up any handler other than the NullHandler; if you choose to set the 'explain'
|
||||
toggle to True it will alter the logger configuration to add a StreamHandler that is suitable for debugging.
|
||||
Custom logging format and handler can be set manually.
|
||||
"""
|
||||
|
||||
if not isinstance(sequences, (bytearray, bytes)):
|
||||
raise TypeError(
|
||||
"Expected object of type bytes or bytearray, got: {}".format(
|
||||
type(sequences)
|
||||
)
|
||||
)
|
||||
|
||||
if explain:
|
||||
previous_logger_level: int = logger.level
|
||||
logger.addHandler(explain_handler)
|
||||
logger.setLevel(TRACE)
|
||||
|
||||
length: int = len(sequences)
|
||||
|
||||
if length == 0:
|
||||
logger.debug("Encoding detection on empty bytes, assuming utf_8 intention.")
|
||||
if explain: # Defensive: ensure exit path clean handler
|
||||
logger.removeHandler(explain_handler)
|
||||
logger.setLevel(previous_logger_level or logging.WARNING)
|
||||
return CharsetMatches([CharsetMatch(sequences, "utf_8", 0.0, False, [], "")])
|
||||
|
||||
if cp_isolation is not None:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"cp_isolation is set. use this flag for debugging purpose. "
|
||||
"limited list of encoding allowed : %s.",
|
||||
", ".join(cp_isolation),
|
||||
)
|
||||
cp_isolation = [iana_name(cp, False) for cp in cp_isolation]
|
||||
else:
|
||||
cp_isolation = []
|
||||
|
||||
if cp_exclusion is not None:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"cp_exclusion is set. use this flag for debugging purpose. "
|
||||
"limited list of encoding excluded : %s.",
|
||||
", ".join(cp_exclusion),
|
||||
)
|
||||
cp_exclusion = [iana_name(cp, False) for cp in cp_exclusion]
|
||||
else:
|
||||
cp_exclusion = []
|
||||
|
||||
if length <= (chunk_size * steps):
|
||||
logger.log(
|
||||
TRACE,
|
||||
"override steps (%i) and chunk_size (%i) as content does not fit (%i byte(s) given) parameters.",
|
||||
steps,
|
||||
chunk_size,
|
||||
length,
|
||||
)
|
||||
steps = 1
|
||||
chunk_size = length
|
||||
|
||||
if steps > 1 and length / steps < chunk_size:
|
||||
chunk_size = int(length / steps)
|
||||
|
||||
is_too_small_sequence: bool = len(sequences) < TOO_SMALL_SEQUENCE
|
||||
is_too_large_sequence: bool = len(sequences) >= TOO_BIG_SEQUENCE
|
||||
|
||||
if is_too_small_sequence:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"Trying to detect encoding from a tiny portion of ({}) byte(s).".format(
|
||||
length
|
||||
),
|
||||
)
|
||||
elif is_too_large_sequence:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"Using lazy str decoding because the payload is quite large, ({}) byte(s).".format(
|
||||
length
|
||||
),
|
||||
)
|
||||
|
||||
prioritized_encodings: list[str] = []
|
||||
|
||||
specified_encoding: str | None = (
|
||||
any_specified_encoding(sequences) if preemptive_behaviour else None
|
||||
)
|
||||
|
||||
if specified_encoding is not None:
|
||||
prioritized_encodings.append(specified_encoding)
|
||||
logger.log(
|
||||
TRACE,
|
||||
"Detected declarative mark in sequence. Priority +1 given for %s.",
|
||||
specified_encoding,
|
||||
)
|
||||
|
||||
tested: set[str] = set()
|
||||
tested_but_hard_failure: list[str] = []
|
||||
tested_but_soft_failure: list[str] = []
|
||||
|
||||
fallback_ascii: CharsetMatch | None = None
|
||||
fallback_u8: CharsetMatch | None = None
|
||||
fallback_specified: CharsetMatch | None = None
|
||||
|
||||
results: CharsetMatches = CharsetMatches()
|
||||
|
||||
early_stop_results: CharsetMatches = CharsetMatches()
|
||||
|
||||
sig_encoding, sig_payload = identify_sig_or_bom(sequences)
|
||||
|
||||
if sig_encoding is not None:
|
||||
prioritized_encodings.append(sig_encoding)
|
||||
logger.log(
|
||||
TRACE,
|
||||
"Detected a SIG or BOM mark on first %i byte(s). Priority +1 given for %s.",
|
||||
len(sig_payload),
|
||||
sig_encoding,
|
||||
)
|
||||
|
||||
prioritized_encodings.append("ascii")
|
||||
|
||||
if "utf_8" not in prioritized_encodings:
|
||||
prioritized_encodings.append("utf_8")
|
||||
|
||||
for encoding_iana in prioritized_encodings + IANA_SUPPORTED:
|
||||
if cp_isolation and encoding_iana not in cp_isolation:
|
||||
continue
|
||||
|
||||
if cp_exclusion and encoding_iana in cp_exclusion:
|
||||
continue
|
||||
|
||||
if encoding_iana in tested:
|
||||
continue
|
||||
|
||||
tested.add(encoding_iana)
|
||||
|
||||
decoded_payload: str | None = None
|
||||
bom_or_sig_available: bool = sig_encoding == encoding_iana
|
||||
strip_sig_or_bom: bool = bom_or_sig_available and should_strip_sig_or_bom(
|
||||
encoding_iana
|
||||
)
|
||||
|
||||
if encoding_iana in {"utf_16", "utf_32"} and not bom_or_sig_available:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"Encoding %s won't be tested as-is because it require a BOM. Will try some sub-encoder LE/BE.",
|
||||
encoding_iana,
|
||||
)
|
||||
continue
|
||||
if encoding_iana in {"utf_7"} and not bom_or_sig_available:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"Encoding %s won't be tested as-is because detection is unreliable without BOM/SIG.",
|
||||
encoding_iana,
|
||||
)
|
||||
continue
|
||||
|
||||
try:
|
||||
is_multi_byte_decoder: bool = is_multi_byte_encoding(encoding_iana)
|
||||
except (ModuleNotFoundError, ImportError):
|
||||
logger.log(
|
||||
TRACE,
|
||||
"Encoding %s does not provide an IncrementalDecoder",
|
||||
encoding_iana,
|
||||
)
|
||||
continue
|
||||
|
||||
try:
|
||||
if is_too_large_sequence and is_multi_byte_decoder is False:
|
||||
str(
|
||||
(
|
||||
sequences[: int(50e4)]
|
||||
if strip_sig_or_bom is False
|
||||
else sequences[len(sig_payload) : int(50e4)]
|
||||
),
|
||||
encoding=encoding_iana,
|
||||
)
|
||||
else:
|
||||
decoded_payload = str(
|
||||
(
|
||||
sequences
|
||||
if strip_sig_or_bom is False
|
||||
else sequences[len(sig_payload) :]
|
||||
),
|
||||
encoding=encoding_iana,
|
||||
)
|
||||
except (UnicodeDecodeError, LookupError) as e:
|
||||
if not isinstance(e, LookupError):
|
||||
logger.log(
|
||||
TRACE,
|
||||
"Code page %s does not fit given bytes sequence at ALL. %s",
|
||||
encoding_iana,
|
||||
str(e),
|
||||
)
|
||||
tested_but_hard_failure.append(encoding_iana)
|
||||
continue
|
||||
|
||||
similar_soft_failure_test: bool = False
|
||||
|
||||
for encoding_soft_failed in tested_but_soft_failure:
|
||||
if is_cp_similar(encoding_iana, encoding_soft_failed):
|
||||
similar_soft_failure_test = True
|
||||
break
|
||||
|
||||
if similar_soft_failure_test:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"%s is deemed too similar to code page %s and was consider unsuited already. Continuing!",
|
||||
encoding_iana,
|
||||
encoding_soft_failed,
|
||||
)
|
||||
continue
|
||||
|
||||
r_ = range(
|
||||
0 if not bom_or_sig_available else len(sig_payload),
|
||||
length,
|
||||
int(length / steps),
|
||||
)
|
||||
|
||||
multi_byte_bonus: bool = (
|
||||
is_multi_byte_decoder
|
||||
and decoded_payload is not None
|
||||
and len(decoded_payload) < length
|
||||
)
|
||||
|
||||
if multi_byte_bonus:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"Code page %s is a multi byte encoding table and it appear that at least one character "
|
||||
"was encoded using n-bytes.",
|
||||
encoding_iana,
|
||||
)
|
||||
|
||||
max_chunk_gave_up: int = int(len(r_) / 4)
|
||||
|
||||
max_chunk_gave_up = max(max_chunk_gave_up, 2)
|
||||
early_stop_count: int = 0
|
||||
lazy_str_hard_failure = False
|
||||
|
||||
md_chunks: list[str] = []
|
||||
md_ratios = []
|
||||
|
||||
try:
|
||||
for chunk in cut_sequence_chunks(
|
||||
sequences,
|
||||
encoding_iana,
|
||||
r_,
|
||||
chunk_size,
|
||||
bom_or_sig_available,
|
||||
strip_sig_or_bom,
|
||||
sig_payload,
|
||||
is_multi_byte_decoder,
|
||||
decoded_payload,
|
||||
):
|
||||
md_chunks.append(chunk)
|
||||
|
||||
md_ratios.append(
|
||||
mess_ratio(
|
||||
chunk,
|
||||
threshold,
|
||||
explain is True and 1 <= len(cp_isolation) <= 2,
|
||||
)
|
||||
)
|
||||
|
||||
if md_ratios[-1] >= threshold:
|
||||
early_stop_count += 1
|
||||
|
||||
if (early_stop_count >= max_chunk_gave_up) or (
|
||||
bom_or_sig_available and strip_sig_or_bom is False
|
||||
):
|
||||
break
|
||||
except (
|
||||
UnicodeDecodeError
|
||||
) as e: # Lazy str loading may have missed something there
|
||||
logger.log(
|
||||
TRACE,
|
||||
"LazyStr Loading: After MD chunk decode, code page %s does not fit given bytes sequence at ALL. %s",
|
||||
encoding_iana,
|
||||
str(e),
|
||||
)
|
||||
early_stop_count = max_chunk_gave_up
|
||||
lazy_str_hard_failure = True
|
||||
|
||||
# We might want to check the sequence again with the whole content
|
||||
# Only if the initial MD tests pass
|
||||
if (
|
||||
not lazy_str_hard_failure
|
||||
and is_too_large_sequence
|
||||
and not is_multi_byte_decoder
|
||||
):
|
||||
try:
|
||||
sequences[int(50e3) :].decode(encoding_iana, errors="strict")
|
||||
except UnicodeDecodeError as e:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"LazyStr Loading: After final lookup, code page %s does not fit given bytes sequence at ALL. %s",
|
||||
encoding_iana,
|
||||
str(e),
|
||||
)
|
||||
tested_but_hard_failure.append(encoding_iana)
|
||||
continue
|
||||
|
||||
mean_mess_ratio: float = sum(md_ratios) / len(md_ratios) if md_ratios else 0.0
|
||||
if mean_mess_ratio >= threshold or early_stop_count >= max_chunk_gave_up:
|
||||
tested_but_soft_failure.append(encoding_iana)
|
||||
logger.log(
|
||||
TRACE,
|
||||
"%s was excluded because of initial chaos probing. Gave up %i time(s). "
|
||||
"Computed mean chaos is %f %%.",
|
||||
encoding_iana,
|
||||
early_stop_count,
|
||||
round(mean_mess_ratio * 100, ndigits=3),
|
||||
)
|
||||
# Preparing those fallbacks in case we got nothing.
|
||||
if (
|
||||
enable_fallback
|
||||
and encoding_iana in ["ascii", "utf_8", specified_encoding]
|
||||
and not lazy_str_hard_failure
|
||||
):
|
||||
fallback_entry = CharsetMatch(
|
||||
sequences,
|
||||
encoding_iana,
|
||||
threshold,
|
||||
False,
|
||||
[],
|
||||
decoded_payload,
|
||||
preemptive_declaration=specified_encoding,
|
||||
)
|
||||
if encoding_iana == specified_encoding:
|
||||
fallback_specified = fallback_entry
|
||||
elif encoding_iana == "ascii":
|
||||
fallback_ascii = fallback_entry
|
||||
else:
|
||||
fallback_u8 = fallback_entry
|
||||
continue
|
||||
|
||||
logger.log(
|
||||
TRACE,
|
||||
"%s passed initial chaos probing. Mean measured chaos is %f %%",
|
||||
encoding_iana,
|
||||
round(mean_mess_ratio * 100, ndigits=3),
|
||||
)
|
||||
|
||||
if not is_multi_byte_decoder:
|
||||
target_languages: list[str] = encoding_languages(encoding_iana)
|
||||
else:
|
||||
target_languages = mb_encoding_languages(encoding_iana)
|
||||
|
||||
if target_languages:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"{} should target any language(s) of {}".format(
|
||||
encoding_iana, str(target_languages)
|
||||
),
|
||||
)
|
||||
|
||||
cd_ratios = []
|
||||
|
||||
# We shall skip the CD when it's about ASCII
|
||||
# Most of the time it's not relevant to run "language-detection" on it.
|
||||
if encoding_iana != "ascii":
|
||||
for chunk in md_chunks:
|
||||
chunk_languages = coherence_ratio(
|
||||
chunk,
|
||||
language_threshold,
|
||||
",".join(target_languages) if target_languages else None,
|
||||
)
|
||||
|
||||
cd_ratios.append(chunk_languages)
|
||||
|
||||
cd_ratios_merged = merge_coherence_ratios(cd_ratios)
|
||||
|
||||
if cd_ratios_merged:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"We detected language {} using {}".format(
|
||||
cd_ratios_merged, encoding_iana
|
||||
),
|
||||
)
|
||||
|
||||
current_match = CharsetMatch(
|
||||
sequences,
|
||||
encoding_iana,
|
||||
mean_mess_ratio,
|
||||
bom_or_sig_available,
|
||||
cd_ratios_merged,
|
||||
(
|
||||
decoded_payload
|
||||
if (
|
||||
is_too_large_sequence is False
|
||||
or encoding_iana in [specified_encoding, "ascii", "utf_8"]
|
||||
)
|
||||
else None
|
||||
),
|
||||
preemptive_declaration=specified_encoding,
|
||||
)
|
||||
|
||||
results.append(current_match)
|
||||
|
||||
if (
|
||||
encoding_iana in [specified_encoding, "ascii", "utf_8"]
|
||||
and mean_mess_ratio < 0.1
|
||||
):
|
||||
# If md says nothing to worry about, then... stop immediately!
|
||||
if mean_mess_ratio == 0.0:
|
||||
logger.debug(
|
||||
"Encoding detection: %s is most likely the one.",
|
||||
current_match.encoding,
|
||||
)
|
||||
if explain: # Defensive: ensure exit path clean handler
|
||||
logger.removeHandler(explain_handler)
|
||||
logger.setLevel(previous_logger_level)
|
||||
return CharsetMatches([current_match])
|
||||
|
||||
early_stop_results.append(current_match)
|
||||
|
||||
if (
|
||||
len(early_stop_results)
|
||||
and (specified_encoding is None or specified_encoding in tested)
|
||||
and "ascii" in tested
|
||||
and "utf_8" in tested
|
||||
):
|
||||
probable_result: CharsetMatch = early_stop_results.best() # type: ignore[assignment]
|
||||
logger.debug(
|
||||
"Encoding detection: %s is most likely the one.",
|
||||
probable_result.encoding,
|
||||
)
|
||||
if explain: # Defensive: ensure exit path clean handler
|
||||
logger.removeHandler(explain_handler)
|
||||
logger.setLevel(previous_logger_level)
|
||||
|
||||
return CharsetMatches([probable_result])
|
||||
|
||||
if encoding_iana == sig_encoding:
|
||||
logger.debug(
|
||||
"Encoding detection: %s is most likely the one as we detected a BOM or SIG within "
|
||||
"the beginning of the sequence.",
|
||||
encoding_iana,
|
||||
)
|
||||
if explain: # Defensive: ensure exit path clean handler
|
||||
logger.removeHandler(explain_handler)
|
||||
logger.setLevel(previous_logger_level)
|
||||
return CharsetMatches([results[encoding_iana]])
|
||||
|
||||
if len(results) == 0:
|
||||
if fallback_u8 or fallback_ascii or fallback_specified:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"Nothing got out of the detection process. Using ASCII/UTF-8/Specified fallback.",
|
||||
)
|
||||
|
||||
if fallback_specified:
|
||||
logger.debug(
|
||||
"Encoding detection: %s will be used as a fallback match",
|
||||
fallback_specified.encoding,
|
||||
)
|
||||
results.append(fallback_specified)
|
||||
elif (
|
||||
(fallback_u8 and fallback_ascii is None)
|
||||
or (
|
||||
fallback_u8
|
||||
and fallback_ascii
|
||||
and fallback_u8.fingerprint != fallback_ascii.fingerprint
|
||||
)
|
||||
or (fallback_u8 is not None)
|
||||
):
|
||||
logger.debug("Encoding detection: utf_8 will be used as a fallback match")
|
||||
results.append(fallback_u8)
|
||||
elif fallback_ascii:
|
||||
logger.debug("Encoding detection: ascii will be used as a fallback match")
|
||||
results.append(fallback_ascii)
|
||||
|
||||
if results:
|
||||
logger.debug(
|
||||
"Encoding detection: Found %s as plausible (best-candidate) for content. With %i alternatives.",
|
||||
results.best().encoding, # type: ignore
|
||||
len(results) - 1,
|
||||
)
|
||||
else:
|
||||
logger.debug("Encoding detection: Unable to determine any suitable charset.")
|
||||
|
||||
if explain:
|
||||
logger.removeHandler(explain_handler)
|
||||
logger.setLevel(previous_logger_level)
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def from_fp(
|
||||
fp: BinaryIO,
|
||||
steps: int = 5,
|
||||
chunk_size: int = 512,
|
||||
threshold: float = 0.20,
|
||||
cp_isolation: list[str] | None = None,
|
||||
cp_exclusion: list[str] | None = None,
|
||||
preemptive_behaviour: bool = True,
|
||||
explain: bool = False,
|
||||
language_threshold: float = 0.1,
|
||||
enable_fallback: bool = True,
|
||||
) -> CharsetMatches:
|
||||
"""
|
||||
Same thing as the function from_bytes, but using a file pointer that is already ready.
|
||||
Will not close the file pointer.
|
||||
"""
|
||||
return from_bytes(
|
||||
fp.read(),
|
||||
steps,
|
||||
chunk_size,
|
||||
threshold,
|
||||
cp_isolation,
|
||||
cp_exclusion,
|
||||
preemptive_behaviour,
|
||||
explain,
|
||||
language_threshold,
|
||||
enable_fallback,
|
||||
)
|
||||
|
||||
|
||||
def from_path(
|
||||
path: str | bytes | PathLike, # type: ignore[type-arg]
|
||||
steps: int = 5,
|
||||
chunk_size: int = 512,
|
||||
threshold: float = 0.20,
|
||||
cp_isolation: list[str] | None = None,
|
||||
cp_exclusion: list[str] | None = None,
|
||||
preemptive_behaviour: bool = True,
|
||||
explain: bool = False,
|
||||
language_threshold: float = 0.1,
|
||||
enable_fallback: bool = True,
|
||||
) -> CharsetMatches:
|
||||
"""
|
||||
Same thing as the function from_bytes, but with one extra step: opening and reading the given file path in binary mode.
|
||||
Can raise IOError.
|
||||
"""
|
||||
with open(path, "rb") as fp:
|
||||
return from_fp(
|
||||
fp,
|
||||
steps,
|
||||
chunk_size,
|
||||
threshold,
|
||||
cp_isolation,
|
||||
cp_exclusion,
|
||||
preemptive_behaviour,
|
||||
explain,
|
||||
language_threshold,
|
||||
enable_fallback,
|
||||
)
|
||||
|
||||
|
||||
def is_binary(
|
||||
fp_or_path_or_payload: PathLike | str | BinaryIO | bytes, # type: ignore[type-arg]
|
||||
steps: int = 5,
|
||||
chunk_size: int = 512,
|
||||
threshold: float = 0.20,
|
||||
cp_isolation: list[str] | None = None,
|
||||
cp_exclusion: list[str] | None = None,
|
||||
preemptive_behaviour: bool = True,
|
||||
explain: bool = False,
|
||||
language_threshold: float = 0.1,
|
||||
enable_fallback: bool = False,
|
||||
) -> bool:
|
||||
"""
|
||||
Detect if the given input (file, bytes, or path) points to a binary file, i.e. not text.
|
||||
Based on the same main heuristic algorithms and default kwargs, with the sole exception that fallback matches
|
||||
are disabled, to be stricter with content that is ASCII-compatible but unlikely to be text.
|
||||
"""
|
||||
if isinstance(fp_or_path_or_payload, (str, PathLike)):
|
||||
guesses = from_path(
|
||||
fp_or_path_or_payload,
|
||||
steps=steps,
|
||||
chunk_size=chunk_size,
|
||||
threshold=threshold,
|
||||
cp_isolation=cp_isolation,
|
||||
cp_exclusion=cp_exclusion,
|
||||
preemptive_behaviour=preemptive_behaviour,
|
||||
explain=explain,
|
||||
language_threshold=language_threshold,
|
||||
enable_fallback=enable_fallback,
|
||||
)
|
||||
elif isinstance(
|
||||
fp_or_path_or_payload,
|
||||
(
|
||||
bytes,
|
||||
bytearray,
|
||||
),
|
||||
):
|
||||
guesses = from_bytes(
|
||||
fp_or_path_or_payload,
|
||||
steps=steps,
|
||||
chunk_size=chunk_size,
|
||||
threshold=threshold,
|
||||
cp_isolation=cp_isolation,
|
||||
cp_exclusion=cp_exclusion,
|
||||
preemptive_behaviour=preemptive_behaviour,
|
||||
explain=explain,
|
||||
language_threshold=language_threshold,
|
||||
enable_fallback=enable_fallback,
|
||||
)
|
||||
else:
|
||||
guesses = from_fp(
|
||||
fp_or_path_or_payload,
|
||||
steps=steps,
|
||||
chunk_size=chunk_size,
|
||||
threshold=threshold,
|
||||
cp_isolation=cp_isolation,
|
||||
cp_exclusion=cp_exclusion,
|
||||
preemptive_behaviour=preemptive_behaviour,
|
||||
explain=explain,
|
||||
language_threshold=language_threshold,
|
||||
enable_fallback=enable_fallback,
|
||||
)
|
||||
|
||||
return not guesses
|
||||
395
llmlab/lib/python3.12/site-packages/charset_normalizer/cd.py
Normal file
@ -0,0 +1,395 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import importlib
|
||||
from codecs import IncrementalDecoder
|
||||
from collections import Counter
|
||||
from functools import lru_cache
|
||||
from typing import Counter as TypeCounter
|
||||
|
||||
from .constant import (
|
||||
FREQUENCIES,
|
||||
KO_NAMES,
|
||||
LANGUAGE_SUPPORTED_COUNT,
|
||||
TOO_SMALL_SEQUENCE,
|
||||
ZH_NAMES,
|
||||
)
|
||||
from .md import is_suspiciously_successive_range
|
||||
from .models import CoherenceMatches
|
||||
from .utils import (
|
||||
is_accentuated,
|
||||
is_latin,
|
||||
is_multi_byte_encoding,
|
||||
is_unicode_range_secondary,
|
||||
unicode_range,
|
||||
)
|
||||
|
||||
|
||||
def encoding_unicode_range(iana_name: str) -> list[str]:
|
||||
"""
|
||||
Return associated unicode ranges in a single byte code page.
|
||||
"""
|
||||
if is_multi_byte_encoding(iana_name):
|
||||
raise OSError("Function not supported on multi-byte code page")
|
||||
|
||||
decoder = importlib.import_module(f"encodings.{iana_name}").IncrementalDecoder
|
||||
|
||||
p: IncrementalDecoder = decoder(errors="ignore")
|
||||
seen_ranges: dict[str, int] = {}
|
||||
character_count: int = 0
|
||||
|
||||
for i in range(0x40, 0xFF):
|
||||
chunk: str = p.decode(bytes([i]))
|
||||
|
||||
if chunk:
|
||||
character_range: str | None = unicode_range(chunk)
|
||||
|
||||
if character_range is None:
|
||||
continue
|
||||
|
||||
if is_unicode_range_secondary(character_range) is False:
|
||||
if character_range not in seen_ranges:
|
||||
seen_ranges[character_range] = 0
|
||||
seen_ranges[character_range] += 1
|
||||
character_count += 1
|
||||
|
||||
return sorted(
|
||||
[
|
||||
character_range
|
||||
for character_range in seen_ranges
|
||||
if seen_ranges[character_range] / character_count >= 0.15
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
def unicode_range_languages(primary_range: str) -> list[str]:
|
||||
"""
|
||||
Return inferred languages used with a unicode range.
|
||||
"""
|
||||
languages: list[str] = []
|
||||
|
||||
for language, characters in FREQUENCIES.items():
|
||||
for character in characters:
|
||||
if unicode_range(character) == primary_range:
|
||||
languages.append(language)
|
||||
break
|
||||
|
||||
return languages
|
||||
|
||||
|
||||
@lru_cache()
|
||||
def encoding_languages(iana_name: str) -> list[str]:
|
||||
"""
|
||||
Single-byte encoding language association. Some code pages are heavily linked to particular language(s).
|
||||
This function does the correspondence.
|
||||
"""
|
||||
unicode_ranges: list[str] = encoding_unicode_range(iana_name)
|
||||
primary_range: str | None = None
|
||||
|
||||
for specified_range in unicode_ranges:
|
||||
if "Latin" not in specified_range:
|
||||
primary_range = specified_range
|
||||
break
|
||||
|
||||
if primary_range is None:
|
||||
return ["Latin Based"]
|
||||
|
||||
return unicode_range_languages(primary_range)
|
||||
|
||||
|
||||
@lru_cache()
|
||||
def mb_encoding_languages(iana_name: str) -> list[str]:
|
||||
"""
|
||||
Multi-byte encoding language association. Some code pages are heavily linked to particular language(s).
|
||||
This function does the correspondence.
|
||||
"""
|
||||
if (
|
||||
iana_name.startswith("shift_")
|
||||
or iana_name.startswith("iso2022_jp")
|
||||
or iana_name.startswith("euc_j")
|
||||
or iana_name == "cp932"
|
||||
):
|
||||
return ["Japanese"]
|
||||
if iana_name.startswith("gb") or iana_name in ZH_NAMES:
|
||||
return ["Chinese"]
|
||||
if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES:
|
||||
return ["Korean"]
|
||||
|
||||
return []
|
||||
|
||||
|
||||
@lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT)
|
||||
def get_target_features(language: str) -> tuple[bool, bool]:
|
||||
"""
|
||||
Determine the main aspects of a supported language: whether it contains accents and whether it is pure Latin.
|
||||
"""
|
||||
target_have_accents: bool = False
|
||||
target_pure_latin: bool = True
|
||||
|
||||
for character in FREQUENCIES[language]:
|
||||
if not target_have_accents and is_accentuated(character):
|
||||
target_have_accents = True
|
||||
if target_pure_latin and is_latin(character) is False:
|
||||
target_pure_latin = False
|
||||
|
||||
return target_have_accents, target_pure_latin
|
||||
|
||||
|
||||
def alphabet_languages(
|
||||
characters: list[str], ignore_non_latin: bool = False
|
||||
) -> list[str]:
|
||||
"""
|
||||
Return the languages associated with the given characters.
|
||||
"""
|
||||
languages: list[tuple[str, float]] = []
|
||||
|
||||
source_have_accents = any(is_accentuated(character) for character in characters)
|
||||
|
||||
for language, language_characters in FREQUENCIES.items():
|
||||
target_have_accents, target_pure_latin = get_target_features(language)
|
||||
|
||||
if ignore_non_latin and target_pure_latin is False:
|
||||
continue
|
||||
|
||||
if target_have_accents is False and source_have_accents:
|
||||
continue
|
||||
|
||||
character_count: int = len(language_characters)
|
||||
|
||||
character_match_count: int = len(
|
||||
[c for c in language_characters if c in characters]
|
||||
)
|
||||
|
||||
ratio: float = character_match_count / character_count
|
||||
|
||||
if ratio >= 0.2:
|
||||
languages.append((language, ratio))
|
||||
|
||||
languages = sorted(languages, key=lambda x: x[1], reverse=True)
|
||||
|
||||
return [compatible_language[0] for compatible_language in languages]
|
||||
|
||||
|
||||
def characters_popularity_compare(
|
||||
language: str, ordered_characters: list[str]
|
||||
) -> float:
|
||||
"""
|
||||
Determine if an ordered character list (by occurrence, from most frequent to rarest) matches a particular language.
|
||||
The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit).
|
||||
Beware that this function is not strict on the match, in order to ease detection. (Meaning a close match counts as 1.)
|
||||
"""
|
||||
if language not in FREQUENCIES:
|
||||
raise ValueError(f"{language} not available")
|
||||
|
||||
character_approved_count: int = 0
|
||||
FREQUENCIES_language_set = set(FREQUENCIES[language])
|
||||
|
||||
ordered_characters_count: int = len(ordered_characters)
|
||||
target_language_characters_count: int = len(FREQUENCIES[language])
|
||||
|
||||
large_alphabet: bool = target_language_characters_count > 26
|
||||
|
||||
for character, character_rank in zip(
|
||||
ordered_characters, range(0, ordered_characters_count)
|
||||
):
|
||||
if character not in FREQUENCIES_language_set:
|
||||
continue
|
||||
|
||||
character_rank_in_language: int = FREQUENCIES[language].index(character)
|
||||
expected_projection_ratio: float = (
|
||||
target_language_characters_count / ordered_characters_count
|
||||
)
|
||||
character_rank_projection: int = int(character_rank * expected_projection_ratio)
|
||||
|
||||
if (
|
||||
large_alphabet is False
|
||||
and abs(character_rank_projection - character_rank_in_language) > 4
|
||||
):
|
||||
continue
|
||||
|
||||
if (
|
||||
large_alphabet is True
|
||||
and abs(character_rank_projection - character_rank_in_language)
|
||||
< target_language_characters_count / 3
|
||||
):
|
||||
character_approved_count += 1
|
||||
continue
|
||||
|
||||
characters_before_source: list[str] = FREQUENCIES[language][
|
||||
0:character_rank_in_language
|
||||
]
|
||||
characters_after_source: list[str] = FREQUENCIES[language][
|
||||
character_rank_in_language:
|
||||
]
|
||||
characters_before: list[str] = ordered_characters[0:character_rank]
|
||||
characters_after: list[str] = ordered_characters[character_rank:]
|
||||
|
||||
before_match_count: int = len(
|
||||
set(characters_before) & set(characters_before_source)
|
||||
)
|
||||
|
||||
after_match_count: int = len(
|
||||
set(characters_after) & set(characters_after_source)
|
||||
)
|
||||
|
||||
if len(characters_before_source) == 0 and before_match_count <= 4:
|
||||
character_approved_count += 1
|
||||
continue
|
||||
|
||||
if len(characters_after_source) == 0 and after_match_count <= 4:
|
||||
character_approved_count += 1
|
||||
continue
|
||||
|
||||
if (
|
||||
before_match_count / len(characters_before_source) >= 0.4
|
||||
or after_match_count / len(characters_after_source) >= 0.4
|
||||
):
|
||||
character_approved_count += 1
|
||||
continue
|
||||
|
||||
return character_approved_count / len(ordered_characters)
|
||||
|
||||
|
||||
def alpha_unicode_split(decoded_sequence: str) -> list[str]:
|
||||
"""
|
||||
Given a decoded text sequence, return a list of str. Unicode range / alphabet separation.
|
||||
Ex. a text containing English/Latin with a bit of Hebrew will return two items in the resulting list;
|
||||
one containing the Latin letters and the other the Hebrew ones.
|
||||
"""
|
||||
layers: dict[str, str] = {}
|
||||
|
||||
for character in decoded_sequence:
|
||||
if character.isalpha() is False:
|
||||
continue
|
||||
|
||||
character_range: str | None = unicode_range(character)
|
||||
|
||||
if character_range is None:
|
||||
continue
|
||||
|
||||
layer_target_range: str | None = None
|
||||
|
||||
for discovered_range in layers:
|
||||
if (
|
||||
is_suspiciously_successive_range(discovered_range, character_range)
|
||||
is False
|
||||
):
|
||||
layer_target_range = discovered_range
|
||||
break
|
||||
|
||||
if layer_target_range is None:
|
||||
layer_target_range = character_range
|
||||
|
||||
if layer_target_range not in layers:
|
||||
layers[layer_target_range] = character.lower()
|
||||
continue
|
||||
|
||||
layers[layer_target_range] += character.lower()
|
||||
|
||||
return list(layers.values())
|
||||
|
||||
|
||||
def merge_coherence_ratios(results: list[CoherenceMatches]) -> CoherenceMatches:
|
||||
"""
|
||||
This function merges results previously given by the function coherence_ratio.
|
||||
The return type is the same as coherence_ratio.
|
||||
"""
|
||||
per_language_ratios: dict[str, list[float]] = {}
|
||||
for result in results:
|
||||
for sub_result in result:
|
||||
language, ratio = sub_result
|
||||
if language not in per_language_ratios:
|
||||
per_language_ratios[language] = [ratio]
|
||||
continue
|
||||
per_language_ratios[language].append(ratio)
|
||||
|
||||
merge = [
|
||||
(
|
||||
language,
|
||||
round(
|
||||
sum(per_language_ratios[language]) / len(per_language_ratios[language]),
|
||||
4,
|
||||
),
|
||||
)
|
||||
for language in per_language_ratios
|
||||
]
|
||||
|
||||
return sorted(merge, key=lambda x: x[1], reverse=True)
|
||||
|
||||
|
||||
def filter_alt_coherence_matches(results: CoherenceMatches) -> CoherenceMatches:
|
||||
"""
|
||||
We shall NOT return "English—" in CoherenceMatches because it is an alternative
|
||||
of "English". This function only keeps the best match and remove the em-dash in it.
|
||||
"""
|
||||
index_results: dict[str, list[float]] = dict()
|
||||
|
||||
for result in results:
|
||||
language, ratio = result
|
||||
no_em_name: str = language.replace("—", "")
|
||||
|
||||
if no_em_name not in index_results:
|
||||
index_results[no_em_name] = []
|
||||
|
||||
index_results[no_em_name].append(ratio)
|
||||
|
||||
if any(len(index_results[e]) > 1 for e in index_results):
|
||||
filtered_results: CoherenceMatches = []
|
||||
|
||||
for language in index_results:
|
||||
filtered_results.append((language, max(index_results[language])))
|
||||
|
||||
return filtered_results
|
||||
|
||||
return results
|
||||
|
||||
|
||||
@lru_cache(maxsize=2048)
|
||||
def coherence_ratio(
|
||||
decoded_sequence: str, threshold: float = 0.1, lg_inclusion: str | None = None
|
||||
) -> CoherenceMatches:
|
||||
"""
|
||||
Detect ANY language that can be identified in the given sequence. The sequence will be analysed by layers.
|
||||
A layer = Character extraction by alphabets/ranges.
|
||||
"""
|
||||
|
||||
results: list[tuple[str, float]] = []
|
||||
ignore_non_latin: bool = False
|
||||
|
||||
sufficient_match_count: int = 0
|
||||
|
||||
lg_inclusion_list = lg_inclusion.split(",") if lg_inclusion is not None else []
|
||||
if "Latin Based" in lg_inclusion_list:
|
||||
ignore_non_latin = True
|
||||
lg_inclusion_list.remove("Latin Based")
|
||||
|
||||
for layer in alpha_unicode_split(decoded_sequence):
|
||||
sequence_frequencies: TypeCounter[str] = Counter(layer)
|
||||
most_common = sequence_frequencies.most_common()
|
||||
|
||||
character_count: int = sum(o for c, o in most_common)
|
||||
|
||||
if character_count <= TOO_SMALL_SEQUENCE:
|
||||
continue
|
||||
|
||||
popular_character_ordered: list[str] = [c for c, o in most_common]
|
||||
|
||||
for language in lg_inclusion_list or alphabet_languages(
|
||||
popular_character_ordered, ignore_non_latin
|
||||
):
|
||||
ratio: float = characters_popularity_compare(
|
||||
language, popular_character_ordered
|
||||
)
|
||||
|
||||
if ratio < threshold:
|
||||
continue
|
||||
elif ratio >= 0.8:
|
||||
sufficient_match_count += 1
|
||||
|
||||
results.append((language, round(ratio, 4)))
|
||||
|
||||
if sufficient_match_count >= 3:
|
||||
break
|
||||
|
||||
return sorted(
|
||||
filter_alt_coherence_matches(results), key=lambda x: x[1], reverse=True
|
||||
)
|
||||
@ -0,0 +1,8 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from .__main__ import cli_detect, query_yes_no
|
||||
|
||||
__all__ = (
|
||||
"cli_detect",
|
||||
"query_yes_no",
|
||||
)
|
||||
@ -0,0 +1,321 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import sys
|
||||
from json import dumps
|
||||
from os.path import abspath, basename, dirname, join, realpath
|
||||
from platform import python_version
|
||||
from unicodedata import unidata_version
|
||||
|
||||
import charset_normalizer.md as md_module
|
||||
from charset_normalizer import from_fp
|
||||
from charset_normalizer.models import CliDetectionResult
|
||||
from charset_normalizer.version import __version__
|
||||
|
||||
|
||||
def query_yes_no(question: str, default: str = "yes") -> bool:
|
||||
"""Ask a yes/no question via input() and return their answer.
|
||||
|
||||
"question" is a string that is presented to the user.
|
||||
"default" is the presumed answer if the user just hits <Enter>.
|
||||
It must be "yes" (the default), "no" or None (meaning
|
||||
an answer is required of the user).
|
||||
|
||||
The "answer" return value is True for "yes" or False for "no".
|
||||
|
||||
Credit goes to (c) https://stackoverflow.com/questions/3041986/apt-command-line-interface-like-yes-no-input
|
||||
"""
|
||||
valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
|
||||
if default is None:
|
||||
prompt = " [y/n] "
|
||||
elif default == "yes":
|
||||
prompt = " [Y/n] "
|
||||
elif default == "no":
|
||||
prompt = " [y/N] "
|
||||
else:
|
||||
raise ValueError("invalid default answer: '%s'" % default)
|
||||
|
||||
while True:
|
||||
sys.stdout.write(question + prompt)
|
||||
choice = input().lower()
|
||||
if default is not None and choice == "":
|
||||
return valid[default]
|
||||
elif choice in valid:
|
||||
return valid[choice]
|
||||
else:
|
||||
sys.stdout.write("Please respond with 'yes' or 'no' " "(or 'y' or 'n').\n")
|
||||
|
||||
|
||||
def cli_detect(argv: list[str] | None = None) -> int:
|
||||
"""
|
||||
CLI assistant using ARGV and ArgumentParser
|
||||
:param argv:
|
||||
:return: 0 if everything is fine, anything else signals trouble
|
||||
"""
|
||||
parser = argparse.ArgumentParser(
|
||||
description="The Real First Universal Charset Detector. "
|
||||
"Discover originating encoding used on text file. "
|
||||
"Normalize text to unicode."
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"files", type=argparse.FileType("rb"), nargs="+", help="File(s) to be analysed"
|
||||
)
|
||||
parser.add_argument(
|
||||
"-v",
|
||||
"--verbose",
|
||||
action="store_true",
|
||||
default=False,
|
||||
dest="verbose",
|
||||
help="Display complementary information about file if any. "
|
||||
"Stdout will contain logs about the detection process.",
|
||||
)
    parser.add_argument(
        "-a",
        "--with-alternative",
        action="store_true",
        default=False,
        dest="alternatives",
        help="Output complementary possibilities if any. Top-level JSON WILL be a list.",
    )
    parser.add_argument(
        "-n",
        "--normalize",
        action="store_true",
        default=False,
        dest="normalize",
        help="Permit to normalize input file. If not set, program does not write anything.",
    )
    parser.add_argument(
        "-m",
        "--minimal",
        action="store_true",
        default=False,
        dest="minimal",
        help="Only output the charset detected to STDOUT. Disabling JSON output.",
    )
    parser.add_argument(
        "-r",
        "--replace",
        action="store_true",
        default=False,
        dest="replace",
        help="Replace file when trying to normalize it instead of creating a new one.",
    )
    parser.add_argument(
        "-f",
        "--force",
        action="store_true",
        default=False,
        dest="force",
        help="Replace file without asking if you are sure, use this flag with caution.",
    )
    parser.add_argument(
        "-i",
        "--no-preemptive",
        action="store_true",
        default=False,
        dest="no_preemptive",
        help="Disable looking at a charset declaration to hint the detector.",
    )
    parser.add_argument(
        "-t",
        "--threshold",
        action="store",
        default=0.2,
        type=float,
        dest="threshold",
        help="Define a custom maximum amount of noise allowed in decoded content. 0. <= noise <= 1.",
    )
    parser.add_argument(
        "--version",
        action="version",
        version="Charset-Normalizer {} - Python {} - Unicode {} - SpeedUp {}".format(
            __version__,
            python_version(),
            unidata_version,
            "OFF" if md_module.__file__.lower().endswith(".py") else "ON",
        ),
        help="Show version information and exit.",
    )

    args = parser.parse_args(argv)

    if args.replace is True and args.normalize is False:
        if args.files:
            for my_file in args.files:
                my_file.close()
        print("Use --replace in addition of --normalize only.", file=sys.stderr)
        return 1

    if args.force is True and args.replace is False:
        if args.files:
            for my_file in args.files:
                my_file.close()
        print("Use --force in addition of --replace only.", file=sys.stderr)
        return 1

    if args.threshold < 0.0 or args.threshold > 1.0:
        if args.files:
            for my_file in args.files:
                my_file.close()
        print("--threshold VALUE should be between 0. AND 1.", file=sys.stderr)
        return 1

    x_ = []

    for my_file in args.files:
        matches = from_fp(
            my_file,
            threshold=args.threshold,
            explain=args.verbose,
            preemptive_behaviour=args.no_preemptive is False,
        )

        best_guess = matches.best()

        if best_guess is None:
            print(
                'Unable to identify originating encoding for "{}". {}'.format(
                    my_file.name,
                    (
                        "Maybe try increasing maximum amount of chaos."
                        if args.threshold < 1.0
                        else ""
                    ),
                ),
                file=sys.stderr,
            )
            x_.append(
                CliDetectionResult(
                    abspath(my_file.name),
                    None,
                    [],
                    [],
                    "Unknown",
                    [],
                    False,
                    1.0,
                    0.0,
                    None,
                    True,
                )
            )
        else:
            x_.append(
                CliDetectionResult(
                    abspath(my_file.name),
                    best_guess.encoding,
                    best_guess.encoding_aliases,
                    [
                        cp
                        for cp in best_guess.could_be_from_charset
                        if cp != best_guess.encoding
                    ],
                    best_guess.language,
                    best_guess.alphabets,
                    best_guess.bom,
                    best_guess.percent_chaos,
                    best_guess.percent_coherence,
                    None,
                    True,
                )
            )

            if len(matches) > 1 and args.alternatives:
                for el in matches:
                    if el != best_guess:
                        x_.append(
                            CliDetectionResult(
                                abspath(my_file.name),
                                el.encoding,
                                el.encoding_aliases,
                                [
                                    cp
                                    for cp in el.could_be_from_charset
                                    if cp != el.encoding
                                ],
                                el.language,
                                el.alphabets,
                                el.bom,
                                el.percent_chaos,
                                el.percent_coherence,
                                None,
                                False,
                            )
                        )

            if args.normalize is True:
                if best_guess.encoding.startswith("utf") is True:
                    print(
                        '"{}" file does not need to be normalized, as it already came from unicode.'.format(
                            my_file.name
                        ),
                        file=sys.stderr,
                    )
                    if my_file.closed is False:
                        my_file.close()
                    continue

                dir_path = dirname(realpath(my_file.name))
                file_name = basename(realpath(my_file.name))

                o_: list[str] = file_name.split(".")

                if args.replace is False:
                    o_.insert(-1, best_guess.encoding)
                    if my_file.closed is False:
                        my_file.close()
                elif (
                    args.force is False
                    and query_yes_no(
                        'Are you sure to normalize "{}" by replacing it ?'.format(
                            my_file.name
                        ),
                        "no",
                    )
                    is False
                ):
                    if my_file.closed is False:
                        my_file.close()
                    continue

                try:
                    x_[0].unicode_path = join(dir_path, ".".join(o_))

                    with open(x_[0].unicode_path, "wb") as fp:
                        fp.write(best_guess.output())
                except OSError as e:
                    print(str(e), file=sys.stderr)
                    if my_file.closed is False:
                        my_file.close()
                    return 2

        if my_file.closed is False:
            my_file.close()

    if args.minimal is False:
        print(
            dumps(
                [el.__dict__ for el in x_] if len(x_) > 1 else x_[0].__dict__,
                ensure_ascii=True,
                indent=4,
            )
        )
    else:
        for my_file in args.files:
            print(
                ", ".join(
                    [
                        el.encoding or "undefined"
                        for el in x_
                        if el.path == abspath(my_file.name)
                    ]
                )
            )

    return 0


if __name__ == "__main__":
    cli_detect()
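Note: a minimal sketch of driving the CLI above from Python, added for illustration. It assumes "sample.txt" exists (placeholder path, not a file from this repository); the flags map directly to the add_argument() calls shown above, and the import path points at the cli/__main__.py module this diff adds.

# Minimal sketch: call the CLI entry point defined above without the console script.
from charset_normalizer.cli.__main__ import cli_detect

# Equivalent to `-m -t 0.2 sample.txt`: print only the detected charset.
exit_code = cli_detect(["--minimal", "--threshold", "0.2", "sample.txt"])
# 0 on success, 1 on inconsistent flags (see the guards after parse_args),
# 2 if writing a normalized copy fails.
print("exit code:", exit_code)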
Binary file not shown.
Binary file not shown.
1998
llmlab/lib/python3.12/site-packages/charset_normalizer/constant.py
Normal file
File diff suppressed because it is too large
@ -0,0 +1,66 @@
from __future__ import annotations

from typing import TYPE_CHECKING, Any
from warnings import warn

from .api import from_bytes
from .constant import CHARDET_CORRESPONDENCE

# TODO: remove this check when dropping Python 3.7 support
if TYPE_CHECKING:
    from typing_extensions import TypedDict

    class ResultDict(TypedDict):
        encoding: str | None
        language: str
        confidence: float | None


def detect(
    byte_str: bytes, should_rename_legacy: bool = False, **kwargs: Any
) -> ResultDict:
    """
    chardet legacy method
    Detect the encoding of the given byte string. It should be mostly backward-compatible.
    Encoding name will match Chardet own writing whenever possible. (Not on encoding name unsupported by it)
    This function is deprecated and should be used to migrate your project easily, consult the documentation for
    further information. Not planned for removal.

    :param byte_str:     The byte sequence to examine.
    :param should_rename_legacy:  Should we rename legacy encodings
                                  to their more modern equivalents?
    """
    if len(kwargs):
        warn(
            f"charset-normalizer disregard arguments '{','.join(list(kwargs.keys()))}' in legacy function detect()"
        )

    if not isinstance(byte_str, (bytearray, bytes)):
        raise TypeError(  # pragma: nocover
            "Expected object of type bytes or bytearray, got: " "{}".format(
                type(byte_str)
            )
        )

    if isinstance(byte_str, bytearray):
        byte_str = bytes(byte_str)

    r = from_bytes(byte_str).best()

    encoding = r.encoding if r is not None else None
    language = r.language if r is not None and r.language != "Unknown" else ""
    confidence = 1.0 - r.chaos if r is not None else None

    # Note: CharsetNormalizer does not return 'UTF-8-SIG' as the sig get stripped in the detection/normalization process
    # but chardet does return 'utf-8-sig' and it is a valid codec name.
    if r is not None and encoding == "utf_8" and r.bom:
        encoding += "_sig"

    if should_rename_legacy is False and encoding in CHARDET_CORRESPONDENCE:
        encoding = CHARDET_CORRESPONDENCE[encoding]

    return {
        "encoding": encoding,
        "language": language,
        "confidence": confidence,
    }
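Note: the legacy detect() above is intended as a drop-in for chardet.detect(); here is a minimal usage sketch (the sample bytes are invented for the example).

from charset_normalizer import detect

result = detect("Comment ça va ?".encode("cp1252"))
# Expected shape: {'encoding': ..., 'language': ..., 'confidence': ...}
print(result["encoding"], result["language"], result["confidence"])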
Binary file not shown.
630
llmlab/lib/python3.12/site-packages/charset_normalizer/md.py
Normal file
@ -0,0 +1,630 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from functools import lru_cache
|
||||
from logging import getLogger
|
||||
|
||||
from .constant import (
|
||||
COMMON_SAFE_ASCII_CHARACTERS,
|
||||
TRACE,
|
||||
UNICODE_SECONDARY_RANGE_KEYWORD,
|
||||
)
|
||||
from .utils import (
|
||||
is_accentuated,
|
||||
is_arabic,
|
||||
is_arabic_isolated_form,
|
||||
is_case_variable,
|
||||
is_cjk,
|
||||
is_emoticon,
|
||||
is_hangul,
|
||||
is_hiragana,
|
||||
is_katakana,
|
||||
is_latin,
|
||||
is_punctuation,
|
||||
is_separator,
|
||||
is_symbol,
|
||||
is_thai,
|
||||
is_unprintable,
|
||||
remove_accent,
|
||||
unicode_range,
|
||||
)
|
||||
|
||||
|
||||
class MessDetectorPlugin:
|
||||
"""
|
||||
Base abstract class used for mess detection plugins.
|
||||
All detectors MUST extend and implement given methods.
|
||||
"""
|
||||
|
||||
def eligible(self, character: str) -> bool:
|
||||
"""
|
||||
Determine if given character should be fed in.
|
||||
"""
|
||||
raise NotImplementedError # pragma: nocover
|
||||
|
||||
def feed(self, character: str) -> None:
|
||||
"""
|
||||
The main routine to be executed upon character.
|
||||
Insert the logic in witch the text would be considered chaotic.
|
||||
"""
|
||||
raise NotImplementedError # pragma: nocover
|
||||
|
||||
def reset(self) -> None: # pragma: no cover
|
||||
"""
|
||||
Permit to reset the plugin to the initial state.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
@property
|
||||
def ratio(self) -> float:
|
||||
"""
|
||||
Compute the chaos ratio based on what your feed() has seen.
|
||||
Must NOT be lower than 0.; No restriction gt 0.
|
||||
"""
|
||||
raise NotImplementedError # pragma: nocover
|
||||
|
||||
|
||||
class TooManySymbolOrPunctuationPlugin(MessDetectorPlugin):
|
||||
def __init__(self) -> None:
|
||||
self._punctuation_count: int = 0
|
||||
self._symbol_count: int = 0
|
||||
self._character_count: int = 0
|
||||
|
||||
self._last_printable_char: str | None = None
|
||||
self._frenzy_symbol_in_word: bool = False
|
||||
|
||||
def eligible(self, character: str) -> bool:
|
||||
return character.isprintable()
|
||||
|
||||
def feed(self, character: str) -> None:
|
||||
self._character_count += 1
|
||||
|
||||
if (
|
||||
character != self._last_printable_char
|
||||
and character not in COMMON_SAFE_ASCII_CHARACTERS
|
||||
):
|
||||
if is_punctuation(character):
|
||||
self._punctuation_count += 1
|
||||
elif (
|
||||
character.isdigit() is False
|
||||
and is_symbol(character)
|
||||
and is_emoticon(character) is False
|
||||
):
|
||||
self._symbol_count += 2
|
||||
|
||||
self._last_printable_char = character
|
||||
|
||||
def reset(self) -> None: # Abstract
|
||||
self._punctuation_count = 0
|
||||
self._character_count = 0
|
||||
self._symbol_count = 0
|
||||
|
||||
@property
|
||||
def ratio(self) -> float:
|
||||
if self._character_count == 0:
|
||||
return 0.0
|
||||
|
||||
ratio_of_punctuation: float = (
|
||||
self._punctuation_count + self._symbol_count
|
||||
) / self._character_count
|
||||
|
||||
return ratio_of_punctuation if ratio_of_punctuation >= 0.3 else 0.0
|
||||
|
||||
|
||||
class TooManyAccentuatedPlugin(MessDetectorPlugin):
|
||||
def __init__(self) -> None:
|
||||
self._character_count: int = 0
|
||||
self._accentuated_count: int = 0
|
||||
|
||||
def eligible(self, character: str) -> bool:
|
||||
return character.isalpha()
|
||||
|
||||
def feed(self, character: str) -> None:
|
||||
self._character_count += 1
|
||||
|
||||
if is_accentuated(character):
|
||||
self._accentuated_count += 1
|
||||
|
||||
def reset(self) -> None: # Abstract
|
||||
self._character_count = 0
|
||||
self._accentuated_count = 0
|
||||
|
||||
@property
|
||||
def ratio(self) -> float:
|
||||
if self._character_count < 8:
|
||||
return 0.0
|
||||
|
||||
ratio_of_accentuation: float = self._accentuated_count / self._character_count
|
||||
return ratio_of_accentuation if ratio_of_accentuation >= 0.35 else 0.0
|
||||
|
||||
|
||||
class UnprintablePlugin(MessDetectorPlugin):
|
||||
def __init__(self) -> None:
|
||||
self._unprintable_count: int = 0
|
||||
self._character_count: int = 0
|
||||
|
||||
def eligible(self, character: str) -> bool:
|
||||
return True
|
||||
|
||||
def feed(self, character: str) -> None:
|
||||
if is_unprintable(character):
|
||||
self._unprintable_count += 1
|
||||
self._character_count += 1
|
||||
|
||||
def reset(self) -> None: # Abstract
|
||||
self._unprintable_count = 0
|
||||
|
||||
@property
|
||||
def ratio(self) -> float:
|
||||
if self._character_count == 0:
|
||||
return 0.0
|
||||
|
||||
return (self._unprintable_count * 8) / self._character_count
|
||||
|
||||
|
||||
class SuspiciousDuplicateAccentPlugin(MessDetectorPlugin):
|
||||
def __init__(self) -> None:
|
||||
self._successive_count: int = 0
|
||||
self._character_count: int = 0
|
||||
|
||||
self._last_latin_character: str | None = None
|
||||
|
||||
def eligible(self, character: str) -> bool:
|
||||
return character.isalpha() and is_latin(character)
|
||||
|
||||
def feed(self, character: str) -> None:
|
||||
self._character_count += 1
|
||||
if (
|
||||
self._last_latin_character is not None
|
||||
and is_accentuated(character)
|
||||
and is_accentuated(self._last_latin_character)
|
||||
):
|
||||
if character.isupper() and self._last_latin_character.isupper():
|
||||
self._successive_count += 1
|
||||
# Worse if its the same char duplicated with different accent.
|
||||
if remove_accent(character) == remove_accent(self._last_latin_character):
|
||||
self._successive_count += 1
|
||||
self._last_latin_character = character
|
||||
|
||||
def reset(self) -> None: # Abstract
|
||||
self._successive_count = 0
|
||||
self._character_count = 0
|
||||
self._last_latin_character = None
|
||||
|
||||
@property
|
||||
def ratio(self) -> float:
|
||||
if self._character_count == 0:
|
||||
return 0.0
|
||||
|
||||
return (self._successive_count * 2) / self._character_count
|
||||
|
||||
|
||||
class SuspiciousRange(MessDetectorPlugin):
|
||||
def __init__(self) -> None:
|
||||
self._suspicious_successive_range_count: int = 0
|
||||
self._character_count: int = 0
|
||||
self._last_printable_seen: str | None = None
|
||||
|
||||
def eligible(self, character: str) -> bool:
|
||||
return character.isprintable()
|
||||
|
||||
def feed(self, character: str) -> None:
|
||||
self._character_count += 1
|
||||
|
||||
if (
|
||||
character.isspace()
|
||||
or is_punctuation(character)
|
||||
or character in COMMON_SAFE_ASCII_CHARACTERS
|
||||
):
|
||||
self._last_printable_seen = None
|
||||
return
|
||||
|
||||
if self._last_printable_seen is None:
|
||||
self._last_printable_seen = character
|
||||
return
|
||||
|
||||
unicode_range_a: str | None = unicode_range(self._last_printable_seen)
|
||||
unicode_range_b: str | None = unicode_range(character)
|
||||
|
||||
if is_suspiciously_successive_range(unicode_range_a, unicode_range_b):
|
||||
self._suspicious_successive_range_count += 1
|
||||
|
||||
self._last_printable_seen = character
|
||||
|
||||
def reset(self) -> None: # Abstract
|
||||
self._character_count = 0
|
||||
self._suspicious_successive_range_count = 0
|
||||
self._last_printable_seen = None
|
||||
|
||||
@property
|
||||
def ratio(self) -> float:
|
||||
if self._character_count <= 13:
|
||||
return 0.0
|
||||
|
||||
ratio_of_suspicious_range_usage: float = (
|
||||
self._suspicious_successive_range_count * 2
|
||||
) / self._character_count
|
||||
|
||||
return ratio_of_suspicious_range_usage
|
||||
|
||||
|
||||
class SuperWeirdWordPlugin(MessDetectorPlugin):
|
||||
def __init__(self) -> None:
|
||||
self._word_count: int = 0
|
||||
self._bad_word_count: int = 0
|
||||
self._foreign_long_count: int = 0
|
||||
|
||||
self._is_current_word_bad: bool = False
|
||||
self._foreign_long_watch: bool = False
|
||||
|
||||
self._character_count: int = 0
|
||||
self._bad_character_count: int = 0
|
||||
|
||||
self._buffer: str = ""
|
||||
self._buffer_accent_count: int = 0
|
||||
self._buffer_glyph_count: int = 0
|
||||
|
||||
def eligible(self, character: str) -> bool:
|
||||
return True
|
||||
|
||||
def feed(self, character: str) -> None:
|
||||
if character.isalpha():
|
||||
self._buffer += character
|
||||
if is_accentuated(character):
|
||||
self._buffer_accent_count += 1
|
||||
if (
|
||||
self._foreign_long_watch is False
|
||||
and (is_latin(character) is False or is_accentuated(character))
|
||||
and is_cjk(character) is False
|
||||
and is_hangul(character) is False
|
||||
and is_katakana(character) is False
|
||||
and is_hiragana(character) is False
|
||||
and is_thai(character) is False
|
||||
):
|
||||
self._foreign_long_watch = True
|
||||
if (
|
||||
is_cjk(character)
|
||||
or is_hangul(character)
|
||||
or is_katakana(character)
|
||||
or is_hiragana(character)
|
||||
or is_thai(character)
|
||||
):
|
||||
self._buffer_glyph_count += 1
|
||||
return
|
||||
if not self._buffer:
|
||||
return
|
||||
if (
|
||||
character.isspace() or is_punctuation(character) or is_separator(character)
|
||||
) and self._buffer:
|
||||
self._word_count += 1
|
||||
buffer_length: int = len(self._buffer)
|
||||
|
||||
self._character_count += buffer_length
|
||||
|
||||
if buffer_length >= 4:
|
||||
if self._buffer_accent_count / buffer_length >= 0.5:
|
||||
self._is_current_word_bad = True
|
||||
# Word/Buffer ending with an upper case accentuated letter are so rare,
|
||||
# that we will consider them all as suspicious. Same weight as foreign_long suspicious.
|
||||
elif (
|
||||
is_accentuated(self._buffer[-1])
|
||||
and self._buffer[-1].isupper()
|
||||
and all(_.isupper() for _ in self._buffer) is False
|
||||
):
|
||||
self._foreign_long_count += 1
|
||||
self._is_current_word_bad = True
|
||||
elif self._buffer_glyph_count == 1:
|
||||
self._is_current_word_bad = True
|
||||
self._foreign_long_count += 1
|
||||
if buffer_length >= 24 and self._foreign_long_watch:
|
||||
camel_case_dst = [
|
||||
i
|
||||
for c, i in zip(self._buffer, range(0, buffer_length))
|
||||
if c.isupper()
|
||||
]
|
||||
probable_camel_cased: bool = False
|
||||
|
||||
if camel_case_dst and (len(camel_case_dst) / buffer_length <= 0.3):
|
||||
probable_camel_cased = True
|
||||
|
||||
if not probable_camel_cased:
|
||||
self._foreign_long_count += 1
|
||||
self._is_current_word_bad = True
|
||||
|
||||
if self._is_current_word_bad:
|
||||
self._bad_word_count += 1
|
||||
self._bad_character_count += len(self._buffer)
|
||||
self._is_current_word_bad = False
|
||||
|
||||
self._foreign_long_watch = False
|
||||
self._buffer = ""
|
||||
self._buffer_accent_count = 0
|
||||
self._buffer_glyph_count = 0
|
||||
elif (
|
||||
character not in {"<", ">", "-", "=", "~", "|", "_"}
|
||||
and character.isdigit() is False
|
||||
and is_symbol(character)
|
||||
):
|
||||
self._is_current_word_bad = True
|
||||
self._buffer += character
|
||||
|
||||
def reset(self) -> None: # Abstract
|
||||
self._buffer = ""
|
||||
self._is_current_word_bad = False
|
||||
self._foreign_long_watch = False
|
||||
self._bad_word_count = 0
|
||||
self._word_count = 0
|
||||
self._character_count = 0
|
||||
self._bad_character_count = 0
|
||||
self._foreign_long_count = 0
|
||||
|
||||
@property
|
||||
def ratio(self) -> float:
|
||||
if self._word_count <= 10 and self._foreign_long_count == 0:
|
||||
return 0.0
|
||||
|
||||
return self._bad_character_count / self._character_count
|
||||
|
||||
|
||||
class CjkInvalidStopPlugin(MessDetectorPlugin):
|
||||
"""
|
||||
GB(Chinese) based encoding often render the stop incorrectly when the content does not fit and
|
||||
can be easily detected. Searching for the overuse of '丅' and '丄'.
|
||||
"""
|
||||
|
||||
def __init__(self) -> None:
|
||||
self._wrong_stop_count: int = 0
|
||||
self._cjk_character_count: int = 0
|
||||
|
||||
def eligible(self, character: str) -> bool:
|
||||
return True
|
||||
|
||||
def feed(self, character: str) -> None:
|
||||
if character in {"丅", "丄"}:
|
||||
self._wrong_stop_count += 1
|
||||
return
|
||||
if is_cjk(character):
|
||||
self._cjk_character_count += 1
|
||||
|
||||
def reset(self) -> None: # Abstract
|
||||
self._wrong_stop_count = 0
|
||||
self._cjk_character_count = 0
|
||||
|
||||
@property
|
||||
def ratio(self) -> float:
|
||||
if self._cjk_character_count < 16:
|
||||
return 0.0
|
||||
return self._wrong_stop_count / self._cjk_character_count
|
||||
|
||||
|
||||
class ArchaicUpperLowerPlugin(MessDetectorPlugin):
|
||||
def __init__(self) -> None:
|
||||
self._buf: bool = False
|
||||
|
||||
self._character_count_since_last_sep: int = 0
|
||||
|
||||
self._successive_upper_lower_count: int = 0
|
||||
self._successive_upper_lower_count_final: int = 0
|
||||
|
||||
self._character_count: int = 0
|
||||
|
||||
self._last_alpha_seen: str | None = None
|
||||
self._current_ascii_only: bool = True
|
||||
|
||||
def eligible(self, character: str) -> bool:
|
||||
return True
|
||||
|
||||
def feed(self, character: str) -> None:
|
||||
is_concerned = character.isalpha() and is_case_variable(character)
|
||||
chunk_sep = is_concerned is False
|
||||
|
||||
if chunk_sep and self._character_count_since_last_sep > 0:
|
||||
if (
|
||||
self._character_count_since_last_sep <= 64
|
||||
and character.isdigit() is False
|
||||
and self._current_ascii_only is False
|
||||
):
|
||||
self._successive_upper_lower_count_final += (
|
||||
self._successive_upper_lower_count
|
||||
)
|
||||
|
||||
self._successive_upper_lower_count = 0
|
||||
self._character_count_since_last_sep = 0
|
||||
self._last_alpha_seen = None
|
||||
self._buf = False
|
||||
self._character_count += 1
|
||||
self._current_ascii_only = True
|
||||
|
||||
return
|
||||
|
||||
if self._current_ascii_only is True and character.isascii() is False:
|
||||
self._current_ascii_only = False
|
||||
|
||||
if self._last_alpha_seen is not None:
|
||||
if (character.isupper() and self._last_alpha_seen.islower()) or (
|
||||
character.islower() and self._last_alpha_seen.isupper()
|
||||
):
|
||||
if self._buf is True:
|
||||
self._successive_upper_lower_count += 2
|
||||
self._buf = False
|
||||
else:
|
||||
self._buf = True
|
||||
else:
|
||||
self._buf = False
|
||||
|
||||
self._character_count += 1
|
||||
self._character_count_since_last_sep += 1
|
||||
self._last_alpha_seen = character
|
||||
|
||||
def reset(self) -> None: # Abstract
|
||||
self._character_count = 0
|
||||
self._character_count_since_last_sep = 0
|
||||
self._successive_upper_lower_count = 0
|
||||
self._successive_upper_lower_count_final = 0
|
||||
self._last_alpha_seen = None
|
||||
self._buf = False
|
||||
self._current_ascii_only = True
|
||||
|
||||
@property
|
||||
def ratio(self) -> float:
|
||||
if self._character_count == 0:
|
||||
return 0.0
|
||||
|
||||
return self._successive_upper_lower_count_final / self._character_count
|
||||
|
||||
|
||||
class ArabicIsolatedFormPlugin(MessDetectorPlugin):
|
||||
def __init__(self) -> None:
|
||||
self._character_count: int = 0
|
||||
self._isolated_form_count: int = 0
|
||||
|
||||
def reset(self) -> None: # Abstract
|
||||
self._character_count = 0
|
||||
self._isolated_form_count = 0
|
||||
|
||||
def eligible(self, character: str) -> bool:
|
||||
return is_arabic(character)
|
||||
|
||||
def feed(self, character: str) -> None:
|
||||
self._character_count += 1
|
||||
|
||||
if is_arabic_isolated_form(character):
|
||||
self._isolated_form_count += 1
|
||||
|
||||
@property
|
||||
def ratio(self) -> float:
|
||||
if self._character_count < 8:
|
||||
return 0.0
|
||||
|
||||
isolated_form_usage: float = self._isolated_form_count / self._character_count
|
||||
|
||||
return isolated_form_usage
|
||||
|
||||
|
||||
@lru_cache(maxsize=1024)
|
||||
def is_suspiciously_successive_range(
|
||||
unicode_range_a: str | None, unicode_range_b: str | None
|
||||
) -> bool:
|
||||
"""
|
||||
Determine if two Unicode range seen next to each other can be considered as suspicious.
|
||||
"""
|
||||
if unicode_range_a is None or unicode_range_b is None:
|
||||
return True
|
||||
|
||||
if unicode_range_a == unicode_range_b:
|
||||
return False
|
||||
|
||||
if "Latin" in unicode_range_a and "Latin" in unicode_range_b:
|
||||
return False
|
||||
|
||||
if "Emoticons" in unicode_range_a or "Emoticons" in unicode_range_b:
|
||||
return False
|
||||
|
||||
# Latin characters can be accompanied with a combining diacritical mark
|
||||
# eg. Vietnamese.
|
||||
if ("Latin" in unicode_range_a or "Latin" in unicode_range_b) and (
|
||||
"Combining" in unicode_range_a or "Combining" in unicode_range_b
|
||||
):
|
||||
return False
|
||||
|
||||
keywords_range_a, keywords_range_b = (
|
||||
unicode_range_a.split(" "),
|
||||
unicode_range_b.split(" "),
|
||||
)
|
||||
|
||||
for el in keywords_range_a:
|
||||
if el in UNICODE_SECONDARY_RANGE_KEYWORD:
|
||||
continue
|
||||
if el in keywords_range_b:
|
||||
return False
|
||||
|
||||
# Japanese Exception
|
||||
range_a_jp_chars, range_b_jp_chars = (
|
||||
unicode_range_a
|
||||
in (
|
||||
"Hiragana",
|
||||
"Katakana",
|
||||
),
|
||||
unicode_range_b in ("Hiragana", "Katakana"),
|
||||
)
|
||||
if (range_a_jp_chars or range_b_jp_chars) and (
|
||||
"CJK" in unicode_range_a or "CJK" in unicode_range_b
|
||||
):
|
||||
return False
|
||||
if range_a_jp_chars and range_b_jp_chars:
|
||||
return False
|
||||
|
||||
if "Hangul" in unicode_range_a or "Hangul" in unicode_range_b:
|
||||
if "CJK" in unicode_range_a or "CJK" in unicode_range_b:
|
||||
return False
|
||||
if unicode_range_a == "Basic Latin" or unicode_range_b == "Basic Latin":
|
||||
return False
|
||||
|
||||
# Chinese/Japanese use dedicated range for punctuation and/or separators.
|
||||
if ("CJK" in unicode_range_a or "CJK" in unicode_range_b) or (
|
||||
unicode_range_a in ["Katakana", "Hiragana"]
|
||||
and unicode_range_b in ["Katakana", "Hiragana"]
|
||||
):
|
||||
if "Punctuation" in unicode_range_a or "Punctuation" in unicode_range_b:
|
||||
return False
|
||||
if "Forms" in unicode_range_a or "Forms" in unicode_range_b:
|
||||
return False
|
||||
if unicode_range_a == "Basic Latin" or unicode_range_b == "Basic Latin":
|
||||
return False
|
||||
|
||||
return True
|
||||
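Note: a short illustration of the range-transition helper above; the character pairs are chosen only for the demo and rely on unicode_range() from utils.py.

from charset_normalizer.md import is_suspiciously_successive_range
from charset_normalizer.utils import unicode_range

# Two Basic Latin letters share a range: not suspicious.
print(is_suspiciously_successive_range(unicode_range("a"), unicode_range("b")))  # False
# A Latin letter immediately followed by a Cyrillic one is flagged.
print(is_suspiciously_successive_range(unicode_range("a"), unicode_range("Ж")))  # True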
|
||||
|
||||
@lru_cache(maxsize=2048)
|
||||
def mess_ratio(
|
||||
decoded_sequence: str, maximum_threshold: float = 0.2, debug: bool = False
|
||||
) -> float:
|
||||
"""
|
||||
Compute a mess ratio given a decoded bytes sequence. The maximum threshold does stop the computation earlier.
|
||||
"""
|
||||
|
||||
detectors: list[MessDetectorPlugin] = [
|
||||
md_class() for md_class in MessDetectorPlugin.__subclasses__()
|
||||
]
|
||||
|
||||
length: int = len(decoded_sequence) + 1
|
||||
|
||||
mean_mess_ratio: float = 0.0
|
||||
|
||||
if length < 512:
|
||||
intermediary_mean_mess_ratio_calc: int = 32
|
||||
elif length <= 1024:
|
||||
intermediary_mean_mess_ratio_calc = 64
|
||||
else:
|
||||
intermediary_mean_mess_ratio_calc = 128
|
||||
|
||||
for character, index in zip(decoded_sequence + "\n", range(length)):
|
||||
for detector in detectors:
|
||||
if detector.eligible(character):
|
||||
detector.feed(character)
|
||||
|
||||
if (
|
||||
index > 0 and index % intermediary_mean_mess_ratio_calc == 0
|
||||
) or index == length - 1:
|
||||
mean_mess_ratio = sum(dt.ratio for dt in detectors)
|
||||
|
||||
if mean_mess_ratio >= maximum_threshold:
|
||||
break
|
||||
|
||||
if debug:
|
||||
logger = getLogger("charset_normalizer")
|
||||
|
||||
logger.log(
|
||||
TRACE,
|
||||
"Mess-detector extended-analysis start. "
|
||||
f"intermediary_mean_mess_ratio_calc={intermediary_mean_mess_ratio_calc} mean_mess_ratio={mean_mess_ratio} "
|
||||
f"maximum_threshold={maximum_threshold}",
|
||||
)
|
||||
|
||||
if len(decoded_sequence) > 16:
|
||||
logger.log(TRACE, f"Starting with: {decoded_sequence[:16]}")
|
||||
logger.log(TRACE, f"Ending with: {decoded_sequence[-16::]}")
|
||||
|
||||
for dt in detectors:
|
||||
logger.log(TRACE, f"{dt.__class__}: {dt.ratio}")
|
||||
|
||||
return round(mean_mess_ratio, 3)
|
||||
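Note: a small sketch of how mess_ratio() above behaves; both strings are made up for the example and the expected values are approximate.

from charset_normalizer.md import mess_ratio

clean = "Simple, readable sentence."
mojibake = "Ã©tÃ© â€“ â€œbrokenâ€ text"

print(mess_ratio(clean))          # expected to stay close to 0.0
print(mess_ratio(mojibake, 0.5))  # typically much higher; may stop early once the threshold is reached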
Binary file not shown.
360
llmlab/lib/python3.12/site-packages/charset_normalizer/models.py
Normal file
@ -0,0 +1,360 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from encodings.aliases import aliases
|
||||
from hashlib import sha256
|
||||
from json import dumps
|
||||
from re import sub
|
||||
from typing import Any, Iterator, List, Tuple
|
||||
|
||||
from .constant import RE_POSSIBLE_ENCODING_INDICATION, TOO_BIG_SEQUENCE
|
||||
from .utils import iana_name, is_multi_byte_encoding, unicode_range
|
||||
|
||||
|
||||
class CharsetMatch:
|
||||
def __init__(
|
||||
self,
|
||||
payload: bytes,
|
||||
guessed_encoding: str,
|
||||
mean_mess_ratio: float,
|
||||
has_sig_or_bom: bool,
|
||||
languages: CoherenceMatches,
|
||||
decoded_payload: str | None = None,
|
||||
preemptive_declaration: str | None = None,
|
||||
):
|
||||
self._payload: bytes = payload
|
||||
|
||||
self._encoding: str = guessed_encoding
|
||||
self._mean_mess_ratio: float = mean_mess_ratio
|
||||
self._languages: CoherenceMatches = languages
|
||||
self._has_sig_or_bom: bool = has_sig_or_bom
|
||||
self._unicode_ranges: list[str] | None = None
|
||||
|
||||
self._leaves: list[CharsetMatch] = []
|
||||
self._mean_coherence_ratio: float = 0.0
|
||||
|
||||
self._output_payload: bytes | None = None
|
||||
self._output_encoding: str | None = None
|
||||
|
||||
self._string: str | None = decoded_payload
|
||||
|
||||
self._preemptive_declaration: str | None = preemptive_declaration
|
||||
|
||||
def __eq__(self, other: object) -> bool:
|
||||
if not isinstance(other, CharsetMatch):
|
||||
if isinstance(other, str):
|
||||
return iana_name(other) == self.encoding
|
||||
return False
|
||||
return self.encoding == other.encoding and self.fingerprint == other.fingerprint
|
||||
|
||||
def __lt__(self, other: object) -> bool:
|
||||
"""
|
||||
Implemented to make sorted available upon CharsetMatches items.
|
||||
"""
|
||||
if not isinstance(other, CharsetMatch):
|
||||
raise ValueError
|
||||
|
||||
chaos_difference: float = abs(self.chaos - other.chaos)
|
||||
coherence_difference: float = abs(self.coherence - other.coherence)
|
||||
|
||||
# Below 1% difference --> Use Coherence
|
||||
if chaos_difference < 0.01 and coherence_difference > 0.02:
|
||||
return self.coherence > other.coherence
|
||||
elif chaos_difference < 0.01 and coherence_difference <= 0.02:
|
||||
# When having a difficult decision, use the result that decoded as many multi-byte as possible.
|
||||
# preserve RAM usage!
|
||||
if len(self._payload) >= TOO_BIG_SEQUENCE:
|
||||
return self.chaos < other.chaos
|
||||
return self.multi_byte_usage > other.multi_byte_usage
|
||||
|
||||
return self.chaos < other.chaos
|
||||
|
||||
@property
|
||||
def multi_byte_usage(self) -> float:
|
||||
return 1.0 - (len(str(self)) / len(self.raw))
|
||||
|
||||
def __str__(self) -> str:
|
||||
# Lazy Str Loading
|
||||
if self._string is None:
|
||||
self._string = str(self._payload, self._encoding, "strict")
|
||||
return self._string
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return f"<CharsetMatch '{self.encoding}' bytes({self.fingerprint})>"
|
||||
|
||||
def add_submatch(self, other: CharsetMatch) -> None:
|
||||
if not isinstance(other, CharsetMatch) or other == self:
|
||||
raise ValueError(
|
||||
"Unable to add instance <{}> as a submatch of a CharsetMatch".format(
|
||||
other.__class__
|
||||
)
|
||||
)
|
||||
|
||||
other._string = None # Unload RAM usage; dirty trick.
|
||||
self._leaves.append(other)
|
||||
|
||||
@property
|
||||
def encoding(self) -> str:
|
||||
return self._encoding
|
||||
|
||||
@property
|
||||
def encoding_aliases(self) -> list[str]:
|
||||
"""
|
||||
Encoding name are known by many name, using this could help when searching for IBM855 when it's listed as CP855.
|
||||
"""
|
||||
also_known_as: list[str] = []
|
||||
for u, p in aliases.items():
|
||||
if self.encoding == u:
|
||||
also_known_as.append(p)
|
||||
elif self.encoding == p:
|
||||
also_known_as.append(u)
|
||||
return also_known_as
|
||||
|
||||
@property
|
||||
def bom(self) -> bool:
|
||||
return self._has_sig_or_bom
|
||||
|
||||
@property
|
||||
def byte_order_mark(self) -> bool:
|
||||
return self._has_sig_or_bom
|
||||
|
||||
@property
|
||||
def languages(self) -> list[str]:
|
||||
"""
|
||||
Return the complete list of possible languages found in decoded sequence.
|
||||
Usually not really useful. Returned list may be empty even if 'language' property return something != 'Unknown'.
|
||||
"""
|
||||
return [e[0] for e in self._languages]
|
||||
|
||||
@property
|
||||
def language(self) -> str:
|
||||
"""
|
||||
Most probable language found in decoded sequence. If none were detected or inferred, the property will return
|
||||
"Unknown".
|
||||
"""
|
||||
if not self._languages:
|
||||
# Trying to infer the language based on the given encoding
|
||||
# Its either English or we should not pronounce ourselves in certain cases.
|
||||
if "ascii" in self.could_be_from_charset:
|
||||
return "English"
|
||||
|
||||
# doing it there to avoid circular import
|
||||
from charset_normalizer.cd import encoding_languages, mb_encoding_languages
|
||||
|
||||
languages = (
|
||||
mb_encoding_languages(self.encoding)
|
||||
if is_multi_byte_encoding(self.encoding)
|
||||
else encoding_languages(self.encoding)
|
||||
)
|
||||
|
||||
if len(languages) == 0 or "Latin Based" in languages:
|
||||
return "Unknown"
|
||||
|
||||
return languages[0]
|
||||
|
||||
return self._languages[0][0]
|
||||
|
||||
@property
|
||||
def chaos(self) -> float:
|
||||
return self._mean_mess_ratio
|
||||
|
||||
@property
|
||||
def coherence(self) -> float:
|
||||
if not self._languages:
|
||||
return 0.0
|
||||
return self._languages[0][1]
|
||||
|
||||
@property
|
||||
def percent_chaos(self) -> float:
|
||||
return round(self.chaos * 100, ndigits=3)
|
||||
|
||||
@property
|
||||
def percent_coherence(self) -> float:
|
||||
return round(self.coherence * 100, ndigits=3)
|
||||
|
||||
@property
|
||||
def raw(self) -> bytes:
|
||||
"""
|
||||
Original untouched bytes.
|
||||
"""
|
||||
return self._payload
|
||||
|
||||
@property
|
||||
def submatch(self) -> list[CharsetMatch]:
|
||||
return self._leaves
|
||||
|
||||
@property
|
||||
def has_submatch(self) -> bool:
|
||||
return len(self._leaves) > 0
|
||||
|
||||
@property
|
||||
def alphabets(self) -> list[str]:
|
||||
if self._unicode_ranges is not None:
|
||||
return self._unicode_ranges
|
||||
# list detected ranges
|
||||
detected_ranges: list[str | None] = [unicode_range(char) for char in str(self)]
|
||||
# filter and sort
|
||||
self._unicode_ranges = sorted(list({r for r in detected_ranges if r}))
|
||||
return self._unicode_ranges
|
||||
|
||||
@property
|
||||
def could_be_from_charset(self) -> list[str]:
|
||||
"""
|
||||
The complete list of encoding that output the exact SAME str result and therefore could be the originating
|
||||
encoding.
|
||||
This list does include the encoding available in property 'encoding'.
|
||||
"""
|
||||
return [self._encoding] + [m.encoding for m in self._leaves]
|
||||
|
||||
def output(self, encoding: str = "utf_8") -> bytes:
|
||||
"""
|
||||
Method to get re-encoded bytes payload using given target encoding. Default to UTF-8.
|
||||
Any errors will be simply ignored by the encoder NOT replaced.
|
||||
"""
|
||||
if self._output_encoding is None or self._output_encoding != encoding:
|
||||
self._output_encoding = encoding
|
||||
decoded_string = str(self)
|
||||
if (
|
||||
self._preemptive_declaration is not None
|
||||
and self._preemptive_declaration.lower()
|
||||
not in ["utf-8", "utf8", "utf_8"]
|
||||
):
|
||||
patched_header = sub(
|
||||
RE_POSSIBLE_ENCODING_INDICATION,
|
||||
lambda m: m.string[m.span()[0] : m.span()[1]].replace(
|
||||
m.groups()[0],
|
||||
iana_name(self._output_encoding).replace("_", "-"), # type: ignore[arg-type]
|
||||
),
|
||||
decoded_string[:8192],
|
||||
count=1,
|
||||
)
|
||||
|
||||
decoded_string = patched_header + decoded_string[8192:]
|
||||
|
||||
self._output_payload = decoded_string.encode(encoding, "replace")
|
||||
|
||||
return self._output_payload # type: ignore
|
||||
|
||||
@property
|
||||
def fingerprint(self) -> str:
|
||||
"""
|
||||
Retrieve the unique SHA256 computed using the transformed (re-encoded) payload. Not the original one.
|
||||
"""
|
||||
return sha256(self.output()).hexdigest()
|
||||
|
||||
|
||||
class CharsetMatches:
|
||||
"""
|
||||
Container with every CharsetMatch items ordered by default from most probable to the less one.
|
||||
Act like a list(iterable) but does not implements all related methods.
|
||||
"""
|
||||
|
||||
def __init__(self, results: list[CharsetMatch] | None = None):
|
||||
self._results: list[CharsetMatch] = sorted(results) if results else []
|
||||
|
||||
def __iter__(self) -> Iterator[CharsetMatch]:
|
||||
yield from self._results
|
||||
|
||||
def __getitem__(self, item: int | str) -> CharsetMatch:
|
||||
"""
|
||||
Retrieve a single item either by its position or encoding name (alias may be used here).
|
||||
Raise KeyError upon invalid index or encoding not present in results.
|
||||
"""
|
||||
if isinstance(item, int):
|
||||
return self._results[item]
|
||||
if isinstance(item, str):
|
||||
item = iana_name(item, False)
|
||||
for result in self._results:
|
||||
if item in result.could_be_from_charset:
|
||||
return result
|
||||
raise KeyError
|
||||
|
||||
def __len__(self) -> int:
|
||||
return len(self._results)
|
||||
|
||||
def __bool__(self) -> bool:
|
||||
return len(self._results) > 0
|
||||
|
||||
def append(self, item: CharsetMatch) -> None:
|
||||
"""
|
||||
Insert a single match. Will be inserted accordingly to preserve sort.
|
||||
Can be inserted as a submatch.
|
||||
"""
|
||||
if not isinstance(item, CharsetMatch):
|
||||
raise ValueError(
|
||||
"Cannot append instance '{}' to CharsetMatches".format(
|
||||
str(item.__class__)
|
||||
)
|
||||
)
|
||||
# We should disable the submatch factoring when the input file is too heavy (conserve RAM usage)
|
||||
if len(item.raw) < TOO_BIG_SEQUENCE:
|
||||
for match in self._results:
|
||||
if match.fingerprint == item.fingerprint and match.chaos == item.chaos:
|
||||
match.add_submatch(item)
|
||||
return
|
||||
self._results.append(item)
|
||||
self._results = sorted(self._results)
|
||||
|
||||
def best(self) -> CharsetMatch | None:
|
||||
"""
|
||||
Simply return the first match. Strict equivalent to matches[0].
|
||||
"""
|
||||
if not self._results:
|
||||
return None
|
||||
return self._results[0]
|
||||
|
||||
def first(self) -> CharsetMatch | None:
|
||||
"""
|
||||
Redundant method, call the method best(). Kept for BC reasons.
|
||||
"""
|
||||
return self.best()
|
||||
|
||||
|
||||
CoherenceMatch = Tuple[str, float]
|
||||
CoherenceMatches = List[CoherenceMatch]
|
||||
|
||||
|
||||
class CliDetectionResult:
|
||||
def __init__(
|
||||
self,
|
||||
path: str,
|
||||
encoding: str | None,
|
||||
encoding_aliases: list[str],
|
||||
alternative_encodings: list[str],
|
||||
language: str,
|
||||
alphabets: list[str],
|
||||
has_sig_or_bom: bool,
|
||||
chaos: float,
|
||||
coherence: float,
|
||||
unicode_path: str | None,
|
||||
is_preferred: bool,
|
||||
):
|
||||
self.path: str = path
|
||||
self.unicode_path: str | None = unicode_path
|
||||
self.encoding: str | None = encoding
|
||||
self.encoding_aliases: list[str] = encoding_aliases
|
||||
self.alternative_encodings: list[str] = alternative_encodings
|
||||
self.language: str = language
|
||||
self.alphabets: list[str] = alphabets
|
||||
self.has_sig_or_bom: bool = has_sig_or_bom
|
||||
self.chaos: float = chaos
|
||||
self.coherence: float = coherence
|
||||
self.is_preferred: bool = is_preferred
|
||||
|
||||
@property
|
||||
def __dict__(self) -> dict[str, Any]: # type: ignore
|
||||
return {
|
||||
"path": self.path,
|
||||
"encoding": self.encoding,
|
||||
"encoding_aliases": self.encoding_aliases,
|
||||
"alternative_encodings": self.alternative_encodings,
|
||||
"language": self.language,
|
||||
"alphabets": self.alphabets,
|
||||
"has_sig_or_bom": self.has_sig_or_bom,
|
||||
"chaos": self.chaos,
|
||||
"coherence": self.coherence,
|
||||
"unicode_path": self.unicode_path,
|
||||
"is_preferred": self.is_preferred,
|
||||
}
|
||||
|
||||
def to_json(self) -> str:
|
||||
return dumps(self.__dict__, ensure_ascii=True, indent=4)
|
||||
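Note: the CharsetMatch / CharsetMatches containers above are what the public API returns; a minimal consumption sketch follows (the payload is invented, property names come from the class definitions above).

from charset_normalizer import from_bytes

matches = from_bytes("Bonjour, été indien !".encode("cp1252"))
best = matches.best()                    # most probable CharsetMatch, or None

if best is not None:
    print(best.encoding)                 # guessed codec name
    print(best.could_be_from_charset)    # encodings yielding the exact same decoded text
    print(best.percent_chaos, best.percent_coherence)
    print(str(best))                     # lazily decoded payload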
408
llmlab/lib/python3.12/site-packages/charset_normalizer/utils.py
Normal file
@ -0,0 +1,408 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import importlib
|
||||
import logging
|
||||
import unicodedata
|
||||
from codecs import IncrementalDecoder
|
||||
from encodings.aliases import aliases
|
||||
from functools import lru_cache
|
||||
from re import findall
|
||||
from typing import Generator
|
||||
|
||||
from _multibytecodec import ( # type: ignore[import-not-found,import]
|
||||
MultibyteIncrementalDecoder,
|
||||
)
|
||||
|
||||
from .constant import (
|
||||
ENCODING_MARKS,
|
||||
IANA_SUPPORTED_SIMILAR,
|
||||
RE_POSSIBLE_ENCODING_INDICATION,
|
||||
UNICODE_RANGES_COMBINED,
|
||||
UNICODE_SECONDARY_RANGE_KEYWORD,
|
||||
UTF8_MAXIMAL_ALLOCATION,
|
||||
)
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def is_accentuated(character: str) -> bool:
|
||||
try:
|
||||
description: str = unicodedata.name(character)
|
||||
except ValueError: # Defensive: unicode database outdated?
|
||||
return False
|
||||
return (
|
||||
"WITH GRAVE" in description
|
||||
or "WITH ACUTE" in description
|
||||
or "WITH CEDILLA" in description
|
||||
or "WITH DIAERESIS" in description
|
||||
or "WITH CIRCUMFLEX" in description
|
||||
or "WITH TILDE" in description
|
||||
or "WITH MACRON" in description
|
||||
or "WITH RING ABOVE" in description
|
||||
)
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def remove_accent(character: str) -> str:
|
||||
decomposed: str = unicodedata.decomposition(character)
|
||||
if not decomposed:
|
||||
return character
|
||||
|
||||
codes: list[str] = decomposed.split(" ")
|
||||
|
||||
return chr(int(codes[0], 16))
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def unicode_range(character: str) -> str | None:
|
||||
"""
|
||||
Retrieve the Unicode range official name from a single character.
|
||||
"""
|
||||
character_ord: int = ord(character)
|
||||
|
||||
for range_name, ord_range in UNICODE_RANGES_COMBINED.items():
|
||||
if character_ord in ord_range:
|
||||
return range_name
|
||||
|
||||
return None
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def is_latin(character: str) -> bool:
|
||||
try:
|
||||
description: str = unicodedata.name(character)
|
||||
except ValueError: # Defensive: unicode database outdated?
|
||||
return False
|
||||
return "LATIN" in description
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def is_punctuation(character: str) -> bool:
|
||||
character_category: str = unicodedata.category(character)
|
||||
|
||||
if "P" in character_category:
|
||||
return True
|
||||
|
||||
character_range: str | None = unicode_range(character)
|
||||
|
||||
if character_range is None:
|
||||
return False
|
||||
|
||||
return "Punctuation" in character_range
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def is_symbol(character: str) -> bool:
|
||||
character_category: str = unicodedata.category(character)
|
||||
|
||||
if "S" in character_category or "N" in character_category:
|
||||
return True
|
||||
|
||||
character_range: str | None = unicode_range(character)
|
||||
|
||||
if character_range is None:
|
||||
return False
|
||||
|
||||
return "Forms" in character_range and character_category != "Lo"
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def is_emoticon(character: str) -> bool:
|
||||
character_range: str | None = unicode_range(character)
|
||||
|
||||
if character_range is None:
|
||||
return False
|
||||
|
||||
return "Emoticons" in character_range or "Pictographs" in character_range
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def is_separator(character: str) -> bool:
|
||||
if character.isspace() or character in {"|", "+", "<", ">"}:
|
||||
return True
|
||||
|
||||
character_category: str = unicodedata.category(character)
|
||||
|
||||
return "Z" in character_category or character_category in {"Po", "Pd", "Pc"}
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def is_case_variable(character: str) -> bool:
|
||||
return character.islower() != character.isupper()
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def is_cjk(character: str) -> bool:
|
||||
try:
|
||||
character_name = unicodedata.name(character)
|
||||
except ValueError: # Defensive: unicode database outdated?
|
||||
return False
|
||||
|
||||
return "CJK" in character_name
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def is_hiragana(character: str) -> bool:
|
||||
try:
|
||||
character_name = unicodedata.name(character)
|
||||
except ValueError: # Defensive: unicode database outdated?
|
||||
return False
|
||||
|
||||
return "HIRAGANA" in character_name
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def is_katakana(character: str) -> bool:
|
||||
try:
|
||||
character_name = unicodedata.name(character)
|
||||
except ValueError: # Defensive: unicode database outdated?
|
||||
return False
|
||||
|
||||
return "KATAKANA" in character_name
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def is_hangul(character: str) -> bool:
|
||||
try:
|
||||
character_name = unicodedata.name(character)
|
||||
except ValueError: # Defensive: unicode database outdated?
|
||||
return False
|
||||
|
||||
return "HANGUL" in character_name
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def is_thai(character: str) -> bool:
|
||||
try:
|
||||
character_name = unicodedata.name(character)
|
||||
except ValueError: # Defensive: unicode database outdated?
|
||||
return False
|
||||
|
||||
return "THAI" in character_name
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def is_arabic(character: str) -> bool:
|
||||
try:
|
||||
character_name = unicodedata.name(character)
|
||||
except ValueError: # Defensive: unicode database outdated?
|
||||
return False
|
||||
|
||||
return "ARABIC" in character_name
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def is_arabic_isolated_form(character: str) -> bool:
|
||||
try:
|
||||
character_name = unicodedata.name(character)
|
||||
except ValueError: # Defensive: unicode database outdated?
|
||||
return False
|
||||
|
||||
return "ARABIC" in character_name and "ISOLATED FORM" in character_name
|
||||
|
||||
|
||||
@lru_cache(maxsize=len(UNICODE_RANGES_COMBINED))
|
||||
def is_unicode_range_secondary(range_name: str) -> bool:
|
||||
return any(keyword in range_name for keyword in UNICODE_SECONDARY_RANGE_KEYWORD)
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def is_unprintable(character: str) -> bool:
|
||||
return (
|
||||
character.isspace() is False # includes \n \t \r \v
|
||||
and character.isprintable() is False
|
||||
and character != "\x1a" # Why? Its the ASCII substitute character.
|
||||
and character != "\ufeff" # bug discovered in Python,
|
||||
# Zero Width No-Break Space located in Arabic Presentation Forms-B, Unicode 1.1 not acknowledged as space.
|
||||
)
|
||||
|
||||
|
||||
def any_specified_encoding(sequence: bytes, search_zone: int = 8192) -> str | None:
|
||||
"""
|
||||
Extract using ASCII-only decoder any specified encoding in the first n-bytes.
|
||||
"""
|
||||
if not isinstance(sequence, bytes):
|
||||
raise TypeError
|
||||
|
||||
seq_len: int = len(sequence)
|
||||
|
||||
results: list[str] = findall(
|
||||
RE_POSSIBLE_ENCODING_INDICATION,
|
||||
sequence[: min(seq_len, search_zone)].decode("ascii", errors="ignore"),
|
||||
)
|
||||
|
||||
if len(results) == 0:
|
||||
return None
|
||||
|
||||
for specified_encoding in results:
|
||||
specified_encoding = specified_encoding.lower().replace("-", "_")
|
||||
|
||||
encoding_alias: str
|
||||
encoding_iana: str
|
||||
|
||||
for encoding_alias, encoding_iana in aliases.items():
|
||||
if encoding_alias == specified_encoding:
|
||||
return encoding_iana
|
||||
if encoding_iana == specified_encoding:
|
||||
return encoding_iana
|
||||
|
||||
return None
|
||||
|
||||
|
||||
@lru_cache(maxsize=128)
|
||||
def is_multi_byte_encoding(name: str) -> bool:
|
||||
"""
|
||||
Verify is a specific encoding is a multi byte one based on it IANA name
|
||||
"""
|
||||
return name in {
|
||||
"utf_8",
|
||||
"utf_8_sig",
|
||||
"utf_16",
|
||||
"utf_16_be",
|
||||
"utf_16_le",
|
||||
"utf_32",
|
||||
"utf_32_le",
|
||||
"utf_32_be",
|
||||
"utf_7",
|
||||
} or issubclass(
|
||||
importlib.import_module(f"encodings.{name}").IncrementalDecoder,
|
||||
MultibyteIncrementalDecoder,
|
||||
)
|
||||
|
||||
|
||||
def identify_sig_or_bom(sequence: bytes) -> tuple[str | None, bytes]:
|
||||
"""
|
||||
Identify and extract SIG/BOM in given sequence.
|
||||
"""
|
||||
|
||||
for iana_encoding in ENCODING_MARKS:
|
||||
marks: bytes | list[bytes] = ENCODING_MARKS[iana_encoding]
|
||||
|
||||
if isinstance(marks, bytes):
|
||||
marks = [marks]
|
||||
|
||||
for mark in marks:
|
||||
if sequence.startswith(mark):
|
||||
return iana_encoding, mark
|
||||
|
||||
return None, b""
|
||||
|
||||
|
||||
def should_strip_sig_or_bom(iana_encoding: str) -> bool:
|
||||
return iana_encoding not in {"utf_16", "utf_32"}
|
||||
|
||||
|
||||
def iana_name(cp_name: str, strict: bool = True) -> str:
|
||||
"""Returns the Python normalized encoding name (Not the IANA official name)."""
|
||||
cp_name = cp_name.lower().replace("-", "_")
|
||||
|
||||
encoding_alias: str
|
||||
encoding_iana: str
|
||||
|
||||
for encoding_alias, encoding_iana in aliases.items():
|
||||
if cp_name in [encoding_alias, encoding_iana]:
|
||||
return encoding_iana
|
||||
|
||||
if strict:
|
||||
raise ValueError(f"Unable to retrieve IANA for '{cp_name}'")
|
||||
|
||||
return cp_name
|
||||
|
||||
|
||||
def cp_similarity(iana_name_a: str, iana_name_b: str) -> float:
|
||||
if is_multi_byte_encoding(iana_name_a) or is_multi_byte_encoding(iana_name_b):
|
||||
return 0.0
|
||||
|
||||
decoder_a = importlib.import_module(f"encodings.{iana_name_a}").IncrementalDecoder
|
||||
decoder_b = importlib.import_module(f"encodings.{iana_name_b}").IncrementalDecoder
|
||||
|
||||
id_a: IncrementalDecoder = decoder_a(errors="ignore")
|
||||
id_b: IncrementalDecoder = decoder_b(errors="ignore")
|
||||
|
||||
character_match_count: int = 0
|
||||
|
||||
for i in range(255):
|
||||
to_be_decoded: bytes = bytes([i])
|
||||
if id_a.decode(to_be_decoded) == id_b.decode(to_be_decoded):
|
||||
character_match_count += 1
|
||||
|
||||
return character_match_count / 254
|
||||
|
||||
|
||||
def is_cp_similar(iana_name_a: str, iana_name_b: str) -> bool:
|
||||
"""
|
||||
Determine if two code page are at least 80% similar. IANA_SUPPORTED_SIMILAR dict was generated using
|
||||
the function cp_similarity.
|
||||
"""
|
||||
return (
|
||||
iana_name_a in IANA_SUPPORTED_SIMILAR
|
||||
and iana_name_b in IANA_SUPPORTED_SIMILAR[iana_name_a]
|
||||
)
|
||||
|
||||
|
||||
def set_logging_handler(
|
||||
name: str = "charset_normalizer",
|
||||
level: int = logging.INFO,
|
||||
format_string: str = "%(asctime)s | %(levelname)s | %(message)s",
|
||||
) -> None:
|
||||
logger = logging.getLogger(name)
|
||||
logger.setLevel(level)
|
||||
|
||||
handler = logging.StreamHandler()
|
||||
handler.setFormatter(logging.Formatter(format_string))
|
||||
logger.addHandler(handler)
|
||||
|
||||
|
||||
def cut_sequence_chunks(
|
||||
sequences: bytes,
|
||||
encoding_iana: str,
|
||||
offsets: range,
|
||||
chunk_size: int,
|
||||
bom_or_sig_available: bool,
|
||||
strip_sig_or_bom: bool,
|
||||
sig_payload: bytes,
|
||||
is_multi_byte_decoder: bool,
|
||||
decoded_payload: str | None = None,
|
||||
) -> Generator[str, None, None]:
|
||||
if decoded_payload and is_multi_byte_decoder is False:
|
||||
for i in offsets:
|
||||
chunk = decoded_payload[i : i + chunk_size]
|
||||
if not chunk:
|
||||
break
|
||||
yield chunk
|
||||
else:
|
||||
for i in offsets:
|
||||
chunk_end = i + chunk_size
|
||||
if chunk_end > len(sequences) + 8:
|
||||
continue
|
||||
|
||||
cut_sequence = sequences[i : i + chunk_size]
|
||||
|
||||
if bom_or_sig_available and strip_sig_or_bom is False:
|
||||
cut_sequence = sig_payload + cut_sequence
|
||||
|
||||
chunk = cut_sequence.decode(
|
||||
encoding_iana,
|
||||
errors="ignore" if is_multi_byte_decoder else "strict",
|
||||
)
|
||||
|
||||
# multi-byte bad cutting detector and adjustment
|
||||
# not the cleanest way to perform that fix but clever enough for now.
|
||||
if is_multi_byte_decoder and i > 0:
|
||||
chunk_partial_size_chk: int = min(chunk_size, 16)
|
||||
|
||||
if (
|
||||
decoded_payload
|
||||
and chunk[:chunk_partial_size_chk] not in decoded_payload
|
||||
):
|
||||
for j in range(i, i - 4, -1):
|
||||
cut_sequence = sequences[j:chunk_end]
|
||||
|
||||
if bom_or_sig_available and strip_sig_or_bom is False:
|
||||
cut_sequence = sig_payload + cut_sequence
|
||||
|
||||
chunk = cut_sequence.decode(encoding_iana, errors="ignore")
|
||||
|
||||
if chunk[:chunk_partial_size_chk] in decoded_payload:
|
||||
break
|
||||
|
||||
yield chunk
|
||||
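Note: a few of the helpers defined above in action; the inputs are arbitrary illustrative values and the printed results in comments are expectations, not guarantees.

from charset_normalizer.utils import (
    iana_name,
    identify_sig_or_bom,
    is_accentuated,
    unicode_range,
)

print(iana_name("ISO-8859-1"))       # normalized Python codec name (e.g. 'latin_1')
print(unicode_range("é"))            # Unicode block of the character
print(is_accentuated("é"), is_accentuated("e"))
print(identify_sig_or_bom("\ufeffhello".encode("utf_8")))  # BOM detected -> ('utf_8', b'\xef\xbb\xbf')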
@ -0,0 +1,8 @@
"""
Expose version
"""

from __future__ import annotations

__version__ = "3.4.1"
VERSION = __version__.split(".")
@ -0,0 +1 @@
import os; var = 'SETUPTOOLS_USE_DISTUTILS'; enabled = os.environ.get(var, 'local') == 'local'; enabled and __import__('_distutils_hack').add_shim();
@ -0,0 +1 @@
pip
@ -0,0 +1,31 @@
BSD 3-Clause License

Copyright (c) 2013-2024, Kim Davies and contributors.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

1. Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.

3. Neither the name of the copyright holder nor the names of its
   contributors may be used to endorse or promote products derived from
   this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
250
llmlab/lib/python3.12/site-packages/idna-3.10.dist-info/METADATA
Normal file
@ -0,0 +1,250 @@
|
||||
Metadata-Version: 2.1
Name: idna
Version: 3.10
Summary: Internationalized Domain Names in Applications (IDNA)
Author-email: Kim Davies <kim+pypi@gumleaf.org>
Requires-Python: >=3.6
Description-Content-Type: text/x-rst
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: System Administrators
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3.13
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Classifier: Topic :: Internet :: Name Service (DNS)
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: Utilities
Requires-Dist: ruff >= 0.6.2 ; extra == "all"
Requires-Dist: mypy >= 1.11.2 ; extra == "all"
Requires-Dist: pytest >= 8.3.2 ; extra == "all"
Requires-Dist: flake8 >= 7.1.1 ; extra == "all"
Project-URL: Changelog, https://github.com/kjd/idna/blob/master/HISTORY.rst
Project-URL: Issue tracker, https://github.com/kjd/idna/issues
Project-URL: Source, https://github.com/kjd/idna
Provides-Extra: all

Internationalized Domain Names in Applications (IDNA)
=====================================================

Support for the Internationalized Domain Names in
Applications (IDNA) protocol as specified in `RFC 5891
<https://tools.ietf.org/html/rfc5891>`_. This is the latest version of
the protocol and is sometimes referred to as “IDNA 2008”.

This library also provides support for Unicode Technical
Standard 46, `Unicode IDNA Compatibility Processing
<https://unicode.org/reports/tr46/>`_.

This acts as a suitable replacement for the “encodings.idna”
module that comes with the Python standard library, but which
only supports the older superseded IDNA specification (`RFC 3490
<https://tools.ietf.org/html/rfc3490>`_).

Basic functions are simply executed:

.. code-block:: pycon

    >>> import idna
    >>> idna.encode('ドメイン.テスト')
    b'xn--eckwd4c7c.xn--zckzah'
    >>> print(idna.decode('xn--eckwd4c7c.xn--zckzah'))
    ドメイン.テスト

Installation
------------

This package is available for installation from PyPI:

.. code-block:: bash

    $ python3 -m pip install idna
Usage
-----

For typical usage, the ``encode`` and ``decode`` functions will take a
domain name argument and perform a conversion to A-labels or U-labels
respectively.

.. code-block:: pycon

    >>> import idna
    >>> idna.encode('ドメイン.テスト')
    b'xn--eckwd4c7c.xn--zckzah'
    >>> print(idna.decode('xn--eckwd4c7c.xn--zckzah'))
    ドメイン.テスト

You may use the codec encoding and decoding methods using the
``idna.codec`` module:

.. code-block:: pycon

    >>> import idna.codec
    >>> print('домен.испытание'.encode('idna2008'))
    b'xn--d1acufc.xn--80akhbyknj4f'
    >>> print(b'xn--d1acufc.xn--80akhbyknj4f'.decode('idna2008'))
    домен.испытание

Conversions can be applied at a per-label basis using the ``ulabel`` or
``alabel`` functions if necessary:

.. code-block:: pycon

    >>> idna.alabel('测试')
    b'xn--0zwm56d'
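The inverse per-label conversion is ``ulabel`` (an illustrative addition,
assuming the per-label API described above; not in the original text):

.. code-block:: pycon

    >>> idna.ulabel(b'xn--0zwm56d')
    '测试'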
Compatibility Mapping (UTS #46)
+++++++++++++++++++++++++++++++

As described in `RFC 5895 <https://tools.ietf.org/html/rfc5895>`_, the
IDNA specification does not normalize input from different potential
ways a user may input a domain name. This functionality, known as
a “mapping”, is considered by the specification to be a local
user-interface issue distinct from IDNA conversion functionality.

This library provides one such mapping that was developed by the
Unicode Consortium. Known as `Unicode IDNA Compatibility Processing
<https://unicode.org/reports/tr46/>`_, it provides for both a regular
mapping for typical applications, as well as a transitional mapping to
help migrate from older IDNA 2003 applications. Strings are
preprocessed according to Section 4.4 “Preprocessing for IDNA2008”
prior to the IDNA operations.

For example, “Königsgäßchen” is not a permissible label as *LATIN
CAPITAL LETTER K* is not allowed (nor are capital letters in general).
UTS 46 will convert this into lower case prior to applying the IDNA
conversion.

.. code-block:: pycon

    >>> import idna
    >>> idna.encode('Königsgäßchen')
    ...
    idna.core.InvalidCodepoint: Codepoint U+004B at position 1 of 'Königsgäßchen' not allowed
    >>> idna.encode('Königsgäßchen', uts46=True)
    b'xn--knigsgchen-b4a3dun'
    >>> print(idna.decode('xn--knigsgchen-b4a3dun'))
    königsgäßchen

Transitional processing provides conversions to help transition from
the older 2003 standard to the current standard. For example, in the
original IDNA specification, the *LATIN SMALL LETTER SHARP S* (ß) was
converted into two *LATIN SMALL LETTER S* (ss), whereas in the current
IDNA specification this conversion is not performed.

.. code-block:: pycon

    >>> idna.encode('Königsgäßchen', uts46=True, transitional=True)
    b'xn--knigsgsschen-lcb0w'

Implementers should use transitional processing with caution, only in
rare cases where conversion from legacy labels to current labels must be
performed (i.e. IDNA implementations that pre-date 2008). For typical
applications that just need to convert labels, transitional processing
is unlikely to be beneficial and could produce unexpected incompatible
results.
``encodings.idna`` Compatibility
++++++++++++++++++++++++++++++++

Function calls from the Python built-in ``encodings.idna`` module are
mapped to their IDNA 2008 equivalents using the ``idna.compat`` module.
Simply substitute the ``import`` clause in your code to refer to the new
module name.
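For illustration (a minimal sketch assuming the ``ToASCII`` and
``ToUnicode`` wrappers exposed by ``idna.compat``; not part of the
original description):

.. code-block:: pycon

    >>> import idna.compat
    >>> idna.compat.ToASCII('ドメイン.テスト')
    b'xn--eckwd4c7c.xn--zckzah'
    >>> idna.compat.ToUnicode(b'xn--eckwd4c7c.xn--zckzah')
    'ドメイン.テスト'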
Exceptions
----------

All errors raised during conversion according to the specification are
exceptions derived from the ``idna.IDNAError`` base class.

More specific exceptions may be generated: ``idna.IDNABidiError``
when the error reflects an illegal combination of left-to-right and
right-to-left characters in a label; ``idna.InvalidCodepoint`` when
a specific codepoint is an illegal character in an IDN label (i.e.
INVALID); and ``idna.InvalidCodepointContext`` when the codepoint is
illegal based on its positional context (i.e. it is CONTEXTO or CONTEXTJ
but the contextual requirements are not satisfied).
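For example (a brief sketch reusing the ``Königsgäßchen`` input shown
earlier; the exception raised there is ``idna.InvalidCodepoint``, a
subclass of ``idna.IDNAError``):

.. code-block:: pycon

    >>> import idna
    >>> try:
    ...     idna.encode('Königsgäßchen')
    ... except idna.IDNAError as exc:
    ...     print(type(exc).__name__)
    InvalidCodepoint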
Building and Diagnostics
------------------------

The IDNA and UTS 46 functionality relies upon pre-calculated lookup
tables for performance. These tables are derived from the eligibility
criteria in the respective standards and are computed using the
command-line script ``tools/idna-data``.

This tool will fetch relevant codepoint data from the Unicode repository
and perform the required calculations to identify eligibility. There are
three main modes:

* ``idna-data make-libdata``. Generates ``idnadata.py`` and
  ``uts46data.py``, the pre-calculated lookup tables used for IDNA and
  UTS 46 conversions. Implementers who wish to track this library against
  a different Unicode version may use this tool to manually generate a
  different version of the ``idnadata.py`` and ``uts46data.py`` files.

* ``idna-data make-table``. Generate a table of the IDNA disposition
  (e.g. PVALID, CONTEXTJ, CONTEXTO) in the format found in Appendix
  B.1 of RFC 5892 and the pre-computed tables published by `IANA
  <https://www.iana.org/>`_.

* ``idna-data U+0061``. Prints debugging output on the various
  properties associated with an individual Unicode codepoint (in this
  case, U+0061), that are used to assess the IDNA and UTS 46 status of a
  codepoint. This is helpful in debugging or analysis.

The tool accepts a number of arguments, described using ``idna-data
-h``. Most notably, the ``--version`` argument allows the specification
of the version of Unicode to be used in computing the table data. For
example, ``idna-data --version 9.0.0 make-libdata`` will generate
library data against Unicode 9.0.0.
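By way of illustration, the invocations below simply restate the modes
and the ``--version`` argument described above (output omitted):

.. code-block:: bash

    $ idna-data make-libdata                  # regenerate idnadata.py / uts46data.py
    $ idna-data make-table                    # RFC 5892 Appendix B.1 style table
    $ idna-data U+0061                        # per-codepoint diagnostics
    $ idna-data --version 9.0.0 make-libdata  # build against Unicode 9.0.0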
Additional Notes
----------------

* **Packages**. The latest tagged release version is published in the
  `Python Package Index <https://pypi.org/project/idna/>`_.

* **Version support**. This library supports Python 3.6 and higher.
  As this library serves as a low-level toolkit for a variety of
  applications, many of which strive for broad compatibility with older
  Python versions, there is no rush to remove older interpreter support.
  Removing support for older versions should be well justified in that
  the maintenance burden has become too high.

* **Python 2**. Python 2 is supported by version 2.x of this library.
  Use "idna<3" in your requirements file if you need this library for
  a Python 2 application. Be advised that these versions are no longer
  actively developed.

* **Testing**. The library has a test suite based on each rule of the
  IDNA specification, as well as tests that are provided as part of the
  Unicode Technical Standard 46, `Unicode IDNA Compatibility Processing
  <https://unicode.org/reports/tr46/>`_.

* **Emoji**. Supporting emoji domains in this library is an occasional
  request. Encoding of symbols like emoji is expressly prohibited by
  the technical standard IDNA 2008, and emoji domains are broadly phased
  out across the domain industry due to associated security risks. For
  now, applications that need to support these non-compliant labels
  may wish to consider trying the encode/decode operation in this library
  first, and then falling back to using ``encodings.idna``, as sketched
  below. See `the Github project <https://github.com/kjd/idna/issues/18>`_
  for more discussion.
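A minimal sketch of that fallback pattern (the helper name is
hypothetical, and it assumes the legacy path is the built-in ``idna``
codec backed by ``encodings.idna``):

.. code-block:: python

    import idna

    def encode_with_fallback(name: str) -> bytes:
        # Hypothetical helper: prefer IDNA 2008, then fall back to the
        # legacy encodings.idna codec for labels it rejects (e.g. emoji).
        try:
            return idna.encode(name, uts46=True)
        except idna.IDNAError:
            return name.encode('idna')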
@ -0,0 +1,22 @@
idna-3.10.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
idna-3.10.dist-info/LICENSE.md,sha256=pZ8LDvNjWHQQmkRhykT_enDVBpboFHZ7-vch1Mmw2w8,1541
idna-3.10.dist-info/METADATA,sha256=URR5ZyDfQ1PCEGhkYoojqfi2Ra0tau2--lhwG4XSfjI,10158
idna-3.10.dist-info/RECORD,,
idna-3.10.dist-info/WHEEL,sha256=EZbGkh7Ie4PoZfRQ8I0ZuP9VklN_TvcZ6DSE5Uar4z4,81
idna/__init__.py,sha256=MPqNDLZbXqGaNdXxAFhiqFPKEQXju2jNQhCey6-5eJM,868
idna/__pycache__/__init__.cpython-312.pyc,,
idna/__pycache__/codec.cpython-312.pyc,,
idna/__pycache__/compat.cpython-312.pyc,,
idna/__pycache__/core.cpython-312.pyc,,
idna/__pycache__/idnadata.cpython-312.pyc,,
idna/__pycache__/intranges.cpython-312.pyc,,
idna/__pycache__/package_data.cpython-312.pyc,,
idna/__pycache__/uts46data.cpython-312.pyc,,
idna/codec.py,sha256=PEew3ItwzjW4hymbasnty2N2OXvNcgHB-JjrBuxHPYY,3422
idna/compat.py,sha256=RzLy6QQCdl9784aFhb2EX9EKGCJjg0P3PilGdeXXcx8,316
idna/core.py,sha256=YJYyAMnwiQEPjVC4-Fqu_p4CJ6yKKuDGmppBNQNQpFs,13239
idna/idnadata.py,sha256=W30GcIGvtOWYwAjZj4ZjuouUutC6ffgNuyjJy7fZ-lo,78306
idna/intranges.py,sha256=amUtkdhYcQG8Zr-CoMM_kVRacxkivC1WgxN1b63KKdU,1898
idna/package_data.py,sha256=q59S3OXsc5VI8j6vSD0sGBMyk6zZ4vWFREE88yCJYKs,21
idna/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
idna/uts46data.py,sha256=rt90K9J40gUSwppDPCrhjgi5AA6pWM65dEGRSf6rIhM,239289
@ -0,0 +1,4 @@
Wheel-Version: 1.0
Generator: flit 3.9.0
Root-Is-Purelib: true
Tag: py3-none-any
Some files were not shown because too many files have changed in this diff.