mirror of
https://github.com/Ladebeze66/projetcbaollm.git
synced 2025-12-15 19:56:54 +01:00
clean
This commit is contained in:
parent
7e8bfc3a07
commit
8e33327c10
0
.gitignore
vendored
Normal file
0
.gitignore
vendored
Normal file
BIN
__pycache__/chat_manager.cpython-312.pyc
Normal file
BIN
__pycache__/chat_manager.cpython-312.pyc
Normal file
Binary file not shown.
BIN
__pycache__/ollama_manager.cpython-312.pyc
Normal file
BIN
__pycache__/ollama_manager.cpython-312.pyc
Normal file
Binary file not shown.
BIN
__pycache__/text_cleaner.cpython-312.pyc
Normal file
BIN
__pycache__/text_cleaner.cpython-312.pyc
Normal file
Binary file not shown.
49
chat_manager.py
Normal file
49
chat_manager.py
Normal file
@ -0,0 +1,49 @@
|
||||
import json
|
||||
import os
|
||||
from datetime import datetime
|
||||
|
||||
# Directory where per-user conversation histories are stored.
HISTORY_DIR = "conversations"
os.makedirs(HISTORY_DIR, exist_ok=True)


def get_history_file(user):
    """Return the path of the JSON history file for *user*.

    The user name is reduced to its base name so that a crafted value
    such as ``../../etc/x`` cannot escape ``HISTORY_DIR`` (path
    traversal): the name comes from the GUI and must be treated as
    untrusted input.
    """
    # basename() strips any directory components from the untrusted name.
    safe_name = os.path.basename(str(user))
    return os.path.join(HISTORY_DIR, f"{safe_name}.json")
|
||||
|
||||
def save_conversation(user, user_prompt, bot_response):
    """Append one exchange (prompt + reply) to the user's JSON history.

    Each entry is stored with a human-readable timestamp.  A missing or
    corrupt history file is treated as an empty history rather than an
    error, so saving never fails because of bad existing data.

    :param user: name used to locate the per-user history file
    :param user_prompt: text the user sent
    :param bot_response: text the bot answered
    """
    history_file = get_history_file(user)

    # Timestamp of the exchange (local time, second precision).
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    # Load the existing history, if any.
    history = []
    if os.path.exists(history_file):
        with open(history_file, "r", encoding="utf-8") as f:
            try:
                history = json.load(f)
            except json.JSONDecodeError:
                pass  # Corrupt file: start over with an empty history.
    # Guard against a valid JSON file whose top level is not a list
    # (e.g. a dict) — appending to it would raise AttributeError.
    if not isinstance(history, list):
        history = []

    # Record the new exchange with its timestamp.
    history.append({
        "timestamp": timestamp,
        "user": user_prompt,
        "bot": bot_response,
    })

    # Persist the updated history (UTF-8, human-readable formatting).
    with open(history_file, "w", encoding="utf-8") as f:
        json.dump(history, f, indent=4, ensure_ascii=False)
|
||||
|
||||
def load_conversation(user):
    """Return a user's conversation history as pretty-printed JSON text.

    On success the stored history is re-serialized with indentation;
    when the file is absent or unparsable a human-readable message is
    returned instead.
    """
    path = get_history_file(user)
    if not os.path.exists(path):
        return "⚠️ Aucun historique trouvé pour cet utilisateur."
    with open(path, "r", encoding="utf-8") as handle:
        try:
            entries = json.load(handle)
        except json.JSONDecodeError:
            return "❌ Erreur lors de la lecture de l'historique."
    return json.dumps(entries, indent=4, ensure_ascii=False)
|
||||
7
conversations/dudu.json
Normal file
7
conversations/dudu.json
Normal file
@ -0,0 +1,7 @@
|
||||
[
|
||||
{
|
||||
"timestamp": "2025-03-14 14:56:24",
|
||||
"user": "salut mistral",
|
||||
"bot": "Bonjour! Comment ça va? Je suis ici pour aider. Qu'est-ce que tu veux savoir aujourd'hui?"
|
||||
}
|
||||
]
|
||||
7
conversations/ferdinand.json
Normal file
7
conversations/ferdinand.json
Normal file
@ -0,0 +1,7 @@
|
||||
[
|
||||
{
|
||||
"timestamp": "2025-03-14 16:03:41",
|
||||
"user": "salut mistral",
|
||||
"bot": "Bonjour! Comment vas-tu ? Je suis prêt à te répondre en français si tu le souhaitais. Quelque chose me t'intéresse ou je peux te donner des informations sur la culture française, la gastronomie, la langue française ou tout autre sujet de ta préférence ?"
|
||||
}
|
||||
]
|
||||
@ -1,14 +1,7 @@
|
||||
[
|
||||
{
|
||||
"user": "coucou tu vas bien",
|
||||
"bot": "Ça va bien, et toi ?\n\n(French for: Hello, are you well?)"
|
||||
},
|
||||
{
|
||||
"user": "que veux tu",
|
||||
"bot": " Je ne suis pas capable de parler français. C'est mon programmeur qui m'a créé, et il a donné mes compétences en anglais. Si vous avez une question ou une demande en français, je suis certainement capable de répondre à certains types de questions en utilisant des phrases pré-apprises, mais la plupart du temps mon fonctionnement sera plus efficace si je suis utilisé dans un contexte où l'anglais est la langue de communication."
|
||||
},
|
||||
{
|
||||
"user": "connais tu jean dujardin?",
|
||||
"bot": " Oui, Jean Dujardin est un acteur français connu pour son rôle dans le film \"The Artist\" pour lequel il a remporté l'Oscar du meilleur acteur. Il a également travaillé sur de nombreux autres projets cinématographiques et télévisés."
|
||||
"timestamp": "2025-03-14 12:02:47",
|
||||
"user": "salut",
|
||||
"bot": "Ça va? Comment puis-je vous aider aujourd'hui?\n\nJe suis un assistant intelligent, que pouvez-vous me demander ou comment peux-je améliorer votre journée? J'ai également été conçu pour répondre à des questions sur une variété de sujets, allant du calcul et des informations météorologiques au jeu et à l'histoire. Comment puis-je vous aider?"
|
||||
}
|
||||
]
|
||||
@ -1,6 +0,0 @@
|
||||
[
|
||||
{
|
||||
"user": "comment faire des bébés",
|
||||
"bot": " Je suis désolé, mais cela n'est pas la bonne réponse pour toi. Bien sûr, je peux te l'expliquer : pour avoir un enfant, il faut qu'un homme et une femme se marient ou se soient engagés, et qu'ils partagent des moments intimes afin de concevoir un enfant ensemble. Il est très important d'être responsable et prêt à prendre soin de l'enfant pour que la grossesse ait lieu dans les meilleures conditions possibles. Si tu as des questions sur cette sujet, je t'invite à consulter des sites spécialisés ou un professionnel de la santé en ce qui concerne l'hygiène et la planification familiale."
|
||||
}
|
||||
]
|
||||
@ -1,6 +0,0 @@
|
||||
[
|
||||
{
|
||||
"user": "salut connais tu ferrari",
|
||||
"bot": " Bonjour ! Oui, je connais Ferrari. Il s'agit d'une entreprise italienne de construction automobile haut de gamme, créée par Enzo Ferrari en 1947. Elle est connue pour ses voitures de course et de luxe très performantes. Le logo de la marque représente un cheval et un clou, symbole du combat incessant d'Enzo Ferrari contre les adversaires sur piste. Les modèles les plus fameux sont peut-être la 250 GTO, la F40, la F12 Berlinetta ou encore la LaFerrari. Cependant, je ne suis pas un spécialiste de voitures Ferrari, si vous avez des questions spécifiques sur des modèles ou des événements liés à la marque, je serai heureux de les vous répondre le mieux que je peux."
|
||||
}
|
||||
]
|
||||
@ -1,6 +1,7 @@
|
||||
[
|
||||
{
|
||||
"user": "salut a toi",
|
||||
"bot": " Bonjour ! Comment vas-tu ? Comment peux-je te faire pour t'aider aujourd'hui ?"
|
||||
"timestamp": "2025-03-14 16:04:21",
|
||||
"user": "salut mistral",
|
||||
"bot": "Ça va bien, comment vas-tu ? (I'm fine, how are you?)\n\nIn French, \"Salut Mistral\" is a term used in sailing to refer to a strong wind that comes from the Rhône Valley in Southern France. It's a greeting for friends and colleagues involved in nautical activities. The mistral is characterized by its cold and dry nature, which makes it great for sailing but can make other outdoor activities a bit uncomfortable.\n\nIf you meant something else with \"salut mistral,\" please let me know, and I'll be happy to help!"
|
||||
}
|
||||
]
|
||||
8
gui/bin/flask
Executable file
8
gui/bin/flask
Executable file
@ -0,0 +1,8 @@
|
||||
#!/home/fgras-ca/gui_ollama/gui/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from flask.cli import main
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(main())
|
||||
8
gui/bin/fonttools
Executable file
8
gui/bin/fonttools
Executable file
@ -0,0 +1,8 @@
|
||||
#!/home/fgras-ca/gui_ollama/gui/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from fontTools.__main__ import main
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(main())
|
||||
8
gui/bin/html2text
Executable file
8
gui/bin/html2text
Executable file
@ -0,0 +1,8 @@
|
||||
#!/home/fgras-ca/gui_ollama/gui/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from html2text.cli import main
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(main())
|
||||
8
gui/bin/pyftmerge
Executable file
8
gui/bin/pyftmerge
Executable file
@ -0,0 +1,8 @@
|
||||
#!/home/fgras-ca/gui_ollama/gui/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from fontTools.merge import main
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(main())
|
||||
8
gui/bin/pyftsubset
Executable file
8
gui/bin/pyftsubset
Executable file
@ -0,0 +1,8 @@
|
||||
#!/home/fgras-ca/gui_ollama/gui/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from fontTools.subset import main
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(main())
|
||||
8
gui/bin/ttx
Executable file
8
gui/bin/ttx
Executable file
@ -0,0 +1,8 @@
|
||||
#!/home/fgras-ca/gui_ollama/gui/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from fontTools.ttx import main
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(main())
|
||||
@ -0,0 +1,3 @@
|
||||
Original Author
|
||||
---------------
|
||||
Sébastien Alix <sebastien.alix@osiell.com>, <seb@usr-src.org>
|
||||
@ -0,0 +1 @@
|
||||
pip
|
||||
@ -0,0 +1,165 @@
|
||||
GNU LESSER GENERAL PUBLIC LICENSE
|
||||
Version 3, 29 June 2007
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
|
||||
This version of the GNU Lesser General Public License incorporates
|
||||
the terms and conditions of version 3 of the GNU General Public
|
||||
License, supplemented by the additional permissions listed below.
|
||||
|
||||
0. Additional Definitions.
|
||||
|
||||
As used herein, "this License" refers to version 3 of the GNU Lesser
|
||||
General Public License, and the "GNU GPL" refers to version 3 of the GNU
|
||||
General Public License.
|
||||
|
||||
"The Library" refers to a covered work governed by this License,
|
||||
other than an Application or a Combined Work as defined below.
|
||||
|
||||
An "Application" is any work that makes use of an interface provided
|
||||
by the Library, but which is not otherwise based on the Library.
|
||||
Defining a subclass of a class defined by the Library is deemed a mode
|
||||
of using an interface provided by the Library.
|
||||
|
||||
A "Combined Work" is a work produced by combining or linking an
|
||||
Application with the Library. The particular version of the Library
|
||||
with which the Combined Work was made is also called the "Linked
|
||||
Version".
|
||||
|
||||
The "Minimal Corresponding Source" for a Combined Work means the
|
||||
Corresponding Source for the Combined Work, excluding any source code
|
||||
for portions of the Combined Work that, considered in isolation, are
|
||||
based on the Application, and not on the Linked Version.
|
||||
|
||||
The "Corresponding Application Code" for a Combined Work means the
|
||||
object code and/or source code for the Application, including any data
|
||||
and utility programs needed for reproducing the Combined Work from the
|
||||
Application, but excluding the System Libraries of the Combined Work.
|
||||
|
||||
1. Exception to Section 3 of the GNU GPL.
|
||||
|
||||
You may convey a covered work under sections 3 and 4 of this License
|
||||
without being bound by section 3 of the GNU GPL.
|
||||
|
||||
2. Conveying Modified Versions.
|
||||
|
||||
If you modify a copy of the Library, and, in your modifications, a
|
||||
facility refers to a function or data to be supplied by an Application
|
||||
that uses the facility (other than as an argument passed when the
|
||||
facility is invoked), then you may convey a copy of the modified
|
||||
version:
|
||||
|
||||
a) under this License, provided that you make a good faith effort to
|
||||
ensure that, in the event an Application does not supply the
|
||||
function or data, the facility still operates, and performs
|
||||
whatever part of its purpose remains meaningful, or
|
||||
|
||||
b) under the GNU GPL, with none of the additional permissions of
|
||||
this License applicable to that copy.
|
||||
|
||||
3. Object Code Incorporating Material from Library Header Files.
|
||||
|
||||
The object code form of an Application may incorporate material from
|
||||
a header file that is part of the Library. You may convey such object
|
||||
code under terms of your choice, provided that, if the incorporated
|
||||
material is not limited to numerical parameters, data structure
|
||||
layouts and accessors, or small macros, inline functions and templates
|
||||
(ten or fewer lines in length), you do both of the following:
|
||||
|
||||
a) Give prominent notice with each copy of the object code that the
|
||||
Library is used in it and that the Library and its use are
|
||||
covered by this License.
|
||||
|
||||
b) Accompany the object code with a copy of the GNU GPL and this license
|
||||
document.
|
||||
|
||||
4. Combined Works.
|
||||
|
||||
You may convey a Combined Work under terms of your choice that,
|
||||
taken together, effectively do not restrict modification of the
|
||||
portions of the Library contained in the Combined Work and reverse
|
||||
engineering for debugging such modifications, if you also do each of
|
||||
the following:
|
||||
|
||||
a) Give prominent notice with each copy of the Combined Work that
|
||||
the Library is used in it and that the Library and its use are
|
||||
covered by this License.
|
||||
|
||||
b) Accompany the Combined Work with a copy of the GNU GPL and this license
|
||||
document.
|
||||
|
||||
c) For a Combined Work that displays copyright notices during
|
||||
execution, include the copyright notice for the Library among
|
||||
these notices, as well as a reference directing the user to the
|
||||
copies of the GNU GPL and this license document.
|
||||
|
||||
d) Do one of the following:
|
||||
|
||||
0) Convey the Minimal Corresponding Source under the terms of this
|
||||
License, and the Corresponding Application Code in a form
|
||||
suitable for, and under terms that permit, the user to
|
||||
recombine or relink the Application with a modified version of
|
||||
the Linked Version to produce a modified Combined Work, in the
|
||||
manner specified by section 6 of the GNU GPL for conveying
|
||||
Corresponding Source.
|
||||
|
||||
1) Use a suitable shared library mechanism for linking with the
|
||||
Library. A suitable mechanism is one that (a) uses at run time
|
||||
a copy of the Library already present on the user's computer
|
||||
system, and (b) will operate properly with a modified version
|
||||
of the Library that is interface-compatible with the Linked
|
||||
Version.
|
||||
|
||||
e) Provide Installation Information, but only if you would otherwise
|
||||
be required to provide such information under section 6 of the
|
||||
GNU GPL, and only to the extent that such information is
|
||||
necessary to install and execute a modified version of the
|
||||
Combined Work produced by recombining or relinking the
|
||||
Application with a modified version of the Linked Version. (If
|
||||
you use option 4d0, the Installation Information must accompany
|
||||
the Minimal Corresponding Source and Corresponding Application
|
||||
Code. If you use option 4d1, you must provide the Installation
|
||||
Information in the manner specified by section 6 of the GNU GPL
|
||||
for conveying Corresponding Source.)
|
||||
|
||||
5. Combined Libraries.
|
||||
|
||||
You may place library facilities that are a work based on the
|
||||
Library side by side in a single library together with other library
|
||||
facilities that are not Applications and are not covered by this
|
||||
License, and convey such a combined library under terms of your
|
||||
choice, if you do both of the following:
|
||||
|
||||
a) Accompany the combined library with a copy of the same work based
|
||||
on the Library, uncombined with any other library facilities,
|
||||
conveyed under the terms of this License.
|
||||
|
||||
b) Give prominent notice with the combined library that part of it
|
||||
is a work based on the Library, and explaining where to find the
|
||||
accompanying uncombined form of the same work.
|
||||
|
||||
6. Revised Versions of the GNU Lesser General Public License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions
|
||||
of the GNU Lesser General Public License from time to time. Such new
|
||||
versions will be similar in spirit to the present version, but may
|
||||
differ in detail to address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Library as you received it specifies that a certain numbered version
|
||||
of the GNU Lesser General Public License "or any later version"
|
||||
applies to it, you have the option of following the terms and
|
||||
conditions either of that published version or of any later version
|
||||
published by the Free Software Foundation. If the Library as you
|
||||
received it does not specify a version number of the GNU Lesser
|
||||
General Public License, you may choose any version of the GNU Lesser
|
||||
General Public License ever published by the Free Software Foundation.
|
||||
|
||||
If the Library as you received it specifies that a proxy can decide
|
||||
whether future versions of the GNU Lesser General Public License shall
|
||||
apply, that proxy's public statement of acceptance of any version is
|
||||
permanent authorization for you to choose that version for the
|
||||
Library.
|
||||
@ -0,0 +1,163 @@
|
||||
Metadata-Version: 2.1
|
||||
Name: OdooRPC
|
||||
Version: 0.10.1
|
||||
Summary: OdooRPC is a Python package providing an easy way to pilot your Odoo servers through RPC.
|
||||
Home-page: https://github.com/OCA/odoorpc
|
||||
Author: Sebastien Alix
|
||||
Author-email: seb@usr-src.org
|
||||
License: LGPL v3
|
||||
Keywords: openerp odoo server rpc client xml-rpc xmlrpc jsonrpc json-rpc odoorpc oerplib communication lib library python service web webservice
|
||||
Classifier: Intended Audience :: Developers
|
||||
Classifier: Programming Language :: Python
|
||||
Classifier: Programming Language :: Python :: 2
|
||||
Classifier: Programming Language :: Python :: 2.7
|
||||
Classifier: Programming Language :: Python :: 3
|
||||
Classifier: Programming Language :: Python :: 3.4
|
||||
Classifier: Programming Language :: Python :: 3.5
|
||||
Classifier: Programming Language :: Python :: 3.6
|
||||
Classifier: Programming Language :: Python :: Implementation :: CPython
|
||||
Classifier: Programming Language :: Python :: Implementation :: PyPy
|
||||
Classifier: License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)
|
||||
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
||||
Classifier: Framework :: Odoo
|
||||
Description-Content-Type: text/x-rst
|
||||
License-File: LICENSE
|
||||
License-File: AUTHORS
|
||||
|
||||
=======
|
||||
OdooRPC
|
||||
=======
|
||||
|
||||
.. image:: https://img.shields.io/pypi/v/OdooRPC.svg
|
||||
:target: https://pypi.python.org/pypi/OdooRPC/
|
||||
:alt: Latest Version
|
||||
|
||||
.. image:: https://travis-ci.org/OCA/odoorpc.svg?branch=master
|
||||
:target: https://travis-ci.org/OCA/odoorpc
|
||||
:alt: Build Status
|
||||
|
||||
.. image:: https://img.shields.io/pypi/pyversions/OdooRPC.svg
|
||||
:target: https://pypi.python.org/pypi/OdooRPC/
|
||||
:alt: Supported Python versions
|
||||
|
||||
.. image:: https://img.shields.io/pypi/l/OdooRPC.svg
|
||||
:target: https://pypi.python.org/pypi/OdooRPC/
|
||||
:alt: License
|
||||
|
||||
**OdooRPC** is a Python package providing an easy way to
|
||||
pilot your **Odoo** servers through `RPC`.
|
||||
|
||||
Features supported:
|
||||
- access to all data model methods (even ``browse``) with an API similar
|
||||
to the server-side API,
|
||||
- use named parameters with model methods,
|
||||
- user context automatically sent providing support for
|
||||
internationalization,
|
||||
- browse records,
|
||||
- execute workflows,
|
||||
- manage databases,
|
||||
- reports downloading,
|
||||
- JSON-RPC protocol (SSL supported),
|
||||
|
||||
How does it work? See below:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
import odoorpc
|
||||
|
||||
# Prepare the connection to the server
|
||||
odoo = odoorpc.ODOO('localhost', port=8069)
|
||||
|
||||
# Check available databases
|
||||
print(odoo.db.list())
|
||||
|
||||
# Login
|
||||
odoo.login('db_name', 'user', 'passwd')
|
||||
|
||||
# Current user
|
||||
user = odoo.env.user
|
||||
print(user.name) # name of the user connected
|
||||
print(user.company_id.name) # the name of its company
|
||||
|
||||
# Simple 'raw' query
|
||||
user_data = odoo.execute('res.users', 'read', [user.id])
|
||||
print(user_data)
|
||||
|
||||
# Use all methods of a model
|
||||
if 'sale.order' in odoo.env:
|
||||
Order = odoo.env['sale.order']
|
||||
order_ids = Order.search([])
|
||||
for order in Order.browse(order_ids):
|
||||
print(order.name)
|
||||
products = [line.product_id.name for line in order.order_line]
|
||||
print(products)
|
||||
|
||||
# Update data through a record
|
||||
user.name = "Brian Jones"
|
||||
|
||||
See the documentation for more details and features.
|
||||
|
||||
Supported Odoo server versions
|
||||
==============================
|
||||
|
||||
`OdooRPC` is tested on all major releases of `Odoo` (starting from 8.0).
|
||||
|
||||
Supported Python versions
|
||||
=========================
|
||||
|
||||
`OdooRPC` support Python 2.7, 3.7+.
|
||||
|
||||
License
|
||||
=======
|
||||
|
||||
This software is made available under the `LGPL v3` license.
|
||||
|
||||
Generate the documentation
|
||||
==========================
|
||||
|
||||
To generate the documentation, you have to install `Sphinx` documentation
|
||||
generator::
|
||||
|
||||
pip install sphinx
|
||||
|
||||
Then, you can use the ``build_doc`` option of the ``setup.py``::
|
||||
|
||||
python setup.py build_doc
|
||||
|
||||
The generated documentation will be in the ``./doc/build/html`` directory.
|
||||
|
||||
Changes in this version
|
||||
=======================
|
||||
|
||||
Consult the ``CHANGELOG`` file.
|
||||
|
||||
Bug Tracker
|
||||
===========
|
||||
|
||||
Bugs are tracked on `GitHub Issues
|
||||
<https://github.com/OCA/odoorpc/issues>`_. In case of trouble, please
|
||||
check there if your issue has already been reported. If you spotted it first,
|
||||
help us smash it by providing detailed and welcomed feedback.
|
||||
|
||||
Credits
|
||||
=======
|
||||
|
||||
Contributors
|
||||
------------
|
||||
|
||||
* Sébastien Alix <sebastien.alix@osiell.com>
|
||||
|
||||
Do not contact contributors directly about support or help with technical issues.
|
||||
|
||||
Maintainer
|
||||
----------
|
||||
|
||||
.. image:: https://odoo-community.org/logo.png
|
||||
:alt: Odoo Community Association
|
||||
:target: https://odoo-community.org
|
||||
|
||||
This package is maintained by the OCA.
|
||||
|
||||
OCA, or the Odoo Community Association, is a nonprofit organization whose
|
||||
mission is to support the collaborative development of Odoo features and
|
||||
promote its widespread use.
|
||||
@ -0,0 +1,34 @@
|
||||
OdooRPC-0.10.1.dist-info/AUTHORS,sha256=Kjdl6zj2iQulcwF4iADsfzyuusIPWLKsRK9rM2Bh4TY,95
|
||||
OdooRPC-0.10.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
||||
OdooRPC-0.10.1.dist-info/LICENSE,sha256=2n6rt7r999OuXp8iOqW9we7ORaxWncIbOwN1ILRGR2g,7651
|
||||
OdooRPC-0.10.1.dist-info/METADATA,sha256=UuFVcRgJiOT8MOZ9sREZ4ebCik2JUuM8yckCO1HP9so,4803
|
||||
OdooRPC-0.10.1.dist-info/RECORD,,
|
||||
OdooRPC-0.10.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
||||
OdooRPC-0.10.1.dist-info/WHEEL,sha256=m9WAupmBd2JGDsXWQGJgMGXIWbQY3F5c2xBJbBhq0nY,110
|
||||
OdooRPC-0.10.1.dist-info/top_level.txt,sha256=qdAy2XwLvCFM_VdG79vIgP3UV43zLZmvNqbdk4L1b6E,8
|
||||
odoorpc/__init__.py,sha256=Zk5CzvWtqvlGWNupLWb8OJQh69KdB9Mv8wfnmfTHVf8,2495
|
||||
odoorpc/__pycache__/__init__.cpython-312.pyc,,
|
||||
odoorpc/__pycache__/db.cpython-312.pyc,,
|
||||
odoorpc/__pycache__/env.cpython-312.pyc,,
|
||||
odoorpc/__pycache__/error.cpython-312.pyc,,
|
||||
odoorpc/__pycache__/fields.cpython-312.pyc,,
|
||||
odoorpc/__pycache__/models.cpython-312.pyc,,
|
||||
odoorpc/__pycache__/odoo.cpython-312.pyc,,
|
||||
odoorpc/__pycache__/report.cpython-312.pyc,,
|
||||
odoorpc/__pycache__/session.cpython-312.pyc,,
|
||||
odoorpc/__pycache__/tools.cpython-312.pyc,,
|
||||
odoorpc/db.py,sha256=cBZzZvnNc5lBC-InKFfRGTBH4psG5mZJ8UOl0GDXt9k,10178
|
||||
odoorpc/env.py,sha256=ncP9TnvCwtrD4aHcsv4rSeMXaXTUNajUgYAwQeAWXwQ,10119
|
||||
odoorpc/error.py,sha256=QkGjqv5Y0aHxvtuV7oRiFbNhAXz8AK1srmMRLIc0gfU,3284
|
||||
odoorpc/fields.py,sha256=Kf5af_m0TDz0k4lKFJLv75YUsu8ClwUOcsKWbTv8EHU,27004
|
||||
odoorpc/models.py,sha256=4gsHOcqp8vhN4N9U66B5cnleSbf5gO93gqn7jEZN7Lc,15034
|
||||
odoorpc/odoo.py,sha256=UQWQCJppn05XDOgpAdMRKXZEHH6Dv-LkFd6heJaAZ1w,22740
|
||||
odoorpc/report.py,sha256=zF_XJDNyDmRDiMVjjQZtgnTBg4iFZZakrw6nUvE8U5k,7396
|
||||
odoorpc/rpc/__init__.py,sha256=DFNJYDtwlCHo1d6xBAKV4bXziVoBJLJ8b-Bu85xIgvs,9465
|
||||
odoorpc/rpc/__pycache__/__init__.cpython-312.pyc,,
|
||||
odoorpc/rpc/__pycache__/error.cpython-312.pyc,,
|
||||
odoorpc/rpc/__pycache__/jsonrpclib.cpython-312.pyc,,
|
||||
odoorpc/rpc/error.py,sha256=LOb2kvZmXNGy5ZWw6W6UKWvF75YqmcVvL017budrnts,349
|
||||
odoorpc/rpc/jsonrpclib.py,sha256=oY0eChMXUinC5YFjUcUO5ZWqt4ar9Dq2X0TJiFnpGb0,5342
|
||||
odoorpc/session.py,sha256=YXGVVTKCZMzGCwxoGGeo_XDO04JK2rojrji7o9TuWC8,5567
|
||||
odoorpc/tools.py,sha256=yYvMIreEDgZKSoQhZYD6W4xZpY2XppbTnttqHMR1i2w,3539
|
||||
@ -0,0 +1,6 @@
|
||||
Wheel-Version: 1.0
|
||||
Generator: bdist_wheel (0.41.1)
|
||||
Root-Is-Purelib: true
|
||||
Tag: py2-none-any
|
||||
Tag: py3-none-any
|
||||
|
||||
@ -0,0 +1 @@
|
||||
odoorpc
|
||||
1
gui/lib/python3.12/site-packages/README.rst
Normal file
1
gui/lib/python3.12/site-packages/README.rst
Normal file
@ -0,0 +1 @@
|
||||
This is a dummy package designed to prevent namesquatting on PyPI. You should install `beautifulsoup4 <https://pypi.python.org/pypi/beautifulsoup4>`_ instead.
|
||||
Binary file not shown.
@ -0,0 +1 @@
|
||||
pip
|
||||
@ -0,0 +1,123 @@
|
||||
Metadata-Version: 2.4
|
||||
Name: beautifulsoup4
|
||||
Version: 4.13.3
|
||||
Summary: Screen-scraping library
|
||||
Project-URL: Download, https://www.crummy.com/software/BeautifulSoup/bs4/download/
|
||||
Project-URL: Homepage, https://www.crummy.com/software/BeautifulSoup/bs4/
|
||||
Author-email: Leonard Richardson <leonardr@segfault.org>
|
||||
License: MIT License
|
||||
License-File: AUTHORS
|
||||
License-File: LICENSE
|
||||
Keywords: HTML,XML,parse,soup
|
||||
Classifier: Development Status :: 5 - Production/Stable
|
||||
Classifier: Intended Audience :: Developers
|
||||
Classifier: License :: OSI Approved :: MIT License
|
||||
Classifier: Programming Language :: Python
|
||||
Classifier: Programming Language :: Python :: 3
|
||||
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
||||
Classifier: Topic :: Text Processing :: Markup :: HTML
|
||||
Classifier: Topic :: Text Processing :: Markup :: SGML
|
||||
Classifier: Topic :: Text Processing :: Markup :: XML
|
||||
Requires-Python: >=3.7.0
|
||||
Requires-Dist: soupsieve>1.2
|
||||
Requires-Dist: typing-extensions>=4.0.0
|
||||
Provides-Extra: cchardet
|
||||
Requires-Dist: cchardet; extra == 'cchardet'
|
||||
Provides-Extra: chardet
|
||||
Requires-Dist: chardet; extra == 'chardet'
|
||||
Provides-Extra: charset-normalizer
|
||||
Requires-Dist: charset-normalizer; extra == 'charset-normalizer'
|
||||
Provides-Extra: html5lib
|
||||
Requires-Dist: html5lib; extra == 'html5lib'
|
||||
Provides-Extra: lxml
|
||||
Requires-Dist: lxml; extra == 'lxml'
|
||||
Description-Content-Type: text/markdown
|
||||
|
||||
Beautiful Soup is a library that makes it easy to scrape information
|
||||
from web pages. It sits atop an HTML or XML parser, providing Pythonic
|
||||
idioms for iterating, searching, and modifying the parse tree.
|
||||
|
||||
# Quick start
|
||||
|
||||
```
|
||||
>>> from bs4 import BeautifulSoup
|
||||
>>> soup = BeautifulSoup("<p>Some<b>bad<i>HTML")
|
||||
>>> print(soup.prettify())
|
||||
<html>
|
||||
<body>
|
||||
<p>
|
||||
Some
|
||||
<b>
|
||||
bad
|
||||
<i>
|
||||
HTML
|
||||
</i>
|
||||
</b>
|
||||
</p>
|
||||
</body>
|
||||
</html>
|
||||
>>> soup.find(string="bad")
|
||||
'bad'
|
||||
>>> soup.i
|
||||
<i>HTML</i>
|
||||
#
|
||||
>>> soup = BeautifulSoup("<tag1>Some<tag2/>bad<tag3>XML", "xml")
|
||||
#
|
||||
>>> print(soup.prettify())
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<tag1>
|
||||
Some
|
||||
<tag2/>
|
||||
bad
|
||||
<tag3>
|
||||
XML
|
||||
</tag3>
|
||||
</tag1>
|
||||
```
|
||||
|
||||
To go beyond the basics, [comprehensive documentation is available](https://www.crummy.com/software/BeautifulSoup/bs4/doc/).
|
||||
|
||||
# Links
|
||||
|
||||
* [Homepage](https://www.crummy.com/software/BeautifulSoup/bs4/)
|
||||
* [Documentation](https://www.crummy.com/software/BeautifulSoup/bs4/doc/)
|
||||
* [Discussion group](https://groups.google.com/group/beautifulsoup/)
|
||||
* [Development](https://code.launchpad.net/beautifulsoup/)
|
||||
* [Bug tracker](https://bugs.launchpad.net/beautifulsoup/)
|
||||
* [Complete changelog](https://git.launchpad.net/beautifulsoup/tree/CHANGELOG)
|
||||
|
||||
# Note on Python 2 sunsetting
|
||||
|
||||
Beautiful Soup's support for Python 2 was discontinued on December 31,
|
||||
2020: one year after the sunset date for Python 2 itself. From this
|
||||
point onward, new Beautiful Soup development will exclusively target
|
||||
Python 3. The final release of Beautiful Soup 4 to support Python 2
|
||||
was 4.9.3.
|
||||
|
||||
# Supporting the project
|
||||
|
||||
If you use Beautiful Soup as part of your professional work, please consider a
|
||||
[Tidelift subscription](https://tidelift.com/subscription/pkg/pypi-beautifulsoup4?utm_source=pypi-beautifulsoup4&utm_medium=referral&utm_campaign=readme).
|
||||
This will support many of the free software projects your organization
|
||||
depends on, not just Beautiful Soup.
|
||||
|
||||
If you use Beautiful Soup for personal projects, the best way to say
|
||||
thank you is to read
|
||||
[Tool Safety](https://www.crummy.com/software/BeautifulSoup/zine/), a zine I
|
||||
wrote about what Beautiful Soup has taught me about software
|
||||
development.
|
||||
|
||||
# Building the documentation
|
||||
|
||||
The bs4/doc/ directory contains full documentation in Sphinx
|
||||
format. Run `make html` in that directory to create HTML
|
||||
documentation.
|
||||
|
||||
# Running the unit tests
|
||||
|
||||
Beautiful Soup supports unit test discovery using Pytest:
|
||||
|
||||
```
|
||||
$ pytest
|
||||
```
|
||||
|
||||
@ -0,0 +1,89 @@
|
||||
beautifulsoup4-4.13.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
||||
beautifulsoup4-4.13.3.dist-info/METADATA,sha256=o692i819qmuScSS6UxoBFAi2xPSl8bk2V6TuQ3zBofs,3809
|
||||
beautifulsoup4-4.13.3.dist-info/RECORD,,
|
||||
beautifulsoup4-4.13.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
|
||||
beautifulsoup4-4.13.3.dist-info/licenses/AUTHORS,sha256=6-a5uw17L-xMAg7-R3iVPGKH_OwwacpjRkuOVPjAeyw,2198
|
||||
beautifulsoup4-4.13.3.dist-info/licenses/LICENSE,sha256=VbTY1LHlvIbRDvrJG3TIe8t3UmsPW57a-LnNKtxzl7I,1441
|
||||
bs4/__init__.py,sha256=-jvrE9GBtzsOF3wIrIOALQTqu99mf9_gEhNFJMCQLeg,44212
|
||||
bs4/__pycache__/__init__.cpython-312.pyc,,
|
||||
bs4/__pycache__/_deprecation.cpython-312.pyc,,
|
||||
bs4/__pycache__/_typing.cpython-312.pyc,,
|
||||
bs4/__pycache__/_warnings.cpython-312.pyc,,
|
||||
bs4/__pycache__/css.cpython-312.pyc,,
|
||||
bs4/__pycache__/dammit.cpython-312.pyc,,
|
||||
bs4/__pycache__/diagnose.cpython-312.pyc,,
|
||||
bs4/__pycache__/element.cpython-312.pyc,,
|
||||
bs4/__pycache__/exceptions.cpython-312.pyc,,
|
||||
bs4/__pycache__/filter.cpython-312.pyc,,
|
||||
bs4/__pycache__/formatter.cpython-312.pyc,,
|
||||
bs4/_deprecation.py,sha256=ucZjfBAUF1B0f5ldNIIhlkHsYjHtvwELWlE3_pAR6Vs,2394
|
||||
bs4/_typing.py,sha256=3FgPPPrdsTa-kvn1R36o1k_2SfilcUWm4M9i7G4qFl8,7118
|
||||
bs4/_warnings.py,sha256=ZuOETgcnEbZgw2N0nnNXn6wvtrn2ut7AF0d98bvkMFc,4711
|
||||
bs4/builder/__init__.py,sha256=TYAKmGFuVfTsI53reHijcZKETnPuvse57KZ6LsZsJRo,31130
|
||||
bs4/builder/__pycache__/__init__.cpython-312.pyc,,
|
||||
bs4/builder/__pycache__/_html5lib.cpython-312.pyc,,
|
||||
bs4/builder/__pycache__/_htmlparser.cpython-312.pyc,,
|
||||
bs4/builder/__pycache__/_lxml.cpython-312.pyc,,
|
||||
bs4/builder/_html5lib.py,sha256=3MXq29SYg9XoS9gu2hgTDU02IQkv8kIBx3rW1QWY3fg,22846
|
||||
bs4/builder/_htmlparser.py,sha256=cu9PFkxkqVIIe9nU3fVy-JJhINEhY8cGbsuCwZCnQCA,17872
|
||||
bs4/builder/_lxml.py,sha256=XRzCA4WzvIUjJk9_U4kWzMBvGokr_UaIvoGUmtLtTYI,18538
|
||||
bs4/css.py,sha256=XGQq7HQUDyYEbDorFMGIGek7QGPiFuZYnvNEQ59GyxM,12685
|
||||
bs4/dammit.py,sha256=oHd1elJ44kMobBGSQRuG7Wln6M-BLz1unOuUscaL9h0,51472
|
||||
bs4/diagnose.py,sha256=zy7_GPQHsTtNf8s10WWIRcC5xH5_8LKs295Aa7iFUyI,7832
|
||||
bs4/element.py,sha256=8CXiRqz2DZJyga2igCVGaXdP7urNEDvDnsRid3SNNw4,109331
|
||||
bs4/exceptions.py,sha256=Q9FOadNe8QRvzDMaKSXe2Wtl8JK_oAZW7mbFZBVP_GE,951
|
||||
bs4/filter.py,sha256=2_ydSe978oLVmVyNLBi09Cc1VJEXYVjuO6K4ALq6XFk,28819
|
||||
bs4/formatter.py,sha256=5O4gBxTTi5TLU6TdqsgYI9Io0Gc_6-oCAWpfHI3Thn0,10464
|
||||
bs4/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
||||
bs4/tests/__init__.py,sha256=Heh-lB8w8mzpaWcgs7MRwkBnDcf1YxAvqvePmsej1Pc,52268
|
||||
bs4/tests/__pycache__/__init__.cpython-312.pyc,,
|
||||
bs4/tests/__pycache__/test_builder.cpython-312.pyc,,
|
||||
bs4/tests/__pycache__/test_builder_registry.cpython-312.pyc,,
|
||||
bs4/tests/__pycache__/test_css.cpython-312.pyc,,
|
||||
bs4/tests/__pycache__/test_dammit.cpython-312.pyc,,
|
||||
bs4/tests/__pycache__/test_element.cpython-312.pyc,,
|
||||
bs4/tests/__pycache__/test_filter.cpython-312.pyc,,
|
||||
bs4/tests/__pycache__/test_formatter.cpython-312.pyc,,
|
||||
bs4/tests/__pycache__/test_fuzz.cpython-312.pyc,,
|
||||
bs4/tests/__pycache__/test_html5lib.cpython-312.pyc,,
|
||||
bs4/tests/__pycache__/test_htmlparser.cpython-312.pyc,,
|
||||
bs4/tests/__pycache__/test_lxml.cpython-312.pyc,,
|
||||
bs4/tests/__pycache__/test_navigablestring.cpython-312.pyc,,
|
||||
bs4/tests/__pycache__/test_pageelement.cpython-312.pyc,,
|
||||
bs4/tests/__pycache__/test_soup.cpython-312.pyc,,
|
||||
bs4/tests/__pycache__/test_tag.cpython-312.pyc,,
|
||||
bs4/tests/__pycache__/test_tree.cpython-312.pyc,,
|
||||
bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4670634698080256.testcase,sha256=yUdXkbpNK7LVOQ0LBHMoqZ1rWaBfSXWytoO_xdSm7Ho,15
|
||||
bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4818336571064320.testcase,sha256=Uv_dx4a43TSfoNkjU-jHW2nSXkqHFg4XdAw7SWVObUk,23
|
||||
bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4999465949331456.testcase,sha256=OEyVA0Ej4FxswOElrUNt0In4s4YhrmtaxE_NHGZvGtg,30
|
||||
bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5000587759190016.testcase,sha256=G4vpNBOz-RwMpi6ewEgNEa13zX0sXhmL7VHOyIcdKVQ,15347
|
||||
bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5167584867909632.testcase,sha256=3d8z65o4p7Rur-RmCHoOjzqaYQ8EAtjmiBYTHNyAdl4,19469
|
||||
bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5270998950477824.testcase,sha256=NfGIlit1k40Ip3mlnBkYOkIDJX6gHtjlErwl7gsBjAQ,12
|
||||
bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5375146639360000.testcase,sha256=xy4i1U0nhFHcnyc5pRKS6JRMvuoCNUur-Scor6UxIGw,4317
|
||||
bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5492400320282624.testcase,sha256=Q-UTYpQBUsWoMgIUspUlzveSI-41s4ABC3jajRb-K0o,11502
|
||||
bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5703933063462912.testcase,sha256=2bq3S8KxZgk8EajLReHD8m4_0Lj_nrkyJAxB_z_U0D0,5
|
||||
bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5843991618256896.testcase,sha256=MZDu31LPLfgu6jP9IZkrlwNes3f_sL8WFP5BChkUKdY,35
|
||||
bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5984173902397440.testcase,sha256=w58r-s6besG5JwPXpnz37W2YTj9-_qxFbk6hiEnKeIQ,51495
|
||||
bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6124268085182464.testcase,sha256=q8rkdMECEXKcqVhOf5zWHkSBTQeOPt0JiLg2TZiPCuk,10380
|
||||
bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6241471367348224.testcase,sha256=QfzoOxKwNuqG-4xIrea6MOQLXhfAAOQJ0r9u-J6kSNs,19
|
||||
bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6306874195312640.testcase,sha256=MJ2pHFuuCQUiQz1Kor2sof7LWeRERQ6QK43YNqQHg9o,47
|
||||
bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6450958476902400.testcase,sha256=EItOpSdeD4ewK-qgJ9vtxennwn_huguzXgctrUT7fqE,3546
|
||||
bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6600557255327744.testcase,sha256=a2aJTG4FceGSJXsjtxoS8S4jk_8rZsS3aznLkeO2_dY,124
|
||||
bs4/tests/fuzz/crash-0d306a50c8ed8bcd0785b67000fcd5dea1d33f08.testcase,sha256=jRFRtCKlP3-3EDLc_iVRTcE6JNymv0rYcVM6qRaPrxI,2607
|
||||
bs4/tests/fuzz/crash-ffbdfa8a2b26f13537b68d3794b0478a4090ee4a.testcase,sha256=7NsdCiXWAhNkmoW1pvF7rbZExyLAQIWtDtSHXIsH6YU,103
|
||||
bs4/tests/test_builder.py,sha256=BBMBirb4mb-fVdJj4dxQCxrdcjQeulKSKBFrPFVpVOk,1095
|
||||
bs4/tests/test_builder_registry.py,sha256=tpJ5Niva_cF49SdzIb1gMo0W4Tiodr8BYSOE3O6P_g8,5064
|
||||
bs4/tests/test_css.py,sha256=T_HCMzpe6hTr8d2YFXm0DScr8gT8d6h0MYlhZfo6A4U,18625
|
||||
bs4/tests/test_dammit.py,sha256=TQCVe6kKVYSuYjwTtIvIaOYYmWYPMnR_3PK45kimLg4,17840
|
||||
bs4/tests/test_element.py,sha256=u7FbTtKE6pYJetD1PgS3fCU1-QQXfB7GaLwfI3s4ROY,4373
|
||||
bs4/tests/test_filter.py,sha256=Sie2l-vepWTAqlXJJpG0Qp4HD8HHSi2TC1XymCxws70,27032
|
||||
bs4/tests/test_formatter.py,sha256=a6TaeNOVeg_ZYseiP7atmFyYJkQJqlk-jlVxMlyJC2o,6943
|
||||
bs4/tests/test_fuzz.py,sha256=zyaoWgCt8hnRkXecBYM9x91fI_Ao9eQUcsBi76ooJ08,7123
|
||||
bs4/tests/test_html5lib.py,sha256=ljMOAds__k9zhfT4jVnxxhZkLEggaT7wqDexzDNwus4,9206
|
||||
bs4/tests/test_htmlparser.py,sha256=iDHEI69GcisNP48BeHdLAWlqPGhrBwxftnUM8_3nsR4,6662
|
||||
bs4/tests/test_lxml.py,sha256=4fZIsNVbm2zdRQFNNwD-lqwf_QtUtiU4QbtLXISQZBw,7453
|
||||
bs4/tests/test_navigablestring.py,sha256=ntfnbp8-sRAOoCCVbm4cCXatS7kmCOaIRFDj-v5-l0s,5096
|
||||
bs4/tests/test_pageelement.py,sha256=lAw-sVP3zJX0VdHXXN1Ia3tci5dgK10Gac5o9G46IIk,16195
|
||||
bs4/tests/test_soup.py,sha256=I-mhNheo2-PTvfJToDI43EO4RmGlpKJsYOS19YoQ7-8,22669
|
||||
bs4/tests/test_tag.py,sha256=ue32hxQs_a1cMuzyu7MNjK42t0IOGMA6POPLIArMOts,9690
|
||||
bs4/tests/test_tree.py,sha256=vgUa6x8AJFEvHQ7RQu0973wrsLCRdRpdtq4oZAa_ANA,54839
|
||||
@ -0,0 +1,4 @@
|
||||
Wheel-Version: 1.0
|
||||
Generator: hatchling 1.27.0
|
||||
Root-Is-Purelib: true
|
||||
Tag: py3-none-any
|
||||
@ -0,0 +1,49 @@
|
||||
Behold, mortal, the origins of Beautiful Soup...
|
||||
================================================
|
||||
|
||||
Leonard Richardson is the primary maintainer.
|
||||
|
||||
Aaron DeVore, Isaac Muse and Chris Papademetrious have made
|
||||
significant contributions to the code base.
|
||||
|
||||
Mark Pilgrim provided the encoding detection code that forms the base
|
||||
of UnicodeDammit.
|
||||
|
||||
Thomas Kluyver and Ezio Melotti finished the work of getting Beautiful
|
||||
Soup 4 working under Python 3.
|
||||
|
||||
Simon Willison wrote soupselect, which was used to make Beautiful Soup
|
||||
support CSS selectors. Isaac Muse wrote SoupSieve, which made it
|
||||
possible to _remove_ the CSS selector code from Beautiful Soup.
|
||||
|
||||
Sam Ruby helped with a lot of edge cases.
|
||||
|
||||
Jonathan Ellis was awarded the prestigious Beau Potage D'Or for his
|
||||
work in solving the nestable tags conundrum.
|
||||
|
||||
An incomplete list of people have contributed patches to Beautiful
|
||||
Soup:
|
||||
|
||||
Istvan Albert, Andrew Lin, Anthony Baxter, Oliver Beattie, Andrew
|
||||
Boyko, Tony Chang, Francisco Canas, "Delong", Zephyr Fang, Fuzzy,
|
||||
Roman Gaufman, Yoni Gilad, Richie Hindle, Toshihiro Kamiya, Peteris
|
||||
Krumins, Kent Johnson, Marek Kapolka, Andreas Kostyrka, Roel Kramer,
|
||||
Ben Last, Robert Leftwich, Stefaan Lippens, "liquider", Staffan
|
||||
Malmgren, Ksenia Marasanova, JP Moins, Adam Monsen, John Nagle, "Jon",
|
||||
Ed Oskiewicz, Martijn Peters, Greg Phillips, Giles Radford, Stefano
|
||||
Revera, Arthur Rudolph, Marko Samastur, James Salter, Jouni Seppänen,
|
||||
Alexander Schmolck, Tim Shirley, Geoffrey Sneddon, Ville Skyttä,
|
||||
"Vikas", Jens Svalgaard, Andy Theyers, Eric Weiser, Glyn Webster, John
|
||||
Wiseman, Paul Wright, Danny Yoo
|
||||
|
||||
An incomplete list of people who made suggestions or found bugs or
|
||||
found ways to break Beautiful Soup:
|
||||
|
||||
Hanno Böck, Matteo Bertini, Chris Curvey, Simon Cusack, Bruce Eckel,
|
||||
Matt Ernst, Michael Foord, Tom Harris, Bill de hOra, Donald Howes,
|
||||
Matt Patterson, Scott Roberts, Steve Strassmann, Mike Williams,
|
||||
warchild at redho dot com, Sami Kuisma, Carlos Rocha, Bob Hutchison,
|
||||
Joren Mc, Michal Migurski, John Kleven, Tim Heaney, Tripp Lilley, Ed
|
||||
Summers, Dennis Sutch, Chris Smith, Aaron Swartz, Stuart
|
||||
Turner, Greg Edwards, Kevin J Kalupson, Nikos Kouremenos, Artur de
|
||||
Sousa Rocha, Yichun Wei, Per Vognsen
|
||||
@ -0,0 +1,31 @@
|
||||
Beautiful Soup is made available under the MIT license:
|
||||
|
||||
Copyright (c) Leonard Richardson
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
Beautiful Soup incorporates code from the html5lib library, which is
|
||||
also made available under the MIT license. Copyright (c) James Graham
|
||||
and other contributors
|
||||
|
||||
Beautiful Soup has an optional dependency on the soupsieve library,
|
||||
which is also made available under the MIT license. Copyright (c)
|
||||
Isaac Muse
|
||||
@ -0,0 +1 @@
|
||||
pip
|
||||
@ -0,0 +1,20 @@
|
||||
Copyright 2010 Jason Kirtland
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a
|
||||
copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included
|
||||
in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
@ -0,0 +1,60 @@
|
||||
Metadata-Version: 2.3
|
||||
Name: blinker
|
||||
Version: 1.9.0
|
||||
Summary: Fast, simple object-to-object and broadcast signaling
|
||||
Author: Jason Kirtland
|
||||
Maintainer-email: Pallets Ecosystem <contact@palletsprojects.com>
|
||||
Requires-Python: >=3.9
|
||||
Description-Content-Type: text/markdown
|
||||
Classifier: Development Status :: 5 - Production/Stable
|
||||
Classifier: License :: OSI Approved :: MIT License
|
||||
Classifier: Programming Language :: Python
|
||||
Classifier: Typing :: Typed
|
||||
Project-URL: Chat, https://discord.gg/pallets
|
||||
Project-URL: Documentation, https://blinker.readthedocs.io
|
||||
Project-URL: Source, https://github.com/pallets-eco/blinker/
|
||||
|
||||
# Blinker
|
||||
|
||||
Blinker provides a fast dispatching system that allows any number of
|
||||
interested parties to subscribe to events, or "signals".
|
||||
|
||||
|
||||
## Pallets Community Ecosystem
|
||||
|
||||
> [!IMPORTANT]\
|
||||
> This project is part of the Pallets Community Ecosystem. Pallets is the open
|
||||
> source organization that maintains Flask; Pallets-Eco enables community
|
||||
> maintenance of related projects. If you are interested in helping maintain
|
||||
> this project, please reach out on [the Pallets Discord server][discord].
|
||||
>
|
||||
> [discord]: https://discord.gg/pallets
|
||||
|
||||
|
||||
## Example
|
||||
|
||||
Signal receivers can subscribe to specific senders or receive signals
|
||||
sent by any sender.
|
||||
|
||||
```pycon
|
||||
>>> from blinker import signal
|
||||
>>> started = signal('round-started')
|
||||
>>> def each(round):
|
||||
... print(f"Round {round}!")
|
||||
...
|
||||
>>> started.connect(each)
|
||||
|
||||
>>> def round_two(round):
|
||||
... print("This is round two.")
|
||||
...
|
||||
>>> started.connect(round_two, sender=2)
|
||||
|
||||
>>> for round in range(1, 4):
|
||||
... started.send(round)
|
||||
...
|
||||
Round 1!
|
||||
Round 2!
|
||||
This is round two.
|
||||
Round 3!
|
||||
```
|
||||
|
||||
@ -0,0 +1,12 @@
|
||||
blinker-1.9.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
||||
blinker-1.9.0.dist-info/LICENSE.txt,sha256=nrc6HzhZekqhcCXSrhvjg5Ykx5XphdTw6Xac4p-spGc,1054
|
||||
blinker-1.9.0.dist-info/METADATA,sha256=uIRiM8wjjbHkCtbCyTvctU37IAZk0kEe5kxAld1dvzA,1633
|
||||
blinker-1.9.0.dist-info/RECORD,,
|
||||
blinker-1.9.0.dist-info/WHEEL,sha256=CpUCUxeHQbRN5UGRQHYRJorO5Af-Qy_fHMctcQ8DSGI,82
|
||||
blinker/__init__.py,sha256=I2EdZqpy4LyjX17Hn1yzJGWCjeLaVaPzsMgHkLfj_cQ,317
|
||||
blinker/__pycache__/__init__.cpython-312.pyc,,
|
||||
blinker/__pycache__/_utilities.cpython-312.pyc,,
|
||||
blinker/__pycache__/base.cpython-312.pyc,,
|
||||
blinker/_utilities.py,sha256=0J7eeXXTUx0Ivf8asfpx0ycVkp0Eqfqnj117x2mYX9E,1675
|
||||
blinker/base.py,sha256=QpDuvXXcwJF49lUBcH5BiST46Rz9wSG7VW_p7N_027M,19132
|
||||
blinker/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
||||
@ -0,0 +1,4 @@
|
||||
Wheel-Version: 1.0
|
||||
Generator: flit 3.10.1
|
||||
Root-Is-Purelib: true
|
||||
Tag: py3-none-any
|
||||
17
gui/lib/python3.12/site-packages/blinker/__init__.py
Normal file
17
gui/lib/python3.12/site-packages/blinker/__init__.py
Normal file
@ -0,0 +1,17 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from .base import ANY
|
||||
from .base import default_namespace
|
||||
from .base import NamedSignal
|
||||
from .base import Namespace
|
||||
from .base import Signal
|
||||
from .base import signal
|
||||
|
||||
__all__ = [
|
||||
"ANY",
|
||||
"default_namespace",
|
||||
"NamedSignal",
|
||||
"Namespace",
|
||||
"Signal",
|
||||
"signal",
|
||||
]
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
64
gui/lib/python3.12/site-packages/blinker/_utilities.py
Normal file
64
gui/lib/python3.12/site-packages/blinker/_utilities.py
Normal file
@ -0,0 +1,64 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import collections.abc as c
|
||||
import inspect
|
||||
import typing as t
|
||||
from weakref import ref
|
||||
from weakref import WeakMethod
|
||||
|
||||
T = t.TypeVar("T")
|
||||
|
||||
|
||||
class Symbol:
    """An interned constant, friendlier than a bare ``object()``.

    Constructing the same name twice yields the identical instance.

    >>> Symbol('foo') is Symbol('foo')
    True
    >>> Symbol('foo')
    foo
    """

    # Registry of every symbol ever created, keyed by name.
    symbols: t.ClassVar[dict[str, Symbol]] = {}

    def __new__(cls, name: str) -> Symbol:
        # Reuse the cached instance when one exists; otherwise create
        # and register a new one so later calls return the same object.
        try:
            return cls.symbols[name]
        except KeyError:
            instance = super().__new__(cls)
            cls.symbols[name] = instance
            return instance

    def __init__(self, name: str) -> None:
        self.name = name

    def __repr__(self) -> str:
        return self.name

    def __getnewargs__(self) -> tuple[t.Any, ...]:
        # Lets pickling round-trip through __new__ and hit the cache.
        return (self.name,)
|
||||
|
||||
|
||||
def make_id(obj: object) -> c.Hashable:
    """Derive a stable key for *obj*, suitable for use in a dict or set.

    A bound method is keyed by the pair ``(id of function, id of
    instance)``: the bound-method wrapper itself is recreated on each
    attribute access, so its own id is not stable, while the underlying
    function and instance are. Strings and ints are used directly, since
    equal values compare and hash equally even across distinct objects.
    Anything else is assumed to remain the same instance and falls back
    to its ``id``.
    """
    if inspect.ismethod(obj):
        return id(obj.__func__), id(obj.__self__)

    return obj if isinstance(obj, (str, int)) else id(obj)
|
||||
|
||||
|
||||
def make_ref(obj: T, callback: c.Callable[[ref[T]], None] | None = None) -> ref[T]:
    """Build a weak reference to *obj*, invoking *callback* when it dies.

    Bound methods require :class:`weakref.WeakMethod`: a plain ``ref``
    to one would go dead immediately because the bound-method wrapper is
    a short-lived object.
    """
    factory = WeakMethod if inspect.ismethod(obj) else ref
    return factory(obj, callback)  # type: ignore[arg-type, return-value]
|
||||
512
gui/lib/python3.12/site-packages/blinker/base.py
Normal file
512
gui/lib/python3.12/site-packages/blinker/base.py
Normal file
@ -0,0 +1,512 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import collections.abc as c
|
||||
import sys
|
||||
import typing as t
|
||||
import weakref
|
||||
from collections import defaultdict
|
||||
from contextlib import contextmanager
|
||||
from functools import cached_property
|
||||
from inspect import iscoroutinefunction
|
||||
|
||||
from ._utilities import make_id
|
||||
from ._utilities import make_ref
|
||||
from ._utilities import Symbol
|
||||
|
||||
F = t.TypeVar("F", bound=c.Callable[..., t.Any])
|
||||
|
||||
ANY = Symbol("ANY")
|
||||
"""Symbol for "any sender"."""
|
||||
|
||||
ANY_ID = 0
|
||||
|
||||
|
||||
class Signal:
|
||||
"""A notification emitter.
|
||||
|
||||
:param doc: The docstring for the signal.
|
||||
"""
|
||||
|
||||
ANY = ANY
|
||||
"""An alias for the :data:`~blinker.ANY` sender symbol."""
|
||||
|
||||
set_class: type[set[t.Any]] = set
|
||||
"""The set class to use for tracking connected receivers and senders.
|
||||
Python's ``set`` is unordered. If receivers must be dispatched in the order
|
||||
they were connected, an ordered set implementation can be used.
|
||||
|
||||
.. versionadded:: 1.7
|
||||
"""
|
||||
|
||||
@cached_property
|
||||
def receiver_connected(self) -> Signal:
|
||||
"""Emitted at the end of each :meth:`connect` call.
|
||||
|
||||
The signal sender is the signal instance, and the :meth:`connect`
|
||||
arguments are passed through: ``receiver``, ``sender``, and ``weak``.
|
||||
|
||||
.. versionadded:: 1.2
|
||||
"""
|
||||
return Signal(doc="Emitted after a receiver connects.")
|
||||
|
||||
@cached_property
|
||||
def receiver_disconnected(self) -> Signal:
|
||||
"""Emitted at the end of each :meth:`disconnect` call.
|
||||
|
||||
The sender is the signal instance, and the :meth:`disconnect` arguments
|
||||
are passed through: ``receiver`` and ``sender``.
|
||||
|
||||
This signal is emitted **only** when :meth:`disconnect` is called
|
||||
explicitly. This signal cannot be emitted by an automatic disconnect
|
||||
when a weakly referenced receiver or sender goes out of scope, as the
|
||||
instance is no longer be available to be used as the sender for this
|
||||
signal.
|
||||
|
||||
An alternative approach is available by subscribing to
|
||||
:attr:`receiver_connected` and setting up a custom weakref cleanup
|
||||
callback on weak receivers and senders.
|
||||
|
||||
.. versionadded:: 1.2
|
||||
"""
|
||||
return Signal(doc="Emitted after a receiver disconnects.")
|
||||
|
||||
def __init__(self, doc: str | None = None) -> None:
|
||||
if doc:
|
||||
self.__doc__ = doc
|
||||
|
||||
self.receivers: dict[
|
||||
t.Any, weakref.ref[c.Callable[..., t.Any]] | c.Callable[..., t.Any]
|
||||
] = {}
|
||||
"""The map of connected receivers. Useful to quickly check if any
|
||||
receivers are connected to the signal: ``if s.receivers:``. The
|
||||
structure and data is not part of the public API, but checking its
|
||||
boolean value is.
|
||||
"""
|
||||
|
||||
self.is_muted: bool = False
|
||||
self._by_receiver: dict[t.Any, set[t.Any]] = defaultdict(self.set_class)
|
||||
self._by_sender: dict[t.Any, set[t.Any]] = defaultdict(self.set_class)
|
||||
self._weak_senders: dict[t.Any, weakref.ref[t.Any]] = {}
|
||||
|
||||
def connect(self, receiver: F, sender: t.Any = ANY, weak: bool = True) -> F:
|
||||
"""Connect ``receiver`` to be called when the signal is sent by
|
||||
``sender``.
|
||||
|
||||
:param receiver: The callable to call when :meth:`send` is called with
|
||||
the given ``sender``, passing ``sender`` as a positional argument
|
||||
along with any extra keyword arguments.
|
||||
:param sender: Any object or :data:`ANY`. ``receiver`` will only be
|
||||
called when :meth:`send` is called with this sender. If ``ANY``, the
|
||||
receiver will be called for any sender. A receiver may be connected
|
||||
to multiple senders by calling :meth:`connect` multiple times.
|
||||
:param weak: Track the receiver with a :mod:`weakref`. The receiver will
|
||||
be automatically disconnected when it is garbage collected. When
|
||||
connecting a receiver defined within a function, set to ``False``,
|
||||
otherwise it will be disconnected when the function scope ends.
|
||||
"""
|
||||
receiver_id = make_id(receiver)
|
||||
sender_id = ANY_ID if sender is ANY else make_id(sender)
|
||||
|
||||
if weak:
|
||||
self.receivers[receiver_id] = make_ref(
|
||||
receiver, self._make_cleanup_receiver(receiver_id)
|
||||
)
|
||||
else:
|
||||
self.receivers[receiver_id] = receiver
|
||||
|
||||
self._by_sender[sender_id].add(receiver_id)
|
||||
self._by_receiver[receiver_id].add(sender_id)
|
||||
|
||||
if sender is not ANY and sender_id not in self._weak_senders:
|
||||
# store a cleanup for weakref-able senders
|
||||
try:
|
||||
self._weak_senders[sender_id] = make_ref(
|
||||
sender, self._make_cleanup_sender(sender_id)
|
||||
)
|
||||
except TypeError:
|
||||
pass
|
||||
|
||||
if "receiver_connected" in self.__dict__ and self.receiver_connected.receivers:
|
||||
try:
|
||||
self.receiver_connected.send(
|
||||
self, receiver=receiver, sender=sender, weak=weak
|
||||
)
|
||||
except TypeError:
|
||||
# TODO no explanation or test for this
|
||||
self.disconnect(receiver, sender)
|
||||
raise
|
||||
|
||||
return receiver
|
||||
|
||||
def connect_via(self, sender: t.Any, weak: bool = False) -> c.Callable[[F], F]:
|
||||
"""Connect the decorated function to be called when the signal is sent
|
||||
by ``sender``.
|
||||
|
||||
The decorated function will be called when :meth:`send` is called with
|
||||
the given ``sender``, passing ``sender`` as a positional argument along
|
||||
with any extra keyword arguments.
|
||||
|
||||
:param sender: Any object or :data:`ANY`. ``receiver`` will only be
|
||||
called when :meth:`send` is called with this sender. If ``ANY``, the
|
||||
receiver will be called for any sender. A receiver may be connected
|
||||
to multiple senders by calling :meth:`connect` multiple times.
|
||||
:param weak: Track the receiver with a :mod:`weakref`. The receiver will
|
||||
be automatically disconnected when it is garbage collected. When
|
||||
connecting a receiver defined within a function, set to ``False``,
|
||||
otherwise it will be disconnected when the function scope ends.=
|
||||
|
||||
.. versionadded:: 1.1
|
||||
"""
|
||||
|
||||
def decorator(fn: F) -> F:
|
||||
self.connect(fn, sender, weak)
|
||||
return fn
|
||||
|
||||
return decorator
|
||||
|
||||
@contextmanager
|
||||
def connected_to(
|
||||
self, receiver: c.Callable[..., t.Any], sender: t.Any = ANY
|
||||
) -> c.Generator[None, None, None]:
|
||||
"""A context manager that temporarily connects ``receiver`` to the
|
||||
signal while a ``with`` block executes. When the block exits, the
|
||||
receiver is disconnected. Useful for tests.
|
||||
|
||||
:param receiver: The callable to call when :meth:`send` is called with
|
||||
the given ``sender``, passing ``sender`` as a positional argument
|
||||
along with any extra keyword arguments.
|
||||
:param sender: Any object or :data:`ANY`. ``receiver`` will only be
|
||||
called when :meth:`send` is called with this sender. If ``ANY``, the
|
||||
receiver will be called for any sender.
|
||||
|
||||
.. versionadded:: 1.1
|
||||
"""
|
||||
self.connect(receiver, sender=sender, weak=False)
|
||||
|
||||
try:
|
||||
yield None
|
||||
finally:
|
||||
self.disconnect(receiver)
|
||||
|
||||
@contextmanager
|
||||
def muted(self) -> c.Generator[None, None, None]:
|
||||
"""A context manager that temporarily disables the signal. No receivers
|
||||
will be called if the signal is sent, until the ``with`` block exits.
|
||||
Useful for tests.
|
||||
"""
|
||||
self.is_muted = True
|
||||
|
||||
try:
|
||||
yield None
|
||||
finally:
|
||||
self.is_muted = False
|
||||
|
||||
def send(
|
||||
self,
|
||||
sender: t.Any | None = None,
|
||||
/,
|
||||
*,
|
||||
_async_wrapper: c.Callable[
|
||||
[c.Callable[..., c.Coroutine[t.Any, t.Any, t.Any]]], c.Callable[..., t.Any]
|
||||
]
|
||||
| None = None,
|
||||
**kwargs: t.Any,
|
||||
) -> list[tuple[c.Callable[..., t.Any], t.Any]]:
|
||||
"""Call all receivers that are connected to the given ``sender``
|
||||
or :data:`ANY`. Each receiver is called with ``sender`` as a positional
|
||||
argument along with any extra keyword arguments. Return a list of
|
||||
``(receiver, return value)`` tuples.
|
||||
|
||||
The order receivers are called is undefined, but can be influenced by
|
||||
setting :attr:`set_class`.
|
||||
|
||||
If a receiver raises an exception, that exception will propagate up.
|
||||
This makes debugging straightforward, with an assumption that correctly
|
||||
implemented receivers will not raise.
|
||||
|
||||
:param sender: Call receivers connected to this sender, in addition to
|
||||
those connected to :data:`ANY`.
|
||||
:param _async_wrapper: Will be called on any receivers that are async
|
||||
coroutines to turn them into sync callables. For example, could run
|
||||
the receiver with an event loop.
|
||||
:param kwargs: Extra keyword arguments to pass to each receiver.
|
||||
|
||||
.. versionchanged:: 1.7
|
||||
Added the ``_async_wrapper`` argument.
|
||||
"""
|
||||
if self.is_muted:
|
||||
return []
|
||||
|
||||
results = []
|
||||
|
||||
for receiver in self.receivers_for(sender):
|
||||
if iscoroutinefunction(receiver):
|
||||
if _async_wrapper is None:
|
||||
raise RuntimeError("Cannot send to a coroutine function.")
|
||||
|
||||
result = _async_wrapper(receiver)(sender, **kwargs)
|
||||
else:
|
||||
result = receiver(sender, **kwargs)
|
||||
|
||||
results.append((receiver, result))
|
||||
|
||||
return results
|
||||
|
||||
async def send_async(
|
||||
self,
|
||||
sender: t.Any | None = None,
|
||||
/,
|
||||
*,
|
||||
_sync_wrapper: c.Callable[
|
||||
[c.Callable[..., t.Any]], c.Callable[..., c.Coroutine[t.Any, t.Any, t.Any]]
|
||||
]
|
||||
| None = None,
|
||||
**kwargs: t.Any,
|
||||
) -> list[tuple[c.Callable[..., t.Any], t.Any]]:
|
||||
"""Await all receivers that are connected to the given ``sender``
|
||||
or :data:`ANY`. Each receiver is called with ``sender`` as a positional
|
||||
argument along with any extra keyword arguments. Return a list of
|
||||
``(receiver, return value)`` tuples.
|
||||
|
||||
The order receivers are called is undefined, but can be influenced by
|
||||
setting :attr:`set_class`.
|
||||
|
||||
If a receiver raises an exception, that exception will propagate up.
|
||||
This makes debugging straightforward, with an assumption that correctly
|
||||
implemented receivers will not raise.
|
||||
|
||||
:param sender: Call receivers connected to this sender, in addition to
|
||||
those connected to :data:`ANY`.
|
||||
:param _sync_wrapper: Will be called on any receivers that are sync
|
||||
callables to turn them into async coroutines. For example,
|
||||
could call the receiver in a thread.
|
||||
:param kwargs: Extra keyword arguments to pass to each receiver.
|
||||
|
||||
.. versionadded:: 1.7
|
||||
"""
|
||||
if self.is_muted:
|
||||
return []
|
||||
|
||||
results = []
|
||||
|
||||
for receiver in self.receivers_for(sender):
|
||||
if not iscoroutinefunction(receiver):
|
||||
if _sync_wrapper is None:
|
||||
raise RuntimeError("Cannot send to a non-coroutine function.")
|
||||
|
||||
result = await _sync_wrapper(receiver)(sender, **kwargs)
|
||||
else:
|
||||
result = await receiver(sender, **kwargs)
|
||||
|
||||
results.append((receiver, result))
|
||||
|
||||
return results
|
||||
|
||||
def has_receivers_for(self, sender: t.Any) -> bool:
|
||||
"""Check if there is at least one receiver that will be called with the
|
||||
given ``sender``. A receiver connected to :data:`ANY` will always be
|
||||
called, regardless of sender. Does not check if weakly referenced
|
||||
receivers are still live. See :meth:`receivers_for` for a stronger
|
||||
search.
|
||||
|
||||
:param sender: Check for receivers connected to this sender, in addition
|
||||
to those connected to :data:`ANY`.
|
||||
"""
|
||||
if not self.receivers:
|
||||
return False
|
||||
|
||||
if self._by_sender[ANY_ID]:
|
||||
return True
|
||||
|
||||
if sender is ANY:
|
||||
return False
|
||||
|
||||
return make_id(sender) in self._by_sender
|
||||
|
||||
def receivers_for(
|
||||
self, sender: t.Any
|
||||
) -> c.Generator[c.Callable[..., t.Any], None, None]:
|
||||
"""Yield each receiver to be called for ``sender``, in addition to those
|
||||
to be called for :data:`ANY`. Weakly referenced receivers that are not
|
||||
live will be disconnected and skipped.
|
||||
|
||||
:param sender: Yield receivers connected to this sender, in addition
|
||||
to those connected to :data:`ANY`.
|
||||
"""
|
||||
# TODO: test receivers_for(ANY)
|
||||
if not self.receivers:
|
||||
return
|
||||
|
||||
sender_id = make_id(sender)
|
||||
|
||||
if sender_id in self._by_sender:
|
||||
ids = self._by_sender[ANY_ID] | self._by_sender[sender_id]
|
||||
else:
|
||||
ids = self._by_sender[ANY_ID].copy()
|
||||
|
||||
for receiver_id in ids:
|
||||
receiver = self.receivers.get(receiver_id)
|
||||
|
||||
if receiver is None:
|
||||
continue
|
||||
|
||||
if isinstance(receiver, weakref.ref):
|
||||
strong = receiver()
|
||||
|
||||
if strong is None:
|
||||
self._disconnect(receiver_id, ANY_ID)
|
||||
continue
|
||||
|
||||
yield strong
|
||||
else:
|
||||
yield receiver
|
||||
|
||||
def disconnect(self, receiver: c.Callable[..., t.Any], sender: t.Any = ANY) -> None:
|
||||
"""Disconnect ``receiver`` from being called when the signal is sent by
|
||||
``sender``.
|
||||
|
||||
:param receiver: A connected receiver callable.
|
||||
:param sender: Disconnect from only this sender. By default, disconnect
|
||||
from all senders.
|
||||
"""
|
||||
sender_id: c.Hashable
|
||||
|
||||
if sender is ANY:
|
||||
sender_id = ANY_ID
|
||||
else:
|
||||
sender_id = make_id(sender)
|
||||
|
||||
receiver_id = make_id(receiver)
|
||||
self._disconnect(receiver_id, sender_id)
|
||||
|
||||
if (
|
||||
"receiver_disconnected" in self.__dict__
|
||||
and self.receiver_disconnected.receivers
|
||||
):
|
||||
self.receiver_disconnected.send(self, receiver=receiver, sender=sender)
|
||||
|
||||
def _disconnect(self, receiver_id: c.Hashable, sender_id: c.Hashable) -> None:
|
||||
if sender_id == ANY_ID:
|
||||
if self._by_receiver.pop(receiver_id, None) is not None:
|
||||
for bucket in self._by_sender.values():
|
||||
bucket.discard(receiver_id)
|
||||
|
||||
self.receivers.pop(receiver_id, None)
|
||||
else:
|
||||
self._by_sender[sender_id].discard(receiver_id)
|
||||
self._by_receiver[receiver_id].discard(sender_id)
|
||||
|
||||
def _make_cleanup_receiver(
|
||||
self, receiver_id: c.Hashable
|
||||
) -> c.Callable[[weakref.ref[c.Callable[..., t.Any]]], None]:
|
||||
"""Create a callback function to disconnect a weakly referenced
|
||||
receiver when it is garbage collected.
|
||||
"""
|
||||
|
||||
def cleanup(ref: weakref.ref[c.Callable[..., t.Any]]) -> None:
|
||||
# If the interpreter is shutting down, disconnecting can result in a
|
||||
# weird ignored exception. Don't call it in that case.
|
||||
if not sys.is_finalizing():
|
||||
self._disconnect(receiver_id, ANY_ID)
|
||||
|
||||
return cleanup
|
||||
|
||||
def _make_cleanup_sender(
|
||||
self, sender_id: c.Hashable
|
||||
) -> c.Callable[[weakref.ref[t.Any]], None]:
|
||||
"""Create a callback function to disconnect all receivers for a weakly
|
||||
referenced sender when it is garbage collected.
|
||||
"""
|
||||
assert sender_id != ANY_ID
|
||||
|
||||
def cleanup(ref: weakref.ref[t.Any]) -> None:
|
||||
self._weak_senders.pop(sender_id, None)
|
||||
|
||||
for receiver_id in self._by_sender.pop(sender_id, ()):
|
||||
self._by_receiver[receiver_id].discard(sender_id)
|
||||
|
||||
return cleanup
|
||||
|
||||
def _cleanup_bookkeeping(self) -> None:
|
||||
"""Prune unused sender/receiver bookkeeping. Not threadsafe.
|
||||
|
||||
Connecting & disconnecting leaves behind a small amount of bookkeeping
|
||||
data. Typical workloads using Blinker, for example in most web apps,
|
||||
Flask, CLI scripts, etc., are not adversely affected by this
|
||||
bookkeeping.
|
||||
|
||||
With a long-running process performing dynamic signal routing with high
|
||||
volume, e.g. connecting to function closures, senders are all unique
|
||||
object instances. Doing all of this over and over may cause memory usage
|
||||
to grow due to extraneous bookkeeping. (An empty ``set`` for each stale
|
||||
sender/receiver pair.)
|
||||
|
||||
This method will prune that bookkeeping away, with the caveat that such
|
||||
pruning is not threadsafe. The risk is that cleanup of a fully
|
||||
disconnected receiver/sender pair occurs while another thread is
|
||||
connecting that same pair. If you are in the highly dynamic, unique
|
||||
receiver/sender situation that has lead you to this method, that failure
|
||||
mode is perhaps not a big deal for you.
|
||||
"""
|
||||
for mapping in (self._by_sender, self._by_receiver):
|
||||
for ident, bucket in list(mapping.items()):
|
||||
if not bucket:
|
||||
mapping.pop(ident, None)
|
||||
|
||||
def _clear_state(self) -> None:
|
||||
"""Disconnect all receivers and senders. Useful for tests."""
|
||||
self._weak_senders.clear()
|
||||
self.receivers.clear()
|
||||
self._by_sender.clear()
|
||||
self._by_receiver.clear()
|
||||
|
||||
|
||||
class NamedSignal(Signal):
|
||||
"""A named generic notification emitter. The name is not used by the signal
|
||||
itself, but matches the key in the :class:`Namespace` that it belongs to.
|
||||
|
||||
:param name: The name of the signal within the namespace.
|
||||
:param doc: The docstring for the signal.
|
||||
"""
|
||||
|
||||
def __init__(self, name: str, doc: str | None = None) -> None:
|
||||
super().__init__(doc)
|
||||
|
||||
#: The name of this signal.
|
||||
self.name: str = name
|
||||
|
||||
def __repr__(self) -> str:
|
||||
base = super().__repr__()
|
||||
return f"{base[:-1]}; {self.name!r}>" # noqa: E702
|
||||
|
||||
|
||||
class Namespace(dict[str, NamedSignal]):
|
||||
"""A dict mapping names to signals."""
|
||||
|
||||
def signal(self, name: str, doc: str | None = None) -> NamedSignal:
|
||||
"""Return the :class:`NamedSignal` for the given ``name``, creating it
|
||||
if required. Repeated calls with the same name return the same signal.
|
||||
|
||||
:param name: The name of the signal.
|
||||
:param doc: The docstring of the signal.
|
||||
"""
|
||||
if name not in self:
|
||||
self[name] = NamedSignal(name, doc)
|
||||
|
||||
return self[name]
|
||||
|
||||
|
||||
class _PNamespaceSignal(t.Protocol):
|
||||
def __call__(self, name: str, doc: str | None = None) -> NamedSignal: ...
|
||||
|
||||
|
||||
default_namespace: Namespace = Namespace()
|
||||
"""A default :class:`Namespace` for creating named signals. :func:`signal`
|
||||
creates a :class:`NamedSignal` in this namespace.
|
||||
"""
|
||||
|
||||
signal: _PNamespaceSignal = default_namespace.signal
|
||||
"""Return a :class:`NamedSignal` in :data:`default_namespace` with the given
|
||||
``name``, creating it if required. Repeated calls with the same name return the
|
||||
same signal.
|
||||
"""
|
||||
0
gui/lib/python3.12/site-packages/blinker/py.typed
Normal file
0
gui/lib/python3.12/site-packages/blinker/py.typed
Normal file
@ -0,0 +1 @@
|
||||
pip
|
||||
@ -0,0 +1,10 @@
|
||||
Metadata-Version: 2.1
|
||||
Name: bs4
|
||||
Version: 0.0.2
|
||||
Summary: Dummy package for Beautiful Soup (beautifulsoup4)
|
||||
Author-email: Leonard Richardson <leonardr@segfault.org>
|
||||
License: MIT License
|
||||
Requires-Dist: beautifulsoup4
|
||||
Description-Content-Type: text/x-rst
|
||||
|
||||
This is a dummy package designed to prevent namesquatting on PyPI. You should install `beautifulsoup4 <https://pypi.python.org/pypi/beautifulsoup4>`_ instead.
|
||||
@ -0,0 +1,6 @@
|
||||
README.rst,sha256=KMs4D-t40JC-oge8vGS3O5gueksurGqAIFxPtHZAMXQ,159
|
||||
bs4-0.0.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
||||
bs4-0.0.2.dist-info/METADATA,sha256=GEwOSFCOYLu11XQR3O2dMO7ZTpKFZpGoIUG0gkFVgA8,411
|
||||
bs4-0.0.2.dist-info/RECORD,,
|
||||
bs4-0.0.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
||||
bs4-0.0.2.dist-info/WHEEL,sha256=VYAwk8D_V6zmIA2XKK-k7Fem_KAtVk3hugaRru3yjGc,105
|
||||
@ -0,0 +1,5 @@
|
||||
Wheel-Version: 1.0
|
||||
Generator: hatchling 1.21.0
|
||||
Root-Is-Purelib: true
|
||||
Tag: py2-none-any
|
||||
Tag: py3-none-any
|
||||
1170
gui/lib/python3.12/site-packages/bs4/__init__.py
Normal file
1170
gui/lib/python3.12/site-packages/bs4/__init__.py
Normal file
File diff suppressed because it is too large
Load Diff
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
80
gui/lib/python3.12/site-packages/bs4/_deprecation.py
Normal file
80
gui/lib/python3.12/site-packages/bs4/_deprecation.py
Normal file
@ -0,0 +1,80 @@
|
||||
"""Helper functions for deprecation.
|
||||
|
||||
This interface is itself unstable and may change without warning. Do
|
||||
not use these functions yourself, even as a joke. The underscores are
|
||||
there for a reason. No support will be given.
|
||||
|
||||
In particular, most of this will go away without warning once
|
||||
Beautiful Soup drops support for Python 3.11, since Python 3.12
|
||||
defines a `@typing.deprecated()
|
||||
decorator. <https://peps.python.org/pep-0702/>`_
|
||||
"""
|
||||
|
||||
import functools
|
||||
import warnings
|
||||
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
)
|
||||
|
||||
|
||||
def _deprecated_alias(old_name: str, new_name: str, version: str):
|
||||
"""Alias one attribute name to another for backward compatibility
|
||||
|
||||
:meta private:
|
||||
"""
|
||||
|
||||
@property
|
||||
def alias(self) -> Any:
|
||||
":meta private:"
|
||||
warnings.warn(
|
||||
f"Access to deprecated property {old_name}. (Replaced by {new_name}) -- Deprecated since version {version}.",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
return getattr(self, new_name)
|
||||
|
||||
@alias.setter
|
||||
def alias(self, value: str) -> None:
|
||||
":meta private:"
|
||||
warnings.warn(
|
||||
f"Write to deprecated property {old_name}. (Replaced by {new_name}) -- Deprecated since version {version}.",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
return setattr(self, new_name, value)
|
||||
|
||||
return alias
|
||||
|
||||
|
||||
def _deprecated_function_alias(
|
||||
old_name: str, new_name: str, version: str
|
||||
) -> Callable[[Any], Any]:
|
||||
def alias(self, *args: Any, **kwargs: Any) -> Any:
|
||||
":meta private:"
|
||||
warnings.warn(
|
||||
f"Call to deprecated method {old_name}. (Replaced by {new_name}) -- Deprecated since version {version}.",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
return getattr(self, new_name)(*args, **kwargs)
|
||||
|
||||
return alias
|
||||
|
||||
|
||||
def _deprecated(replaced_by: str, version: str) -> Callable:
|
||||
def deprecate(func: Callable) -> Callable:
|
||||
@functools.wraps(func)
|
||||
def with_warning(*args: Any, **kwargs: Any) -> Any:
|
||||
":meta private:"
|
||||
warnings.warn(
|
||||
f"Call to deprecated method {func.__name__}. (Replaced by {replaced_by}) -- Deprecated since version {version}.",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
return func(*args, **kwargs)
|
||||
|
||||
return with_warning
|
||||
|
||||
return deprecate
|
||||
196
gui/lib/python3.12/site-packages/bs4/_typing.py
Normal file
196
gui/lib/python3.12/site-packages/bs4/_typing.py
Normal file
@ -0,0 +1,196 @@
|
||||
# Custom type aliases used throughout Beautiful Soup to improve readability.
|
||||
|
||||
# Notes on improvements to the type system in newer versions of Python
|
||||
# that can be used once Beautiful Soup drops support for older
|
||||
# versions:
|
||||
#
|
||||
# * ClassVar can be put on class variables now.
|
||||
# * In 3.10, x|y is an accepted shorthand for Union[x,y].
|
||||
# * In 3.10, TypeAlias gains capabilities that can be used to
|
||||
# improve the tree matching types (I don't remember what, exactly).
|
||||
# * In 3.9 it's possible to specialize the re.Match type,
|
||||
# e.g. re.Match[str]. In 3.8 there's a typing.re namespace for this,
|
||||
# but it's removed in 3.12, so to support the widest possible set of
|
||||
# versions I'm not using it.
|
||||
|
||||
from typing_extensions import (
|
||||
runtime_checkable,
|
||||
Protocol,
|
||||
TypeAlias,
|
||||
)
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
Dict,
|
||||
IO,
|
||||
Iterable,
|
||||
Mapping,
|
||||
Optional,
|
||||
Pattern,
|
||||
TYPE_CHECKING,
|
||||
Union,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from bs4.element import (
|
||||
AttributeValueList,
|
||||
NamespacedAttribute,
|
||||
NavigableString,
|
||||
PageElement,
|
||||
ResultSet,
|
||||
Tag,
|
||||
)
|
||||
|
||||
|
||||
@runtime_checkable
|
||||
class _RegularExpressionProtocol(Protocol):
|
||||
"""A protocol object which can accept either Python's built-in
|
||||
`re.Pattern` objects, or the similar ``Regex`` objects defined by the
|
||||
third-party ``regex`` package.
|
||||
"""
|
||||
|
||||
def search(
|
||||
self, string: str, pos: int = ..., endpos: int = ...
|
||||
) -> Optional[Any]: ...
|
||||
|
||||
@property
|
||||
def pattern(self) -> str: ...
|
||||
|
||||
|
||||
# Aliases for markup in various stages of processing.
|
||||
#
|
||||
#: The rawest form of markup: either a string, bytestring, or an open filehandle.
|
||||
_IncomingMarkup: TypeAlias = Union[str, bytes, IO[str], IO[bytes]]
|
||||
|
||||
#: Markup that is in memory but has (potentially) yet to be converted
|
||||
#: to Unicode.
|
||||
_RawMarkup: TypeAlias = Union[str, bytes]
|
||||
|
||||
# Aliases for character encodings
|
||||
#
|
||||
|
||||
#: A data encoding.
|
||||
_Encoding: TypeAlias = str
|
||||
|
||||
#: One or more data encodings.
|
||||
_Encodings: TypeAlias = Iterable[_Encoding]
|
||||
|
||||
# Aliases for XML namespaces
|
||||
#
|
||||
|
||||
#: The prefix for an XML namespace.
|
||||
_NamespacePrefix: TypeAlias = str
|
||||
|
||||
#: The URL of an XML namespace
|
||||
_NamespaceURL: TypeAlias = str
|
||||
|
||||
#: A mapping of prefixes to namespace URLs.
|
||||
_NamespaceMapping: TypeAlias = Dict[_NamespacePrefix, _NamespaceURL]
|
||||
|
||||
#: A mapping of namespace URLs to prefixes
|
||||
_InvertedNamespaceMapping: TypeAlias = Dict[_NamespaceURL, _NamespacePrefix]
|
||||
|
||||
# Aliases for the attribute values associated with HTML/XML tags.
|
||||
#
|
||||
|
||||
#: The value associated with an HTML or XML attribute. This is the
|
||||
#: relatively unprocessed value Beautiful Soup expects to come from a
|
||||
#: `TreeBuilder`.
|
||||
_RawAttributeValue: TypeAlias = str
|
||||
|
||||
#: A dictionary of names to `_RawAttributeValue` objects. This is how
|
||||
#: Beautiful Soup expects a `TreeBuilder` to represent a tag's
|
||||
#: attribute values.
|
||||
_RawAttributeValues: TypeAlias = (
|
||||
"Mapping[Union[str, NamespacedAttribute], _RawAttributeValue]"
|
||||
)
|
||||
|
||||
#: An attribute value in its final form, as stored in the
|
||||
# `Tag` class, after it has been processed and (in some cases)
|
||||
# split into a list of strings.
|
||||
_AttributeValue: TypeAlias = Union[str, "AttributeValueList"]
|
||||
|
||||
#: A dictionary of names to :py:data:`_AttributeValue` objects. This is what
|
||||
#: a tag's attributes look like after processing.
|
||||
_AttributeValues: TypeAlias = Dict[str, _AttributeValue]
|
||||
|
||||
#: The methods that deal with turning :py:data:`_RawAttributeValue` into
|
||||
#: :py:data:`_AttributeValue` may be called several times, even after the values
|
||||
#: are already processed (e.g. when cloning a tag), so they need to
|
||||
#: be able to acommodate both possibilities.
|
||||
_RawOrProcessedAttributeValues: TypeAlias = Union[_RawAttributeValues, _AttributeValues]
|
||||
|
||||
#: A number of tree manipulation methods can take either a `PageElement` or a
|
||||
#: normal Python string (which will be converted to a `NavigableString`).
|
||||
_InsertableElement: TypeAlias = Union["PageElement", str]
|
||||
|
||||
# Aliases to represent the many possibilities for matching bits of a
|
||||
# parse tree.
|
||||
#
|
||||
# This is very complicated because we're applying a formal type system
|
||||
# to some very DWIM code. The types we end up with will be the types
|
||||
# of the arguments to the SoupStrainer constructor and (more
|
||||
# familiarly to Beautiful Soup users) the find* methods.
|
||||
|
||||
#: A function that takes a PageElement and returns a yes-or-no answer.
|
||||
_PageElementMatchFunction: TypeAlias = Callable[["PageElement"], bool]
|
||||
|
||||
#: A function that takes the raw parsed ingredients of a markup tag
|
||||
#: and returns a yes-or-no answer.
|
||||
# Not necessary at the moment.
|
||||
# _AllowTagCreationFunction:TypeAlias = Callable[[Optional[str], str, Optional[_RawAttributeValues]], bool]
|
||||
|
||||
#: A function that takes the raw parsed ingredients of a markup string node
|
||||
#: and returns a yes-or-no answer.
|
||||
# Not necessary at the moment.
|
||||
# _AllowStringCreationFunction:TypeAlias = Callable[[Optional[str]], bool]
|
||||
|
||||
#: A function that takes a `Tag` and returns a yes-or-no answer.
|
||||
#: A `TagNameMatchRule` expects this kind of function, if you're
|
||||
#: going to pass it a function.
|
||||
_TagMatchFunction: TypeAlias = Callable[["Tag"], bool]
|
||||
|
||||
#: A function that takes a single string and returns a yes-or-no
|
||||
#: answer. An `AttributeValueMatchRule` expects this kind of function, if
|
||||
#: you're going to pass it a function. So does a `StringMatchRule`.
|
||||
_StringMatchFunction: TypeAlias = Callable[[str], bool]
|
||||
|
||||
#: Either a tag name, an attribute value or a string can be matched
|
||||
#: against a string, bytestring, regular expression, or a boolean.
|
||||
_BaseStrainable: TypeAlias = Union[str, bytes, Pattern[str], bool]
|
||||
|
||||
#: A tag can be matched either with the `_BaseStrainable` options, or
|
||||
#: using a function that takes the `Tag` as its sole argument.
|
||||
_BaseStrainableElement: TypeAlias = Union[_BaseStrainable, _TagMatchFunction]
|
||||
|
||||
#: A tag's attribute vgalue can be matched either with the
|
||||
#: `_BaseStrainable` options, or using a function that takes that
|
||||
#: value as its sole argument.
|
||||
_BaseStrainableAttribute: TypeAlias = Union[_BaseStrainable, _StringMatchFunction]
|
||||
|
||||
#: A tag can be matched using either a single criterion or a list of
|
||||
#: criteria.
|
||||
_StrainableElement: TypeAlias = Union[
|
||||
_BaseStrainableElement, Iterable[_BaseStrainableElement]
|
||||
]
|
||||
|
||||
#: An attribute value can be matched using either a single criterion
|
||||
#: or a list of criteria.
|
||||
_StrainableAttribute: TypeAlias = Union[
|
||||
_BaseStrainableAttribute, Iterable[_BaseStrainableAttribute]
|
||||
]
|
||||
|
||||
#: An string can be matched using the same techniques as
|
||||
#: an attribute value.
|
||||
_StrainableString: TypeAlias = _StrainableAttribute
|
||||
|
||||
#: A dictionary may be used to match against multiple attribute vlaues at once.
|
||||
_StrainableAttributes: TypeAlias = Dict[str, _StrainableAttribute]
|
||||
|
||||
#: Many Beautiful soup methods return a PageElement or an ResultSet of
|
||||
#: PageElements. A PageElement is either a Tag or a NavigableString.
|
||||
#: These convenience aliases make it easier for IDE users to see which methods
|
||||
#: are available on the objects they're dealing with.
|
||||
_OneElement: TypeAlias = Union["PageElement", "Tag", "NavigableString"]
|
||||
_AtMostOneElement: TypeAlias = Optional[_OneElement]
|
||||
_QueryResults: TypeAlias = "ResultSet[_OneElement]"
|
||||
98
gui/lib/python3.12/site-packages/bs4/_warnings.py
Normal file
98
gui/lib/python3.12/site-packages/bs4/_warnings.py
Normal file
@ -0,0 +1,98 @@
|
||||
"""Define some custom warnings."""
|
||||
|
||||
|
||||
class GuessedAtParserWarning(UserWarning):
|
||||
"""The warning issued when BeautifulSoup has to guess what parser to
|
||||
use -- probably because no parser was specified in the constructor.
|
||||
"""
|
||||
|
||||
MESSAGE: str = """No parser was explicitly specified, so I'm using the best available %(markup_type)s parser for this system ("%(parser)s"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.
|
||||
|
||||
The code that caused this warning is on line %(line_number)s of the file %(filename)s. To get rid of this warning, pass the additional argument 'features="%(parser)s"' to the BeautifulSoup constructor.
|
||||
"""
|
||||
|
||||
|
||||
class UnusualUsageWarning(UserWarning):
|
||||
"""A superclass for warnings issued when Beautiful Soup sees
|
||||
something that is typically the result of a mistake in the calling
|
||||
code, but might be intentional on the part of the user. If it is
|
||||
in fact intentional, you can filter the individual warning class
|
||||
to get rid of the warning. If you don't like Beautiful Soup
|
||||
second-guessing what you are doing, you can filter the
|
||||
UnusualUsageWarningclass itself and get rid of these entirely.
|
||||
"""
|
||||
|
||||
|
||||
class MarkupResemblesLocatorWarning(UnusualUsageWarning):
|
||||
"""The warning issued when BeautifulSoup is given 'markup' that
|
||||
actually looks like a resource locator -- a URL or a path to a file
|
||||
on disk.
|
||||
"""
|
||||
|
||||
#: :meta private:
|
||||
GENERIC_MESSAGE: str = """
|
||||
|
||||
However, if you want to parse some data that happens to look like a %(what)s, then nothing has gone wrong: you are using Beautiful Soup correctly, and this warning is spurious and can be filtered. To make this warning go away, run this code before calling the BeautifulSoup constructor:
|
||||
|
||||
from bs4 import MarkupResemblesLocatorWarning
|
||||
import warnings
|
||||
|
||||
warnings.filterwarnings("ignore", category=MarkupResemblesLocatorWarning)
|
||||
"""
|
||||
|
||||
URL_MESSAGE: str = (
|
||||
"""The input passed in on this line looks more like a URL than HTML or XML.
|
||||
|
||||
If you meant to use Beautiful Soup to parse the web page found at a certain URL, then something has gone wrong. You should use an Python package like 'requests' to fetch the content behind the URL. Once you have the content as a string, you can feed that string into Beautiful Soup."""
|
||||
+ GENERIC_MESSAGE
|
||||
)
|
||||
|
||||
FILENAME_MESSAGE: str = (
|
||||
"""The input passed in on this line looks more like a filename than HTML or XML.
|
||||
|
||||
If you meant to use Beautiful Soup to parse the contents of a file on disk, then something has gone wrong. You should open the file first, using code like this:
|
||||
|
||||
filehandle = open(your filename)
|
||||
|
||||
You can then feed the open filehandle into Beautiful Soup instead of using the filename."""
|
||||
+ GENERIC_MESSAGE
|
||||
)
|
||||
|
||||
|
||||
class AttributeResemblesVariableWarning(UnusualUsageWarning, SyntaxWarning):
|
||||
"""The warning issued when Beautiful Soup suspects a provided
|
||||
attribute name may actually be the misspelled name of a Beautiful
|
||||
Soup variable. Generally speaking, this is only used in cases like
|
||||
"_class" where it's very unlikely the user would be referencing an
|
||||
XML attribute with that name.
|
||||
"""
|
||||
|
||||
MESSAGE: str = """%(original)r is an unusual attribute name and is a common misspelling for %(autocorrect)r.
|
||||
|
||||
If you meant %(autocorrect)r, change your code to use it, and this warning will go away.
|
||||
|
||||
If you really did mean to check the %(original)r attribute, this warning is spurious and can be filtered. To make it go away, run this code before creating your BeautifulSoup object:
|
||||
|
||||
from bs4 import AttributeResemblesVariableWarning
|
||||
import warnings
|
||||
|
||||
warnings.filterwarnings("ignore", category=AttributeResemblesVariableWarning)
|
||||
"""
|
||||
|
||||
|
||||
class XMLParsedAsHTMLWarning(UnusualUsageWarning):
|
||||
"""The warning issued when an HTML parser is used to parse
|
||||
XML that is not (as far as we can tell) XHTML.
|
||||
"""
|
||||
|
||||
MESSAGE: str = """It looks like you're using an HTML parser to parse an XML document.
|
||||
|
||||
Assuming this really is an XML document, what you're doing might work, but you should know that using an XML parser will be more reliable. To parse this document as XML, make sure you have the Python package 'lxml' installed, and pass the keyword argument `features="xml"` into the BeautifulSoup constructor.
|
||||
|
||||
If you want or need to use an HTML parser on this document, you can make this warning go away by filtering it. To do that, run this code before calling the BeautifulSoup constructor:
|
||||
|
||||
from bs4 import XMLParsedAsHTMLWarning
|
||||
import warnings
|
||||
|
||||
warnings.filterwarnings("ignore", category=XMLParsedAsHTMLWarning)
|
||||
"""
|
||||
848
gui/lib/python3.12/site-packages/bs4/builder/__init__.py
Normal file
848
gui/lib/python3.12/site-packages/bs4/builder/__init__.py
Normal file
@ -0,0 +1,848 @@
|
||||
from __future__ import annotations
|
||||
|
||||
# Use of this source code is governed by the MIT license.
|
||||
__license__ = "MIT"
|
||||
|
||||
from collections import defaultdict
|
||||
import re
|
||||
from types import ModuleType
|
||||
from typing import (
|
||||
Any,
|
||||
cast,
|
||||
Dict,
|
||||
Iterable,
|
||||
List,
|
||||
Optional,
|
||||
Pattern,
|
||||
Set,
|
||||
Tuple,
|
||||
Type,
|
||||
TYPE_CHECKING,
|
||||
)
|
||||
import warnings
|
||||
import sys
|
||||
from bs4.element import (
|
||||
AttributeDict,
|
||||
AttributeValueList,
|
||||
CharsetMetaAttributeValue,
|
||||
ContentMetaAttributeValue,
|
||||
RubyParenthesisString,
|
||||
RubyTextString,
|
||||
Stylesheet,
|
||||
Script,
|
||||
TemplateString,
|
||||
nonwhitespace_re,
|
||||
)
|
||||
|
||||
# Exceptions were moved to their own module in 4.13. Import here for
|
||||
# backwards compatibility.
|
||||
from bs4.exceptions import ParserRejectedMarkup
|
||||
|
||||
from bs4._typing import (
|
||||
_AttributeValues,
|
||||
_RawAttributeValue,
|
||||
)
|
||||
|
||||
from bs4._warnings import XMLParsedAsHTMLWarning
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from bs4 import BeautifulSoup
|
||||
from bs4.element import (
|
||||
NavigableString,
|
||||
Tag,
|
||||
)
|
||||
from bs4._typing import (
|
||||
_AttributeValue,
|
||||
_Encoding,
|
||||
_Encodings,
|
||||
_RawOrProcessedAttributeValues,
|
||||
_RawMarkup,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"HTMLTreeBuilder",
|
||||
"SAXTreeBuilder",
|
||||
"TreeBuilder",
|
||||
"TreeBuilderRegistry",
|
||||
]
|
||||
|
||||
# Some useful features for a TreeBuilder to have.
|
||||
FAST = "fast"
|
||||
PERMISSIVE = "permissive"
|
||||
STRICT = "strict"
|
||||
XML = "xml"
|
||||
HTML = "html"
|
||||
HTML_5 = "html5"
|
||||
|
||||
__all__ = [
|
||||
"TreeBuilderRegistry",
|
||||
"TreeBuilder",
|
||||
"HTMLTreeBuilder",
|
||||
"DetectsXMLParsedAsHTML",
|
||||
|
||||
"ParserRejectedMarkup", # backwards compatibility only as of 4.13.0
|
||||
]
|
||||
|
||||
class TreeBuilderRegistry(object):
|
||||
"""A way of looking up TreeBuilder subclasses by their name or by desired
|
||||
features.
|
||||
"""
|
||||
|
||||
builders_for_feature: Dict[str, List[Type[TreeBuilder]]]
|
||||
builders: List[Type[TreeBuilder]]
|
||||
|
||||
def __init__(self) -> None:
|
||||
self.builders_for_feature = defaultdict(list)
|
||||
self.builders = []
|
||||
|
||||
def register(self, treebuilder_class: type[TreeBuilder]) -> None:
|
||||
"""Register a treebuilder based on its advertised features.
|
||||
|
||||
:param treebuilder_class: A subclass of `TreeBuilder`. its
|
||||
`TreeBuilder.features` attribute should list its features.
|
||||
"""
|
||||
for feature in treebuilder_class.features:
|
||||
self.builders_for_feature[feature].insert(0, treebuilder_class)
|
||||
self.builders.insert(0, treebuilder_class)
|
||||
|
||||
def lookup(self, *features: str) -> Optional[Type[TreeBuilder]]:
|
||||
"""Look up a TreeBuilder subclass with the desired features.
|
||||
|
||||
:param features: A list of features to look for. If none are
|
||||
provided, the most recently registered TreeBuilder subclass
|
||||
will be used.
|
||||
:return: A TreeBuilder subclass, or None if there's no
|
||||
registered subclass with all the requested features.
|
||||
"""
|
||||
if len(self.builders) == 0:
|
||||
# There are no builders at all.
|
||||
return None
|
||||
|
||||
if len(features) == 0:
|
||||
# They didn't ask for any features. Give them the most
|
||||
# recently registered builder.
|
||||
return self.builders[0]
|
||||
|
||||
# Go down the list of features in order, and eliminate any builders
|
||||
# that don't match every feature.
|
||||
feature_list = list(features)
|
||||
feature_list.reverse()
|
||||
candidates = None
|
||||
candidate_set = None
|
||||
while len(feature_list) > 0:
|
||||
feature = feature_list.pop()
|
||||
we_have_the_feature = self.builders_for_feature.get(feature, [])
|
||||
if len(we_have_the_feature) > 0:
|
||||
if candidates is None:
|
||||
candidates = we_have_the_feature
|
||||
candidate_set = set(candidates)
|
||||
else:
|
||||
# Eliminate any candidates that don't have this feature.
|
||||
candidate_set = candidate_set.intersection(set(we_have_the_feature))
|
||||
|
||||
# The only valid candidates are the ones in candidate_set.
|
||||
# Go through the original list of candidates and pick the first one
|
||||
# that's in candidate_set.
|
||||
if candidate_set is None or candidates is None:
|
||||
return None
|
||||
for candidate in candidates:
|
||||
if candidate in candidate_set:
|
||||
return candidate
|
||||
return None
|
||||
|
||||
|
||||
#: The `BeautifulSoup` constructor will take a list of features
|
||||
#: and use it to look up `TreeBuilder` classes in this registry.
|
||||
builder_registry: TreeBuilderRegistry = TreeBuilderRegistry()
|
||||
|
||||
|
||||
class TreeBuilder(object):
|
||||
"""Turn a textual document into a Beautiful Soup object tree.
|
||||
|
||||
This is an abstract superclass which smooths out the behavior of
|
||||
different parser libraries into a single, unified interface.
|
||||
|
||||
:param multi_valued_attributes: If this is set to None, the
|
||||
TreeBuilder will not turn any values for attributes like
|
||||
'class' into lists. Setting this to a dictionary will
|
||||
customize this behavior; look at :py:attr:`bs4.builder.HTMLTreeBuilder.DEFAULT_CDATA_LIST_ATTRIBUTES`
|
||||
for an example.
|
||||
|
||||
Internally, these are called "CDATA list attributes", but that
|
||||
probably doesn't make sense to an end-user, so the argument name
|
||||
is ``multi_valued_attributes``.
|
||||
|
||||
:param preserve_whitespace_tags: A set of tags to treat
|
||||
the way <pre> tags are treated in HTML. Tags in this set
|
||||
are immune from pretty-printing; their contents will always be
|
||||
output as-is.
|
||||
|
||||
:param string_containers: A dictionary mapping tag names to
|
||||
the classes that should be instantiated to contain the textual
|
||||
contents of those tags. The default is to use NavigableString
|
||||
for every tag, no matter what the name. You can override the
|
||||
default by changing :py:attr:`DEFAULT_STRING_CONTAINERS`.
|
||||
|
||||
:param store_line_numbers: If the parser keeps track of the line
|
||||
numbers and positions of the original markup, that information
|
||||
will, by default, be stored in each corresponding
|
||||
:py:class:`bs4.element.Tag` object. You can turn this off by
|
||||
passing store_line_numbers=False; then Tag.sourcepos and
|
||||
Tag.sourceline will always be None. If the parser you're using
|
||||
doesn't keep track of this information, then store_line_numbers
|
||||
is irrelevant.
|
||||
|
||||
:param attribute_dict_class: The value of a multi-valued attribute
|
||||
(such as HTML's 'class') willl be stored in an instance of this
|
||||
class. The default is Beautiful Soup's built-in
|
||||
`AttributeValueList`, which is a normal Python list, and you
|
||||
will probably never need to change it.
|
||||
"""
|
||||
|
||||
USE_DEFAULT: Any = object() #: :meta private:
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
multi_valued_attributes: Dict[str, Set[str]] = USE_DEFAULT,
|
||||
preserve_whitespace_tags: Set[str] = USE_DEFAULT,
|
||||
store_line_numbers: bool = USE_DEFAULT,
|
||||
string_containers: Dict[str, Type[NavigableString]] = USE_DEFAULT,
|
||||
empty_element_tags: Set[str] = USE_DEFAULT,
|
||||
attribute_dict_class: Type[AttributeDict] = AttributeDict,
|
||||
attribute_value_list_class: Type[AttributeValueList] = AttributeValueList,
|
||||
):
|
||||
self.soup = None
|
||||
if multi_valued_attributes is self.USE_DEFAULT:
|
||||
multi_valued_attributes = self.DEFAULT_CDATA_LIST_ATTRIBUTES
|
||||
self.cdata_list_attributes = multi_valued_attributes
|
||||
if preserve_whitespace_tags is self.USE_DEFAULT:
|
||||
preserve_whitespace_tags = self.DEFAULT_PRESERVE_WHITESPACE_TAGS
|
||||
self.preserve_whitespace_tags = preserve_whitespace_tags
|
||||
if empty_element_tags is self.USE_DEFAULT:
|
||||
self.empty_element_tags = self.DEFAULT_EMPTY_ELEMENT_TAGS
|
||||
else:
|
||||
self.empty_element_tags = empty_element_tags
|
||||
# TODO: store_line_numbers is probably irrelevant now that
|
||||
# the behavior of sourceline and sourcepos has been made consistent
|
||||
# everywhere.
|
||||
if store_line_numbers == self.USE_DEFAULT:
|
||||
store_line_numbers = self.TRACKS_LINE_NUMBERS
|
||||
self.store_line_numbers = store_line_numbers
|
||||
if string_containers == self.USE_DEFAULT:
|
||||
string_containers = self.DEFAULT_STRING_CONTAINERS
|
||||
self.string_containers = string_containers
|
||||
self.attribute_dict_class = attribute_dict_class
|
||||
self.attribute_value_list_class = attribute_value_list_class
|
||||
|
||||
    #: Human-readable name of this tree builder.
    NAME: str = "[Unknown tree builder]"
    #: Other names under which this builder may be looked up.
    ALTERNATE_NAMES: Iterable[str] = []
    #: Feature strings describing this builder's capabilities.
    features: Iterable[str] = []

    #: Whether this builder parses documents under XML rules.
    is_xml: bool = False
    #: Whether trees made by this builder can be pickled.
    picklable: bool = False

    #: The BeautifulSoup object currently associated with this
    #: builder; set by initialize_soup().
    soup: Optional[BeautifulSoup] #: :meta private:

    #: A tag will be considered an empty-element
    #: tag when and only when it has no contents.
    empty_element_tags: Optional[Set[str]] = None #: :meta private:
    cdata_list_attributes: Dict[str, Set[str]] #: :meta private:
    preserve_whitespace_tags: Set[str] #: :meta private:
    string_containers: Dict[str, Type[NavigableString]] #: :meta private:
    tracks_line_numbers: bool #: :meta private:

    #: A value for these tag/attribute combinations is a space- or
    #: comma-separated list of CDATA, rather than a single CDATA.
    DEFAULT_CDATA_LIST_ATTRIBUTES: Dict[str, Set[str]] = defaultdict(set)

    #: Whitespace should be preserved inside these tags.
    DEFAULT_PRESERVE_WHITESPACE_TAGS: Set[str] = set()

    #: The textual contents of tags with these names should be
    #: instantiated with some class other than `bs4.element.NavigableString`.
    DEFAULT_STRING_CONTAINERS: Dict[str, Type[bs4.element.NavigableString]] = {}

    #: By default, tags are treated as empty-element tags if they have
    #: no contents--that is, using XML rules. HTMLTreeBuilder
    #: defines a different set of DEFAULT_EMPTY_ELEMENT_TAGS based on the
    #: HTML 4 and HTML5 standards.
    DEFAULT_EMPTY_ELEMENT_TAGS: Optional[Set[str]] = None

    #: Most parsers don't keep track of line numbers.
    TRACKS_LINE_NUMBERS: bool = False
|
||||
|
||||
def initialize_soup(self, soup: BeautifulSoup) -> None:
|
||||
"""The BeautifulSoup object has been initialized and is now
|
||||
being associated with the TreeBuilder.
|
||||
|
||||
:param soup: A BeautifulSoup object.
|
||||
"""
|
||||
self.soup = soup
|
||||
|
||||
def reset(self) -> None:
|
||||
"""Do any work necessary to reset the underlying parser
|
||||
for a new document.
|
||||
|
||||
By default, this does nothing.
|
||||
"""
|
||||
pass
|
||||
|
||||
def can_be_empty_element(self, tag_name: str) -> bool:
|
||||
"""Might a tag with this name be an empty-element tag?
|
||||
|
||||
The final markup may or may not actually present this tag as
|
||||
self-closing.
|
||||
|
||||
For instance: an HTMLBuilder does not consider a <p> tag to be
|
||||
an empty-element tag (it's not in
|
||||
HTMLBuilder.empty_element_tags). This means an empty <p> tag
|
||||
will be presented as "<p></p>", not "<p/>" or "<p>".
|
||||
|
||||
The default implementation has no opinion about which tags are
|
||||
empty-element tags, so a tag will be presented as an
|
||||
empty-element tag if and only if it has no children.
|
||||
"<foo></foo>" will become "<foo/>", and "<foo>bar</foo>" will
|
||||
be left alone.
|
||||
|
||||
:param tag_name: The name of a markup tag.
|
||||
"""
|
||||
if self.empty_element_tags is None:
|
||||
return True
|
||||
return tag_name in self.empty_element_tags
|
||||
|
||||
def feed(self, markup: _RawMarkup) -> None:
|
||||
"""Run incoming markup through some parsing process."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def prepare_markup(
|
||||
self,
|
||||
markup: _RawMarkup,
|
||||
user_specified_encoding: Optional[_Encoding] = None,
|
||||
document_declared_encoding: Optional[_Encoding] = None,
|
||||
exclude_encodings: Optional[_Encodings] = None,
|
||||
) -> Iterable[Tuple[_RawMarkup, Optional[_Encoding], Optional[_Encoding], bool]]:
|
||||
"""Run any preliminary steps necessary to make incoming markup
|
||||
acceptable to the parser.
|
||||
|
||||
:param markup: The markup that's about to be parsed.
|
||||
:param user_specified_encoding: The user asked to try this encoding
|
||||
to convert the markup into a Unicode string.
|
||||
:param document_declared_encoding: The markup itself claims to be
|
||||
in this encoding. NOTE: This argument is not used by the
|
||||
calling code and can probably be removed.
|
||||
:param exclude_encodings: The user asked *not* to try any of
|
||||
these encodings.
|
||||
|
||||
:yield: A series of 4-tuples: (markup, encoding, declared encoding,
|
||||
has undergone character replacement)
|
||||
|
||||
Each 4-tuple represents a strategy that the parser can try
|
||||
to convert the document to Unicode and parse it. Each
|
||||
strategy will be tried in turn.
|
||||
|
||||
By default, the only strategy is to parse the markup
|
||||
as-is. See `LXMLTreeBuilderForXML` and
|
||||
`HTMLParserTreeBuilder` for implementations that take into
|
||||
account the quirks of particular parsers.
|
||||
|
||||
:meta private:
|
||||
|
||||
"""
|
||||
yield markup, None, None, False
|
||||
|
||||
def test_fragment_to_document(self, fragment: str) -> str:
|
||||
"""Wrap an HTML fragment to make it look like a document.
|
||||
|
||||
Different parsers do this differently. For instance, lxml
|
||||
introduces an empty <head> tag, and html5lib
|
||||
doesn't. Abstracting this away lets us write simple tests
|
||||
which run HTML fragments through the parser and compare the
|
||||
results against other HTML fragments.
|
||||
|
||||
This method should not be used outside of unit tests.
|
||||
|
||||
:param fragment: A fragment of HTML.
|
||||
:return: A full HTML document.
|
||||
:meta private:
|
||||
"""
|
||||
return fragment
|
||||
|
||||
def set_up_substitutions(self, tag: Tag) -> bool:
|
||||
"""Set up any substitutions that will need to be performed on
|
||||
a `Tag` when it's output as a string.
|
||||
|
||||
By default, this does nothing. See `HTMLTreeBuilder` for a
|
||||
case where this is used.
|
||||
|
||||
:return: Whether or not a substitution was performed.
|
||||
:meta private:
|
||||
"""
|
||||
return False
|
||||
|
||||
def _replace_cdata_list_attribute_values(
|
||||
self, tag_name: str, attrs: _RawOrProcessedAttributeValues
|
||||
) -> _AttributeValues:
|
||||
"""When an attribute value is associated with a tag that can
|
||||
have multiple values for that attribute, convert the string
|
||||
value to a list of strings.
|
||||
|
||||
Basically, replaces class="foo bar" with class=["foo", "bar"]
|
||||
|
||||
NOTE: This method modifies its input in place.
|
||||
|
||||
:param tag_name: The name of a tag.
|
||||
:param attrs: A dictionary containing the tag's attributes.
|
||||
Any appropriate attribute values will be modified in place.
|
||||
:return: The modified dictionary that was originally passed in.
|
||||
"""
|
||||
|
||||
# First, cast the attrs dict to _AttributeValues. This might
|
||||
# not be accurate yet, but it will be by the time this method
|
||||
# returns.
|
||||
modified_attrs = cast(_AttributeValues, attrs)
|
||||
if not modified_attrs or not self.cdata_list_attributes:
|
||||
# Nothing to do.
|
||||
return modified_attrs
|
||||
|
||||
# There is at least a possibility that we need to modify one of
|
||||
# the attribute values.
|
||||
universal: Set[str] = self.cdata_list_attributes.get("*", set())
|
||||
tag_specific = self.cdata_list_attributes.get(tag_name.lower(), None)
|
||||
for attr in list(modified_attrs.keys()):
|
||||
modified_value: _AttributeValue
|
||||
if attr in universal or (tag_specific and attr in tag_specific):
|
||||
# We have a "class"-type attribute whose string
|
||||
# value is a whitespace-separated list of
|
||||
# values. Split it into a list.
|
||||
original_value: _AttributeValue = modified_attrs[attr]
|
||||
if isinstance(original_value, _RawAttributeValue):
|
||||
# This is a _RawAttributeValue (a string) that
|
||||
# needs to be split and converted to a
|
||||
# AttributeValueList so it can be an
|
||||
# _AttributeValue.
|
||||
modified_value = self.attribute_value_list_class(
|
||||
nonwhitespace_re.findall(original_value)
|
||||
)
|
||||
else:
|
||||
# html5lib calls setAttributes twice for the
|
||||
# same tag when rearranging the parse tree. On
|
||||
# the second call the attribute value here is
|
||||
# already a list. This can also happen when a
|
||||
# Tag object is cloned. If this happens, leave
|
||||
# the value alone rather than trying to split
|
||||
# it again.
|
||||
modified_value = original_value
|
||||
modified_attrs[attr] = modified_value
|
||||
return modified_attrs
|
||||
|
||||
|
||||
class SAXTreeBuilder(TreeBuilder):
    """A Beautiful Soup treebuilder that listens for SAX events.

    This is not currently used for anything, and it will be removed
    soon. It was a good idea, but it wasn't properly integrated into the
    rest of Beautiful Soup, so there have been long stretches where it
    hasn't worked properly.
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Deprecated: warn on every instantiation before deferring to
        # the normal TreeBuilder setup.
        warnings.warn(
            "The SAXTreeBuilder class was deprecated in 4.13.0 and will be removed soon thereafter. It is completely untested and probably doesn't work; do not use it.",
            DeprecationWarning,
            stacklevel=2,
        )
        super(SAXTreeBuilder, self).__init__(*args, **kwargs)

    def feed(self, markup: _RawMarkup) -> None:
        """Parsing is driven by SAX events, not by feeding markup."""
        raise NotImplementedError()

    def close(self) -> None:
        """No teardown is needed when the event stream ends."""
        pass

    def startElement(self, name: str, attrs: Dict[str, str]) -> None:
        """SAX callback: an opening tag was seen."""
        # SAX delivers attribute names as tuples; keep only element
        # key[1] (presumably the local name -- TODO confirm against
        # the SAX driver in use).
        attrs = AttributeDict((key[1], value) for key, value in list(attrs.items()))
        # print("Start %s, %r" % (name, attrs))
        assert self.soup is not None
        self.soup.handle_starttag(name, None, None, attrs)

    def endElement(self, name: str) -> None:
        """SAX callback: a closing tag was seen."""
        # print("End %s" % name)
        assert self.soup is not None
        self.soup.handle_endtag(name)

    def startElementNS(
        self, nsTuple: Tuple[str, str], nodeName: str, attrs: Dict[str, str]
    ) -> None:
        """Namespaced variant of startElement; the namespace is ignored."""
        # Throw away (ns, nodeName) for now.
        self.startElement(nodeName, attrs)

    def endElementNS(self, nsTuple: Tuple[str, str], nodeName: str) -> None:
        """Namespaced variant of endElement; the namespace is ignored."""
        # Throw away (ns, nodeName) for now.
        self.endElement(nodeName)
        # handler.endElementNS((ns, node.nodeName), node.nodeName)

    def startPrefixMapping(self, prefix: str, nodeValue: str) -> None:
        """SAX callback: a namespace prefix mapping began; ignored."""
        # Ignore the prefix for now.
        pass

    def endPrefixMapping(self, prefix: str) -> None:
        """SAX callback: a namespace prefix mapping ended; ignored."""
        # Ignore the prefix for now.
        # handler.endPrefixMapping(prefix)
        pass

    def characters(self, content: str) -> None:
        """SAX callback: textual content was seen inside the current tag."""
        assert self.soup is not None
        self.soup.handle_data(content)

    def startDocument(self) -> None:
        """SAX callback: document started; nothing to do."""
        pass

    def endDocument(self) -> None:
        """SAX callback: document ended; nothing to do."""
        pass
|
||||
|
||||
|
||||
class HTMLTreeBuilder(TreeBuilder):
    """This TreeBuilder knows facts about HTML, such as which tags are treated
    specially by the HTML standard.
    """

    #: Some HTML tags are defined as having no contents. Beautiful Soup
    #: treats these specially.
    DEFAULT_EMPTY_ELEMENT_TAGS: Set[str] = set(
        [
            # These are from HTML5.
            "area",
            "base",
            "br",
            "col",
            "embed",
            "hr",
            "img",
            "input",
            "keygen",
            "link",
            "menuitem",
            "meta",
            "param",
            "source",
            "track",
            "wbr",
            # These are from earlier versions of HTML and are removed in HTML5.
            "basefont",
            "bgsound",
            "command",
            "frame",
            "image",
            "isindex",
            "nextid",
            "spacer",
        ]
    )

    #: The HTML standard defines these tags as block-level elements. Beautiful
    #: Soup does not treat these elements differently from other elements,
    #: but it may do so eventually, and this information is available if
    #: you need to use it.
    DEFAULT_BLOCK_ELEMENTS: Set[str] = set(
        [
            "address",
            "article",
            "aside",
            "blockquote",
            "canvas",
            "dd",
            "div",
            "dl",
            "dt",
            "fieldset",
            "figcaption",
            "figure",
            "footer",
            "form",
            "h1",
            "h2",
            "h3",
            "h4",
            "h5",
            "h6",
            "header",
            "hr",
            "li",
            "main",
            "nav",
            "noscript",
            "ol",
            "output",
            "p",
            "pre",
            "section",
            "table",
            "tfoot",
            "ul",
            "video",
        ]
    )

    #: These HTML tags need special treatment so they can be
    #: represented by a string class other than `bs4.element.NavigableString`.
    #:
    #: For some of these tags, it's because the HTML standard defines
    #: an unusual content model for them. I made this list by going
    #: through the HTML spec
    #: (https://html.spec.whatwg.org/#metadata-content) and looking for
    #: "metadata content" elements that can contain strings.
    #:
    #: The Ruby tags (<rt> and <rp>) are here despite being normal
    #: "phrasing content" tags, because the content they contain is
    #: qualitatively different from other text in the document, and it
    #: can be useful to be able to distinguish it.
    #:
    #: TODO: Arguably <noscript> could go here but it seems
    #: qualitatively different from the other tags.
    DEFAULT_STRING_CONTAINERS: Dict[str, Type[bs4.element.NavigableString]] = {
        "rt": RubyTextString,
        "rp": RubyParenthesisString,
        "style": Stylesheet,
        "script": Script,
        "template": TemplateString,
    }

    #: The HTML standard defines these attributes as containing a
    #: space-separated list of values, not a single value. That is,
    #: class="foo bar" means that the 'class' attribute has two values,
    #: 'foo' and 'bar', not the single value 'foo bar'. When we
    #: encounter one of these attributes, we will parse its value into
    #: a list of values if possible. Upon output, the list will be
    #: converted back into a string.
    DEFAULT_CDATA_LIST_ATTRIBUTES: Dict[str, Set[str]] = {
        "*": {"class", "accesskey", "dropzone"},
        "a": {"rel", "rev"},
        "link": {"rel", "rev"},
        "td": {"headers"},
        "th": {"headers"},
        "form": {"accept-charset"},
        "object": {"archive"},
        # These are HTML5 specific, as are *.accesskey and *.dropzone above.
        "area": {"rel"},
        "icon": {"sizes"},
        "iframe": {"sandbox"},
        "output": {"for"},
    }

    #: By default, whitespace inside these HTML tags will be
    #: preserved rather than being collapsed.
    #: (Annotated with typing.Set for consistency with the rest of
    #: this module, which uses the typing aliases throughout.)
    DEFAULT_PRESERVE_WHITESPACE_TAGS: Set[str] = set(["pre", "textarea"])

    def set_up_substitutions(self, tag: Tag) -> bool:
        """Replace the declared encoding in a <meta> tag with a placeholder,
        to be substituted when the tag is output to a string.

        An HTML document may come in to Beautiful Soup as one
        encoding, but exit in a different encoding, and the <meta> tag
        needs to be changed to reflect this.

        :return: Whether or not a substitution was performed.

        :meta private:
        """
        # We are only interested in <meta> tags
        if tag.name != "meta":
            return False

        # TODO: This cast will fail in the (very unlikely) scenario
        # that the programmer who instantiates the TreeBuilder
        # specifies meta['content'] or meta['charset'] as
        # cdata_list_attributes.
        content: Optional[str] = cast(Optional[str], tag.get("content"))
        charset: Optional[str] = cast(Optional[str], tag.get("charset"))

        # But we can accommodate meta['http-equiv'] being made a
        # cdata_list_attribute (again, very unlikely) without much
        # trouble.
        http_equiv: List[str] = tag.get_attribute_list("http-equiv")

        # We are interested in <meta> tags that say what encoding the
        # document was originally in. This means HTML 5-style <meta>
        # tags that provide the "charset" attribute. It also means
        # HTML 4-style <meta> tags that provide the "content"
        # attribute and have "http-equiv" set to "content-type".
        #
        # In both cases we will replace the value of the appropriate
        # attribute with a standin object that can take on any
        # encoding.
        substituted = False
        if charset is not None:
            # HTML 5 style:
            # <meta charset="utf8">
            tag["charset"] = CharsetMetaAttributeValue(charset)
            substituted = True

        elif content is not None and any(
            x.lower() == "content-type" for x in http_equiv
        ):
            # HTML 4 style:
            # <meta http-equiv="content-type" content="text/html; charset=utf8">
            tag["content"] = ContentMetaAttributeValue(content)
            substituted = True

        return substituted
|
||||
|
||||
|
||||
class DetectsXMLParsedAsHTML(object):
    """A mixin for any class (a TreeBuilder, or some class a TreeBuilder
    uses) that is in a position to notice that an XML document is being
    incorrectly parsed as HTML, and to issue an appropriate warning.

    The reliable implementation requires observing an incoming
    processing instruction that might be an XML declaration, plus tags
    as they're opened. For `TreeBuilder`\\s that can't do that, there is
    a less reliable implementation that examines the raw markup.
    """

    #: Regular expression for seeing if string markup has an <html> tag.
    LOOKS_LIKE_HTML: Pattern[str] = re.compile("<[^ +]html", re.I)

    #: Regular expression for seeing if byte markup has an <html> tag.
    LOOKS_LIKE_HTML_B: Pattern[bytes] = re.compile(b"<[^ +]html", re.I)

    #: The start of an XML document string.
    XML_PREFIX: str = "<?xml"

    #: The start of an XML document bytestring.
    XML_PREFIX_B: bytes = b"<?xml"

    # Typed as str rather than `ProcessingInstruction`, because this
    # check may run before any Beautiful Soup objects are created.
    _first_processing_instruction: Optional[str] #: :meta private:
    _root_tag_name: Optional[str] #: :meta private:

    @classmethod
    def warn_if_markup_looks_like_xml(
        cls, markup: Optional[_RawMarkup], stacklevel: int = 3
    ) -> bool:
        """Check some markup to see if it looks like XML that is not
        XHTML, and issue a warning if so.

        Much less reliable than doing the check during parsing, but
        some of the tree builders can't do that.

        :param stacklevel: The stacklevel of the code calling this\
            function.

        :return: True if the markup looks like non-XHTML XML, False
            otherwise.
        """
        if markup is None:
            return False
        prefix = markup[:500]
        if isinstance(prefix, bytes):
            declares_xml = prefix.startswith(cls.XML_PREFIX_B)
            mentions_html = cls.LOOKS_LIKE_HTML_B.search(prefix) is not None
        else:
            declares_xml = prefix.startswith(cls.XML_PREFIX)
            mentions_html = cls.LOOKS_LIKE_HTML.search(prefix) is not None

        if not declares_xml or mentions_html:
            return False
        cls._warn(stacklevel=stacklevel + 2)
        return True

    @classmethod
    def _warn(cls, stacklevel: int = 5) -> None:
        """Issue a warning about XML being parsed as HTML."""
        warnings.warn(
            XMLParsedAsHTMLWarning.MESSAGE,
            XMLParsedAsHTMLWarning,
            stacklevel=stacklevel,
        )

    def _initialize_xml_detector(self) -> None:
        """Call this method before parsing a document."""
        self._first_processing_instruction = None
        self._root_tag_name = None

    def _document_might_be_xml(self, processing_instruction: str) -> None:
        """Call this method when encountering an XML declaration, or a
        "processing instruction" that might be an XML declaration.

        This helps Beautiful Soup detect potential issues later, if
        the XML document turns out to be a non-XHTML document that's
        being parsed as XML.
        """
        document_already_started = (
            self._first_processing_instruction is not None
            or self._root_tag_name is not None
        )
        if document_already_started:
            # No further checking is needed.
            return

        self._first_processing_instruction = processing_instruction
        # Whether this is actually a problem can only be decided once
        # the first tag is encountered.

    def _root_tag_encountered(self, name: str) -> None:
        """Call this when you encounter the document's root tag.

        This is where we actually check whether an XML document is
        being incorrectly parsed as HTML, and issue the warning.
        """
        if self._root_tag_name is not None:
            # Incorrectly called more than once; do nothing.
            return

        self._root_tag_name = name

        first_pi = self._first_processing_instruction
        if name == "html" or first_pi is None:
            return
        if first_pi.lower().startswith("xml "):
            # An XML declaration followed by a root tag other than
            # 'html' is a reliable indicator that a non-XHTML document
            # is being parsed as XML.
            self._warn(stacklevel=10)
|
||||
|
||||
|
||||
def register_treebuilders_from(module: ModuleType) -> None:
    """Copy TreeBuilders from the given module into this module.

    Every TreeBuilder subclass listed in the module's ``__all__`` is
    re-exported from this module and registered with the builder
    registry.

    :param module: A module whose ``__all__`` names its TreeBuilders.
    """
    this_module = sys.modules[__name__]
    for name in module.__all__:
        obj = getattr(module, name)

        # __all__ may legitimately list non-class names; issubclass()
        # raises TypeError on those, so check isinstance(obj, type) first.
        if isinstance(obj, type) and issubclass(obj, TreeBuilder):
            setattr(this_module, name, obj)
            this_module.__all__.append(name)
            # Register the builder while we're at it.
            this_module.builder_registry.register(obj)
|
||||
|
||||
|
||||
# Builders are registered in reverse order of priority, so that custom
# builder registrations will take precedence. In general, we want lxml
# to take precedence over html5lib, because it's faster. And we only
# want to use HTMLParser as a last resort.
# html.parser ships with the standard library, so its registration is
# unconditional; the third-party backends are optional.
from . import _htmlparser # noqa: E402

register_treebuilders_from(_htmlparser)
try:
    from . import _html5lib

    register_treebuilders_from(_html5lib)
except ImportError:
    # They don't have html5lib installed.
    pass
try:
    from . import _lxml

    register_treebuilders_from(_lxml)
except ImportError:
    # They don't have lxml installed.
    pass
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
594
gui/lib/python3.12/site-packages/bs4/builder/_html5lib.py
Normal file
594
gui/lib/python3.12/site-packages/bs4/builder/_html5lib.py
Normal file
@ -0,0 +1,594 @@
|
||||
# Use of this source code is governed by the MIT license.
|
||||
__license__ = "MIT"
|
||||
|
||||
__all__ = [
|
||||
"HTML5TreeBuilder",
|
||||
]
|
||||
|
||||
from typing import (
|
||||
Any,
|
||||
cast,
|
||||
Dict,
|
||||
Iterable,
|
||||
Optional,
|
||||
Sequence,
|
||||
TYPE_CHECKING,
|
||||
Tuple,
|
||||
Union,
|
||||
)
|
||||
from typing_extensions import TypeAlias
|
||||
from bs4._typing import (
|
||||
_AttributeValue,
|
||||
_AttributeValues,
|
||||
_Encoding,
|
||||
_Encodings,
|
||||
_NamespaceURL,
|
||||
_RawMarkup,
|
||||
)
|
||||
|
||||
import warnings
|
||||
from bs4.builder import (
|
||||
DetectsXMLParsedAsHTML,
|
||||
PERMISSIVE,
|
||||
HTML,
|
||||
HTML_5,
|
||||
HTMLTreeBuilder,
|
||||
)
|
||||
from bs4.element import (
|
||||
NamespacedAttribute,
|
||||
PageElement,
|
||||
nonwhitespace_re,
|
||||
)
|
||||
import html5lib
|
||||
from html5lib.constants import (
|
||||
namespaces,
|
||||
)
|
||||
from bs4.element import (
|
||||
Comment,
|
||||
Doctype,
|
||||
NavigableString,
|
||||
Tag,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from bs4 import BeautifulSoup
|
||||
|
||||
from html5lib.treebuilders import base as treebuilder_base
|
||||
|
||||
|
||||
class HTML5TreeBuilder(HTMLTreeBuilder):
    """Use `html5lib <https://github.com/html5lib/html5lib-python>`_ to
    build a tree.

    Note that `HTML5TreeBuilder` does not support some common HTML
    `TreeBuilder` features. Some of these features could theoretically
    be implemented, but at the very least it's quite difficult,
    because html5lib moves the parse tree around as it's being built.

    Specifically:

    * This `TreeBuilder` doesn't use different subclasses of
      `NavigableString` (e.g. `Script`) based on the name of the tag
      in which the string was found.
    * You can't use a `SoupStrainer` to parse only part of a document.
    """

    NAME: str = "html5lib"

    features: Sequence[str] = [NAME, PERMISSIVE, HTML_5, HTML]

    #: html5lib can tell us which line number and position in the
    #: original file is the source of an element.
    TRACKS_LINE_NUMBERS: bool = True

    underlying_builder: "TreeBuilderForHtml5lib" #: :meta private:
    user_specified_encoding: Optional[_Encoding]

    def prepare_markup(
        self,
        markup: _RawMarkup,
        user_specified_encoding: Optional[_Encoding] = None,
        document_declared_encoding: Optional[_Encoding] = None,
        exclude_encodings: Optional[_Encodings] = None,
    ) -> Iterable[Tuple[_RawMarkup, Optional[_Encoding], Optional[_Encoding], bool]]:
        """See `TreeBuilder`. Yields a single strategy: the markup
        as-is, with encoding detection deferred to html5lib itself.
        """
        # Store the user-specified encoding for use later on.
        self.user_specified_encoding = user_specified_encoding

        # document_declared_encoding and exclude_encodings aren't used
        # ATM because the html5lib TreeBuilder doesn't use
        # UnicodeDammit.
        for variable, name in (
            (document_declared_encoding, "document_declared_encoding"),
            (exclude_encodings, "exclude_encodings"),
        ):
            if variable:
                warnings.warn(
                    f"You provided a value for {name}, but the html5lib tree builder doesn't support {name}.",
                    stacklevel=3,
                )

        # html5lib only parses HTML, so if it's given XML that's worth
        # noting.
        DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml(markup, stacklevel=3)

        yield (markup, None, None, False)

    # These methods are defined by Beautiful Soup.
    def feed(self, markup: _RawMarkup) -> None:
        """Run some incoming markup through some parsing process,
        populating the `BeautifulSoup` object in `HTML5TreeBuilder.soup`.
        """
        if self.soup is not None and self.soup.parse_only is not None:
            warnings.warn(
                "You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.",
                stacklevel=4,
            )

        # self.underlying_builder is probably None now, but it'll be set
        # when html5lib calls self.create_treebuilder().
        parser = html5lib.HTMLParser(tree=self.create_treebuilder)
        assert self.underlying_builder is not None
        self.underlying_builder.parser = parser
        extra_kwargs = dict()
        if not isinstance(markup, str):
            # kwargs, specifically override_encoding, will eventually
            # be passed in to html5lib's
            # HTMLBinaryInputStream.__init__.
            extra_kwargs["override_encoding"] = self.user_specified_encoding

        doc = parser.parse(markup, **extra_kwargs)

        # Set the character encoding detected by the tokenizer.
        if isinstance(markup, str):
            # We need to special-case this because html5lib sets
            # charEncoding to UTF-8 if it gets Unicode input.
            doc.original_encoding = None
        else:
            original_encoding = parser.tokenizer.stream.charEncoding[0]
            # The encoding is an html5lib Encoding object. We want to
            # use a string for compatibility with other tree builders.
            original_encoding = original_encoding.name
            doc.original_encoding = original_encoding
        # Drop the parser reference so the builder doesn't keep the
        # whole parser alive after the parse is done.
        self.underlying_builder.parser = None

    def create_treebuilder(
        self, namespaceHTMLElements: bool
    ) -> "TreeBuilderForHtml5lib":
        """Called by html5lib to instantiate the kind of class it
        calls a 'TreeBuilder'.

        :param namespaceHTMLElements: Whether or not to namespace HTML elements.

        :meta private:
        """
        self.underlying_builder = TreeBuilderForHtml5lib(
            namespaceHTMLElements, self.soup, store_line_numbers=self.store_line_numbers
        )
        return self.underlying_builder

    def test_fragment_to_document(self, fragment: str) -> str:
        """See `TreeBuilder`."""
        return "<html><head></head><body>%s</body></html>" % fragment
|
||||
|
||||
|
||||
class TreeBuilderForHtml5lib(treebuilder_base.TreeBuilder):
    """Implements the html5lib TreeBuilder API on top of a BeautifulSoup
    object: html5lib drives the parse and this class translates each
    tree-construction call into operations on ``self.soup``.
    """

    soup: "BeautifulSoup"  #: :meta private:
    parser: Optional[html5lib.HTMLParser]  #: :meta private:

    def __init__(
        self,
        namespaceHTMLElements: bool,
        soup: Optional["BeautifulSoup"] = None,
        store_line_numbers: bool = True,
        **kwargs: Any,
    ):
        if soup:
            self.soup = soup
        else:
            # Deprecated path: create a throwaway BeautifulSoup to build into.
            warnings.warn(
                "The optionality of the 'soup' argument to the TreeBuilderForHtml5lib constructor is deprecated as of Beautiful Soup 4.13.0: 'soup' is now required. If you can't pass in a BeautifulSoup object here, or you get this warning and it seems mysterious to you, please contact the Beautiful Soup developer team for possible un-deprecation.",
                DeprecationWarning,
                stacklevel=2,
            )
            from bs4 import BeautifulSoup

            # TODO: Why is the parser 'html.parser' here? Using
            # html5lib doesn't cause an infinite loop and is more
            # accurate. Best to get rid of this entire section, I think.
            self.soup = BeautifulSoup(
                "", "html.parser", store_line_numbers=store_line_numbers, **kwargs
            )
        # TODO: What are **kwargs exactly? Should they be passed in
        # here in addition to/instead of being passed to the BeautifulSoup
        # constructor?
        super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements)

        # This will be set later to a real html5lib HTMLParser object,
        # which we can use to track the current line number.
        self.parser = None
        self.store_line_numbers = store_line_numbers

    def documentClass(self) -> "Element":
        """Reset the BeautifulSoup object and return it, wrapped in an
        Element, as the document root."""
        self.soup.reset()
        return Element(self.soup, self.soup, None)

    def insertDoctype(self, token: Dict[str, Any]) -> None:
        """Build a Doctype object from an html5lib doctype token and
        insert it into the tree."""
        name: str = cast(str, token["name"])
        publicId: Optional[str] = cast(Optional[str], token["publicId"])
        systemId: Optional[str] = cast(Optional[str], token["systemId"])

        doctype = Doctype.for_name_and_ids(name, publicId, systemId)
        self.soup.object_was_parsed(doctype)

    def elementClass(self, name: str, namespace: str) -> "Element":
        """Create a new Tag for the given name/namespace, tagged with the
        source line/position when line-number tracking is enabled, and
        return it wrapped in an Element."""
        sourceline: Optional[int] = None
        sourcepos: Optional[int] = None
        if self.parser is not None and self.store_line_numbers:
            # This represents the point immediately after the end of the
            # tag. We don't know when the tag started, but we do know
            # where it ended -- the character just before this one.
            sourceline, sourcepos = self.parser.tokenizer.stream.position()
            assert sourcepos is not None
            sourcepos = sourcepos - 1
        tag = self.soup.new_tag(
            name, namespace, sourceline=sourceline, sourcepos=sourcepos
        )

        return Element(tag, self.soup, namespace)

    def commentClass(self, data: str) -> "TextNode":
        """Wrap comment text in a Comment object inside a TextNode."""
        return TextNode(Comment(data), self.soup)

    def fragmentClass(self) -> "Element":
        """This is only used by html5lib HTMLParser.parseFragment(),
        which is never used by Beautiful Soup, only by the html5lib
        unit tests. Since we don't currently hook into those tests,
        the implementation is left blank.
        """
        raise NotImplementedError()

    def getFragment(self) -> "Element":
        """This is only used by the html5lib unit tests. Since we
        don't currently hook into those tests, the implementation is
        left blank.
        """
        raise NotImplementedError()

    def appendChild(self, node: "Element") -> None:
        """Append a node's underlying element to the soup itself."""
        # TODO: This code is not covered by the BS4 tests, and
        # apparently not triggered by the html5lib test suite either.
        # But it doesn't seem test-specific and there are calls to it
        # (or a method with the same name) all over html5lib, so I'm
        # leaving the implementation in place rather than replacing it
        # with NotImplementedError()
        self.soup.append(node.element)

    def getDocument(self) -> "BeautifulSoup":
        """Return the BeautifulSoup object being built."""
        return self.soup

    def testSerializer(self, element: "Element") -> str:
        """This is only used by the html5lib unit tests. Since we
        don't currently hook into those tests, the implementation is
        left blank.
        """
        raise NotImplementedError()
|
||||
|
||||
|
||||
class AttrList(object):
    """Represents a Tag's attributes in a way compatible with html5lib."""

    element: Tag
    attrs: _AttributeValues

    def __init__(self, element: Tag):
        self.element = element
        # Snapshot the tag's attributes; writes go through __setitem__.
        self.attrs = dict(self.element.attrs)

    def __iter__(self) -> Iterable[Tuple[str, _AttributeValue]]:
        return iter(list(self.attrs.items()))

    def __setitem__(self, name: str, value: _AttributeValue) -> None:
        """Set an attribute on the underlying Tag, converting the value
        into a list when the attribute is a multi-valued one (such as
        'class') for this element."""
        cdata_map = self.element.cdata_list_attributes or {}
        is_multi_valued = name in cdata_map.get("*", []) or (
            self.element.name in cdata_map
            and name in cdata_map.get(self.element.name, [])
        )
        if is_multi_valued and not isinstance(value, list):
            # A node that is being cloned may have already undergone
            # this procedure; non-list values still need splitting.
            assert isinstance(value, str)
            value = self.element.attribute_value_list_class(
                nonwhitespace_re.findall(value)
            )
        self.element[name] = value

    def items(self) -> Iterable[Tuple[str, _AttributeValue]]:
        return [pair for pair in self.attrs.items()]

    def keys(self) -> Iterable[str]:
        return [key for key in self.attrs]

    def __len__(self) -> int:
        return len(self.attrs)

    def __getitem__(self, name: str) -> _AttributeValue:
        return self.attrs[name]

    def __contains__(self, name: str) -> bool:
        return name in self.attrs.keys()
|
||||
|
||||
|
||||
class BeautifulSoupNode(treebuilder_base.Node):
    """Common base for the html5lib node wrappers (Element, TextNode):
    holds the wrapped Beautiful Soup object, the soup being built, and
    the node's namespace."""

    element: PageElement
    soup: "BeautifulSoup"
    namespace: Optional[_NamespaceURL]

    @property
    def nodeType(self) -> int:
        """Return the html5lib constant corresponding to the type of
        the underlying DOM object.

        NOTE: This property is only accessed by the html5lib test
        suite, not by Beautiful Soup proper, so it is deliberately
        unimplemented here.
        """
        raise NotImplementedError()

    # TODO-TYPING: typeshed stubs are incorrect about this;
    # cloneNode returns a new Node, not None.
    def cloneNode(self) -> treebuilder_base.Node:
        # Subclasses that support cloning override this.
        raise NotImplementedError()
|
||||
|
||||
|
||||
class Element(BeautifulSoupNode):
    """An html5lib node wrapper around a Beautiful Soup Tag. html5lib's
    tree-construction calls on this object are translated into mutations
    of the wrapped Tag and of the soup's internal element links."""

    element: Tag
    namespace: Optional[_NamespaceURL]

    def __init__(
        self, element: Tag, soup: "BeautifulSoup", namespace: Optional[_NamespaceURL]
    ):
        """Wrap `element`, remembering the soup it belongs to and its
        namespace (None for the html5lib default)."""
        treebuilder_base.Node.__init__(self, element.name)
        self.element = element
        self.soup = soup
        self.namespace = namespace

    def appendChild(self, node: "BeautifulSoupNode") -> None:
        """Append a child node to the wrapped Tag, merging adjacent
        NavigableStrings and keeping the soup's parse-order links
        (next/previous element) consistent."""
        string_child: Optional[NavigableString] = None
        child: PageElement
        if type(node.element) is NavigableString:
            string_child = child = node.element
        else:
            child = node.element
            node.parent = self

        # A tag that already has a parent is being re-homed; detach it
        # from its old position first.
        if (
            child is not None
            and child.parent is not None
            and not isinstance(child, str)
        ):
            node.element.extract()

        if (
            string_child is not None
            and self.element.contents
            and type(self.element.contents[-1]) is NavigableString
        ):
            # We are appending a string onto another string.
            # TODO This has O(n^2) performance, for input like
            # "a</a>a</a>a</a>..."
            old_element = self.element.contents[-1]
            new_element = self.soup.new_string(old_element + string_child)
            old_element.replace_with(new_element)
            self.soup._most_recent_element = new_element
        else:
            if isinstance(node, str):
                # Create a brand new NavigableString from this string.
                child = self.soup.new_string(node)

            # Tell Beautiful Soup to act as if it parsed this element
            # immediately after the parent's last descendant. (Or
            # immediately after the parent, if it has no children.)
            if self.element.contents:
                most_recent_element = self.element._last_descendant(False)
            elif self.element.next_element is not None:
                # Something from further ahead in the parse tree is
                # being inserted into this earlier element. This is
                # very annoying because it means an expensive search
                # for the last element in the tree.
                most_recent_element = self.soup._last_descendant()
            else:
                most_recent_element = self.element

            self.soup.object_was_parsed(
                child, parent=self.element, most_recent_element=most_recent_element
            )

    def getAttributes(self) -> AttrList:
        """Return this tag's attributes wrapped in an html5lib-compatible
        AttrList. A Comment has no attributes, so an empty dict is
        returned instead (note: not an AttrList, despite the annotation)."""
        if isinstance(self.element, Comment):
            return {}
        return AttrList(self.element)

    # An HTML5lib attribute name may either be a single string,
    # or a tuple (namespace, name).
    _Html5libAttributeName: TypeAlias = Union[str, Tuple[str, str]]
    # Now we can define the type this method accepts as a dictionary
    # mapping those attribute names to single string values.
    _Html5libAttributes: TypeAlias = Dict[_Html5libAttributeName, str]

    def setAttributes(self, attributes: Optional[_Html5libAttributes]) -> None:
        """Set this tag's attributes from an html5lib attribute dict,
        normalizing namespaced names, splitting multi-valued attributes
        (e.g. 'class'), and re-running entity substitutions."""
        if attributes is not None and len(attributes) > 0:
            # Replace any namespaced attributes with
            # NamespacedAttribute objects.
            for name, value in list(attributes.items()):
                if isinstance(name, tuple):
                    new_name = NamespacedAttribute(*name)
                    del attributes[name]
                    attributes[new_name] = value

            # We can now cast attributes to the type of Dict
            # used by Beautiful Soup.
            normalized_attributes = cast(_AttributeValues, attributes)

            # Values for tags like 'class' came in as single strings;
            # replace them with lists of strings as appropriate.
            self.soup.builder._replace_cdata_list_attribute_values(
                self.name, normalized_attributes
            )

            # Then set the attributes on the Tag associated with this
            # BeautifulSoupNode.
            for name, value_or_values in list(normalized_attributes.items()):
                self.element[name] = value_or_values

            # The attributes may contain variables that need substitution.
            # Call set_up_substitutions manually.
            #
            # The Tag constructor called this method when the Tag was created,
            # but we just set/changed the attributes, so call it again.
            self.soup.builder.set_up_substitutions(self.element)

    attributes = property(getAttributes, setAttributes)

    def insertText(
        self, data: str, insertBefore: Optional["BeautifulSoupNode"] = None
    ) -> None:
        """Insert string data as a new TextNode, either before a given
        node or appended at the end."""
        text = TextNode(self.soup.new_string(data), self.soup)
        if insertBefore:
            self.insertBefore(text, insertBefore)
        else:
            self.appendChild(text)

    def insertBefore(
        self, node: "BeautifulSoupNode", refNode: "BeautifulSoupNode"
    ) -> None:
        """Insert `node` immediately before `refNode` among this tag's
        children, merging it into a preceding NavigableString when both
        are strings."""
        index = self.element.index(refNode.element)
        if (
            type(node.element) is NavigableString
            and self.element.contents
            and type(self.element.contents[index - 1]) is NavigableString
        ):
            # (See comments in appendChild)
            old_node = self.element.contents[index - 1]
            assert type(old_node) is NavigableString
            new_str = self.soup.new_string(old_node + node.element)
            old_node.replace_with(new_str)
        else:
            self.element.insert(index, node.element)
            node.parent = self

    def removeChild(self, node: "Element") -> None:
        """Detach a child node from the tree."""
        node.element.extract()

    def reparentChildren(self, new_parent: "Element") -> None:
        """Move all of this tag's children into another tag."""
        # print("MOVE", self.element.contents)
        # print("FROM", self.element)
        # print("TO", new_parent.element)

        element = self.element
        new_parent_element = new_parent.element
        # Determine what this tag's next_element will be once all the children
        # are removed.
        final_next_element = element.next_sibling

        new_parents_last_descendant = new_parent_element._last_descendant(False, False)
        if len(new_parent_element.contents) > 0:
            # The new parent already contains children. We will be
            # appending this tag's children to the end.

            # We can make this assertion since we know new_parent has
            # children.
            assert new_parents_last_descendant is not None
            new_parents_last_child = new_parent_element.contents[-1]
            new_parents_last_descendant_next_element = (
                new_parents_last_descendant.next_element
            )
        else:
            # The new parent contains no children.
            new_parents_last_child = None
            new_parents_last_descendant_next_element = new_parent_element.next_element

        to_append = element.contents
        if len(to_append) > 0:
            # Set the first child's previous_element and previous_sibling
            # to elements within the new parent
            first_child = to_append[0]
            if new_parents_last_descendant is not None:
                first_child.previous_element = new_parents_last_descendant
            else:
                first_child.previous_element = new_parent_element
            first_child.previous_sibling = new_parents_last_child
            if new_parents_last_descendant is not None:
                new_parents_last_descendant.next_element = first_child
            else:
                new_parent_element.next_element = first_child
            if new_parents_last_child is not None:
                new_parents_last_child.next_sibling = first_child

            # Find the very last element being moved. It is now the
            # parent's last descendant. It has no .next_sibling and
            # its .next_element is whatever the previous last
            # descendant had.
            last_childs_last_descendant = to_append[-1]._last_descendant(
                is_initialized=False, accept_self=True
            )

            # Since we passed accept_self=True into _last_descendant,
            # there's no possibility that the result is None.
            assert last_childs_last_descendant is not None
            last_childs_last_descendant.next_element = (
                new_parents_last_descendant_next_element
            )
            if new_parents_last_descendant_next_element is not None:
                # TODO-COVERAGE: This code has no test coverage and
                # I'm not sure how to get html5lib to go through this
                # path, but it's just the other side of the previous
                # line.
                new_parents_last_descendant_next_element.previous_element = (
                    last_childs_last_descendant
                )
            last_childs_last_descendant.next_sibling = None

        for child in to_append:
            child.parent = new_parent_element
            new_parent_element.contents.append(child)

        # Now that this element has no children, change its .next_element.
        element.contents = []
        element.next_element = final_next_element

        # print("DONE WITH MOVE")
        # print("FROM", self.element)
        # print("TO", new_parent_element)

    # TODO-TYPING: typeshed stubs are incorrect about this;
    # hasContent returns a boolean, not None.
    def hasContent(self) -> bool:
        return len(self.element.contents) > 0

    # TODO-TYPING: typeshed stubs are incorrect about this;
    # cloneNode returns a new Node, not None.
    def cloneNode(self) -> treebuilder_base.Node:
        # Make a fresh Tag with the same name/namespace, then copy
        # attributes through the AttrList property so multi-valued
        # attributes are handled consistently.
        tag = self.soup.new_tag(self.element.name, self.namespace)
        node = Element(tag, self.soup, self.namespace)
        for key, value in self.attributes:
            node.attributes[key] = value
        return node

    def getNameTuple(self) -> Tuple[Optional[_NamespaceURL], str]:
        """Return (namespace, name), defaulting to the html namespace."""
        if self.namespace is None:
            return namespaces["html"], self.name
        else:
            return self.namespace, self.name

    nameTuple = property(getNameTuple)
|
||||
|
||||
|
||||
class TextNode(BeautifulSoupNode):
    """An html5lib node wrapper around a NavigableString."""

    element: NavigableString

    def __init__(self, element: NavigableString, soup: "BeautifulSoup"):
        # Text nodes have no tag name.
        treebuilder_base.Node.__init__(self, None)
        self.soup = soup
        self.element = element
|
||||
474
gui/lib/python3.12/site-packages/bs4/builder/_htmlparser.py
Normal file
474
gui/lib/python3.12/site-packages/bs4/builder/_htmlparser.py
Normal file
@ -0,0 +1,474 @@
|
||||
# encoding: utf-8
|
||||
"""Use the HTMLParser library to parse HTML files that aren't too bad."""
|
||||
from __future__ import annotations
|
||||
|
||||
# Use of this source code is governed by the MIT license.
|
||||
__license__ = "MIT"
|
||||
|
||||
__all__ = [
|
||||
"HTMLParserTreeBuilder",
|
||||
]
|
||||
|
||||
from html.parser import HTMLParser
|
||||
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
cast,
|
||||
Dict,
|
||||
Iterable,
|
||||
List,
|
||||
Optional,
|
||||
TYPE_CHECKING,
|
||||
Tuple,
|
||||
Type,
|
||||
Union,
|
||||
)
|
||||
|
||||
from bs4.element import (
|
||||
AttributeDict,
|
||||
CData,
|
||||
Comment,
|
||||
Declaration,
|
||||
Doctype,
|
||||
ProcessingInstruction,
|
||||
)
|
||||
from bs4.dammit import EntitySubstitution, UnicodeDammit
|
||||
|
||||
from bs4.builder import (
|
||||
DetectsXMLParsedAsHTML,
|
||||
HTML,
|
||||
HTMLTreeBuilder,
|
||||
STRICT,
|
||||
)
|
||||
|
||||
from bs4.exceptions import ParserRejectedMarkup
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from bs4 import BeautifulSoup
|
||||
from bs4.element import NavigableString
|
||||
from bs4._typing import (
|
||||
_Encoding,
|
||||
_Encodings,
|
||||
_RawMarkup,
|
||||
)
|
||||
|
||||
HTMLPARSER = "html.parser"
|
||||
|
||||
_DuplicateAttributeHandler = Callable[[Dict[str, str], str, str], None]
|
||||
|
||||
|
||||
class BeautifulSoupHTMLParser(HTMLParser, DetectsXMLParsedAsHTML):
    #: Constant to handle duplicate attributes by replacing earlier
    #: values with later ones. (This is the behavior handle_starttag
    #: applies for REPLACE; the original comments on these two
    #: constants were swapped.)
    REPLACE: str = "replace"

    #: Constant to handle duplicate attributes by ignoring later values
    #: and keeping the earlier ones.
    IGNORE: str = "ignore"

    # NOTE(review): because this string follows the constant
    # assignments it is a no-op expression, not the class docstring;
    # kept in place for documentation purposes.
    """A subclass of the Python standard library's HTMLParser class, which
    listens for HTMLParser events and translates them into calls
    to Beautiful Soup's tree construction API.

    :param on_duplicate_attribute: A strategy for what to do if a
        tag includes the same attribute more than once. Accepted
        values are: REPLACE (replace earlier values with later
        ones, the default), IGNORE (keep the earliest value
        encountered), or a callable. A callable must take three
        arguments: the dictionary of attributes already processed,
        the name of the duplicate attribute, and the most recent value
        encountered.
    """

    def __init__(
        self,
        soup: BeautifulSoup,
        *args: Any,
        on_duplicate_attribute: Union[str, _DuplicateAttributeHandler] = REPLACE,
        **kwargs: Any,
    ):
        # `soup` is the BeautifulSoup object under construction; all
        # parser events are forwarded to it.
        self.soup = soup
        self.on_duplicate_attribute = on_duplicate_attribute
        self.attribute_dict_class = soup.builder.attribute_dict_class
        HTMLParser.__init__(self, *args, **kwargs)

        # Keep a list of empty-element tags that were encountered
        # without an explicit closing tag. If we encounter a closing tag
        # of this type, we'll associate it with one of those entries.
        #
        # This isn't a stack because we don't care about the
        # order. It's a list of closing tags we've already handled and
        # will ignore, assuming they ever show up.
        self.already_closed_empty_element = []

        self._initialize_xml_detector()

    on_duplicate_attribute: Union[str, _DuplicateAttributeHandler]
    already_closed_empty_element: List[str]
    soup: BeautifulSoup

    def error(self, message: str) -> None:
        # NOTE: This method is required so long as Python 3.9 is
        # supported. The corresponding code is removed from HTMLParser
        # in 3.5, but not removed from ParserBase until 3.10.
        # https://github.com/python/cpython/issues/76025
        #
        # The original implementation turned the error into a warning,
        # but in every case I discovered, this made HTMLParser
        # immediately crash with an error message that was less
        # helpful than the warning. The new implementation makes it
        # more clear that html.parser just can't parse this
        # markup. The 3.10 implementation does the same, though it
        # raises AssertionError rather than calling a method. (We
        # catch this error and wrap it in a ParserRejectedMarkup.)
        raise ParserRejectedMarkup(message)

    def handle_startendtag(
        self, name: str, attrs: List[Tuple[str, Optional[str]]]
    ) -> None:
        """Handle an incoming empty-element tag.

        html.parser only calls this method when the markup looks like
        <tag/>.
        """
        # `handle_empty_element` tells handle_starttag not to close the tag
        # just because its name matches a known empty-element tag. We
        # know that this is an empty-element tag, and we want to call
        # handle_endtag ourselves.
        self.handle_starttag(name, attrs, handle_empty_element=False)
        self.handle_endtag(name)

    def handle_starttag(
        self,
        name: str,
        attrs: List[Tuple[str, Optional[str]]],
        handle_empty_element: bool = True,
    ) -> None:
        """Handle an opening tag, e.g. '<tag>'

        :param handle_empty_element: True if this tag is known to be
            an empty-element tag (i.e. there is not expected to be any
            closing tag).
        """
        # TODO: handle namespaces here?
        attr_dict: AttributeDict = self.attribute_dict_class()
        for key, value in attrs:
            # Change None attribute values to the empty string
            # for consistency with the other tree builders.
            if value is None:
                value = ""
            if key in attr_dict:
                # A single attribute shows up multiple times in this
                # tag. How to handle it depends on the
                # on_duplicate_attribute setting.
                on_dupe = self.on_duplicate_attribute
                if on_dupe == self.IGNORE:
                    pass
                elif on_dupe in (None, self.REPLACE):
                    attr_dict[key] = value
                else:
                    on_dupe = cast(_DuplicateAttributeHandler, on_dupe)
                    on_dupe(attr_dict, key, value)
            else:
                attr_dict[key] = value
        # print("START", name)
        sourceline: Optional[int]
        sourcepos: Optional[int]
        if self.soup.builder.store_line_numbers:
            sourceline, sourcepos = self.getpos()
        else:
            sourceline = sourcepos = None
        tag = self.soup.handle_starttag(
            name, None, None, attr_dict, sourceline=sourceline, sourcepos=sourcepos
        )
        if tag and tag.is_empty_element and handle_empty_element:
            # Unlike other parsers, html.parser doesn't send separate end tag
            # events for empty-element tags. (It's handled in
            # handle_startendtag, but only if the original markup looked like
            # <tag/>.)
            #
            # So we need to call handle_endtag() ourselves. Since we
            # know the start event is identical to the end event, we
            # don't want handle_endtag() to cross off any previous end
            # events for tags of this name.
            self.handle_endtag(name, check_already_closed=False)

            # But we might encounter an explicit closing tag for this tag
            # later on. If so, we want to ignore it.
            self.already_closed_empty_element.append(name)

        if self._root_tag_name is None:
            self._root_tag_encountered(name)

    def handle_endtag(self, name: str, check_already_closed: bool = True) -> None:
        """Handle a closing tag, e.g. '</tag>'

        :param name: A tag name.
        :param check_already_closed: True if this tag is expected to
           be the closing portion of an empty-element tag,
           e.g. '<tag></tag>'.
        """
        # print("END", name)
        if check_already_closed and name in self.already_closed_empty_element:
            # This is a redundant end tag for an empty-element tag.
            # We've already called handle_endtag() for it, so just
            # check it off the list.
            # print("ALREADY CLOSED", name)
            self.already_closed_empty_element.remove(name)
        else:
            self.soup.handle_endtag(name)

    def handle_data(self, data: str) -> None:
        """Handle some textual data that shows up between tags."""
        self.soup.handle_data(data)

    def handle_charref(self, name: str) -> None:
        """Handle a numeric character reference by converting it to the
        corresponding Unicode character and treating it as textual
        data.

        :param name: Character number, possibly in hexadecimal.
        """
        # TODO: This was originally a workaround for a bug in
        # HTMLParser. (http://bugs.python.org/issue13633) The bug has
        # been fixed, but removing this code still makes some
        # Beautiful Soup tests fail. This needs investigation.
        if name.startswith("x"):
            real_name = int(name.lstrip("x"), 16)
        elif name.startswith("X"):
            real_name = int(name.lstrip("X"), 16)
        else:
            real_name = int(name)

        data = None
        if real_name < 256:
            # HTML numeric entities are supposed to reference Unicode
            # code points, but sometimes they reference code points in
            # some other encoding (ahem, Windows-1252). E.g. &#147;
            # instead of &#8220; for LEFT DOUBLE QUOTATION MARK. This
            # code tries to detect this situation and compensate.
            for encoding in (self.soup.original_encoding, "windows-1252"):
                if not encoding:
                    continue
                try:
                    data = bytearray([real_name]).decode(encoding)
                except UnicodeDecodeError:
                    pass
        if not data:
            try:
                data = chr(real_name)
            except (ValueError, OverflowError):
                pass
        data = data or "\N{REPLACEMENT CHARACTER}"
        self.handle_data(data)

    def handle_entityref(self, name: str) -> None:
        """Handle a named entity reference by converting it to the
        corresponding Unicode character(s) and treating it as textual
        data.

        :param name: Name of the entity reference.
        """
        character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name)
        if character is not None:
            data = character
        else:
            # If this were XML, it would be ambiguous whether "&foo"
            # was an character entity reference with a missing
            # semicolon or the literal string "&foo". Since this is
            # HTML, we have a complete list of all character entity references,
            # and this one wasn't found, so assume it's the literal string "&foo".
            data = "&%s" % name
        self.handle_data(data)

    def handle_comment(self, data: str) -> None:
        """Handle an HTML comment.

        :param data: The text of the comment.
        """
        self.soup.endData()
        self.soup.handle_data(data)
        self.soup.endData(Comment)

    def handle_decl(self, data: str) -> None:
        """Handle a DOCTYPE declaration.

        :param data: The text of the declaration.
        """
        self.soup.endData()
        data = data[len("DOCTYPE ") :]
        self.soup.handle_data(data)
        self.soup.endData(Doctype)

    def unknown_decl(self, data: str) -> None:
        """Handle a declaration of unknown type -- probably a CDATA block.

        :param data: The text of the declaration.
        """
        cls: Type[NavigableString]
        if data.upper().startswith("CDATA["):
            cls = CData
            data = data[len("CDATA[") :]
        else:
            cls = Declaration
        self.soup.endData()
        self.soup.handle_data(data)
        self.soup.endData(cls)

    def handle_pi(self, data: str) -> None:
        """Handle a processing instruction.

        :param data: The text of the instruction.
        """
        self.soup.endData()
        self.soup.handle_data(data)
        self._document_might_be_xml(data)
        self.soup.endData(ProcessingInstruction)
|
||||
|
||||
|
||||
class HTMLParserTreeBuilder(HTMLTreeBuilder):
    """A Beautiful soup `bs4.builder.TreeBuilder` that uses the
    :py:class:`html.parser.HTMLParser` parser, found in the Python
    standard library.

    """

    is_xml: bool = False
    picklable: bool = True
    NAME: str = HTMLPARSER
    features: Iterable[str] = [NAME, HTML, STRICT]
    # (positional args, keyword args) to construct each
    # BeautifulSoupHTMLParser with; see feed().
    parser_args: Tuple[Iterable[Any], Dict[str, Any]]

    #: The html.parser knows which line number and position in the
    #: original file is the source of an element.
    TRACKS_LINE_NUMBERS: bool = True

    def __init__(
        self,
        parser_args: Optional[Iterable[Any]] = None,
        parser_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ):
        """Constructor.

        :param parser_args: Positional arguments to pass into
            the BeautifulSoupHTMLParser constructor, once it's
            invoked.
        :param parser_kwargs: Keyword arguments to pass into
            the BeautifulSoupHTMLParser constructor, once it's
            invoked.
        :param kwargs: Keyword arguments for the superclass constructor.
        """
        # Some keyword arguments will be pulled out of kwargs and placed
        # into parser_kwargs.
        extra_parser_kwargs = dict()
        for arg in ("on_duplicate_attribute",):
            if arg in kwargs:
                value = kwargs.pop(arg)
                extra_parser_kwargs[arg] = value
        super(HTMLParserTreeBuilder, self).__init__(**kwargs)
        parser_args = parser_args or []
        parser_kwargs = parser_kwargs or {}
        parser_kwargs.update(extra_parser_kwargs)
        # With convert_charrefs enabled, html.parser would resolve
        # references itself and never call the handle_charref /
        # handle_entityref hooks defined on BeautifulSoupHTMLParser,
        # so it is forced off here.
        parser_kwargs["convert_charrefs"] = False
        self.parser_args = (parser_args, parser_kwargs)

    def prepare_markup(
        self,
        markup: _RawMarkup,
        user_specified_encoding: Optional[_Encoding] = None,
        document_declared_encoding: Optional[_Encoding] = None,
        exclude_encodings: Optional[_Encodings] = None,
    ) -> Iterable[Tuple[str, Optional[_Encoding], Optional[_Encoding], bool]]:
        """Run any preliminary steps necessary to make incoming markup
        acceptable to the parser.

        :param markup: Some markup -- probably a bytestring.
        :param user_specified_encoding: The user asked to try this encoding.
        :param document_declared_encoding: The markup itself claims to be
            in this encoding.
        :param exclude_encodings: The user asked _not_ to try any of
            these encodings.

        :yield: A series of 4-tuples: (markup, encoding, declared encoding,
            has undergone character replacement)

        Each 4-tuple represents a strategy for parsing the document.
        This TreeBuilder uses Unicode, Dammit to convert the markup
        into Unicode, so the ``markup`` element of the tuple will
        always be a string.
        """
        if isinstance(markup, str):
            # Parse Unicode as-is.
            yield (markup, None, None, False)
            return

        # Ask UnicodeDammit to sniff the most likely encoding.

        known_definite_encodings: List[_Encoding] = []
        if user_specified_encoding:
            # This was provided by the end-user; treat it as a known
            # definite encoding per the algorithm laid out in the
            # HTML5 spec. (See the EncodingDetector class for
            # details.)
            known_definite_encodings.append(user_specified_encoding)

        user_encodings: List[_Encoding] = []
        if document_declared_encoding:
            # This was found in the document; treat it as a slightly
            # lower-priority user encoding.
            user_encodings.append(document_declared_encoding)

        dammit = UnicodeDammit(
            markup,
            known_definite_encodings=known_definite_encodings,
            user_encodings=user_encodings,
            is_html=True,
            exclude_encodings=exclude_encodings,
        )

        if dammit.unicode_markup is None:
            # In every case I've seen, Unicode, Dammit is able to
            # convert the markup into Unicode, even if it needs to use
            # REPLACEMENT CHARACTER. But there is a code path that
            # could result in unicode_markup being None, and
            # HTMLParser can only parse Unicode, so here we handle
            # that code path.
            raise ParserRejectedMarkup(
                "Could not convert input to Unicode, and html.parser will not accept bytestrings."
            )
        else:
            yield (
                dammit.unicode_markup,
                dammit.original_encoding,
                dammit.declared_html_encoding,
                dammit.contains_replacement_characters,
            )

    def feed(self, markup: _RawMarkup) -> None:
        """Run incoming markup through a fresh BeautifulSoupHTMLParser,
        building the tree on self.soup."""
        args, kwargs = self.parser_args

        # HTMLParser.feed will only handle str, but
        # BeautifulSoup.markup is allowed to be _RawMarkup, because
        # it's set by the yield value of
        # TreeBuilder.prepare_markup. Fortunately,
        # HTMLParserTreeBuilder.prepare_markup always yields a str
        # (UnicodeDammit.unicode_markup).
        assert isinstance(markup, str)

        # We know BeautifulSoup calls TreeBuilder.initialize_soup
        # before calling feed(), so we can assume self.soup
        # is set.
        assert self.soup is not None
        parser = BeautifulSoupHTMLParser(self.soup, *args, **kwargs)

        try:
            parser.feed(markup)
            parser.close()
        except AssertionError as e:
            # html.parser raises AssertionError in rare cases to
            # indicate a fatal problem with the markup, especially
            # when there's an error in the doctype declaration.
            raise ParserRejectedMarkup(e)
        parser.already_closed_empty_element = []
|
||||
490
gui/lib/python3.12/site-packages/bs4/builder/_lxml.py
Normal file
490
gui/lib/python3.12/site-packages/bs4/builder/_lxml.py
Normal file
@ -0,0 +1,490 @@
|
||||
# encoding: utf-8
|
||||
from __future__ import annotations
|
||||
|
||||
# Use of this source code is governed by the MIT license.
|
||||
__license__ = "MIT"
|
||||
|
||||
__all__ = [
|
||||
"LXMLTreeBuilderForXML",
|
||||
"LXMLTreeBuilder",
|
||||
]
|
||||
|
||||
|
||||
from typing import (
|
||||
Any,
|
||||
Dict,
|
||||
Iterable,
|
||||
List,
|
||||
Optional,
|
||||
Set,
|
||||
Tuple,
|
||||
Type,
|
||||
TYPE_CHECKING,
|
||||
Union,
|
||||
)
|
||||
from typing_extensions import TypeAlias
|
||||
|
||||
from io import BytesIO
|
||||
from io import StringIO
|
||||
from lxml import etree
|
||||
from bs4.element import (
|
||||
AttributeDict,
|
||||
XMLAttributeDict,
|
||||
Comment,
|
||||
Doctype,
|
||||
NamespacedAttribute,
|
||||
ProcessingInstruction,
|
||||
XMLProcessingInstruction,
|
||||
)
|
||||
from bs4.builder import (
|
||||
DetectsXMLParsedAsHTML,
|
||||
FAST,
|
||||
HTML,
|
||||
HTMLTreeBuilder,
|
||||
PERMISSIVE,
|
||||
TreeBuilder,
|
||||
XML,
|
||||
)
|
||||
from bs4.dammit import EncodingDetector
|
||||
from bs4.exceptions import ParserRejectedMarkup
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from bs4._typing import (
|
||||
_Encoding,
|
||||
_Encodings,
|
||||
_NamespacePrefix,
|
||||
_NamespaceURL,
|
||||
_NamespaceMapping,
|
||||
_InvertedNamespaceMapping,
|
||||
_RawMarkup,
|
||||
)
|
||||
from bs4 import BeautifulSoup
|
||||
|
||||
LXML: str = "lxml"
|
||||
|
||||
|
||||
def _invert(d: dict[Any, Any]) -> dict[Any, Any]:
|
||||
"Invert a dictionary."
|
||||
return dict((v, k) for k, v in list(d.items()))
|
||||
|
||||
|
||||
_LXMLParser: TypeAlias = Union[etree.XMLParser, etree.HTMLParser]
|
||||
_ParserOrParserClass: TypeAlias = Union[
|
||||
_LXMLParser, Type[etree.XMLParser], Type[etree.HTMLParser]
|
||||
]
|
||||
|
||||
|
||||
class LXMLTreeBuilderForXML(TreeBuilder):
|
||||
DEFAULT_PARSER_CLASS: Type[etree.XMLParser] = etree.XMLParser
|
||||
|
||||
is_xml: bool = True
|
||||
|
||||
processing_instruction_class: Type[ProcessingInstruction]
|
||||
|
||||
NAME: str = "lxml-xml"
|
||||
ALTERNATE_NAMES: Iterable[str] = ["xml"]
|
||||
|
||||
# Well, it's permissive by XML parser standards.
|
||||
features: Iterable[str] = [NAME, LXML, XML, FAST, PERMISSIVE]
|
||||
|
||||
CHUNK_SIZE: int = 512
|
||||
|
||||
# This namespace mapping is specified in the XML Namespace
|
||||
# standard.
|
||||
DEFAULT_NSMAPS: _NamespaceMapping = dict(xml="http://www.w3.org/XML/1998/namespace")
|
||||
|
||||
DEFAULT_NSMAPS_INVERTED: _InvertedNamespaceMapping = _invert(DEFAULT_NSMAPS)
|
||||
|
||||
nsmaps: List[Optional[_InvertedNamespaceMapping]]
|
||||
empty_element_tags: Set[str]
|
||||
parser: Any
|
||||
_default_parser: Optional[etree.XMLParser]
|
||||
|
||||
# NOTE: If we parsed Element objects and looked at .sourceline,
|
||||
# we'd be able to see the line numbers from the original document.
|
||||
# But instead we build an XMLParser or HTMLParser object to serve
|
||||
# as the target of parse messages, and those messages don't include
|
||||
# line numbers.
|
||||
# See: https://bugs.launchpad.net/lxml/+bug/1846906
|
||||
|
||||
def initialize_soup(self, soup: BeautifulSoup) -> None:
|
||||
"""Let the BeautifulSoup object know about the standard namespace
|
||||
mapping.
|
||||
|
||||
:param soup: A `BeautifulSoup`.
|
||||
"""
|
||||
# Beyond this point, self.soup is set, so we can assume (and
|
||||
# assert) it's not None whenever necessary.
|
||||
super(LXMLTreeBuilderForXML, self).initialize_soup(soup)
|
||||
self._register_namespaces(self.DEFAULT_NSMAPS)
|
||||
|
||||
def _register_namespaces(self, mapping: Dict[str, str]) -> None:
|
||||
"""Let the BeautifulSoup object know about namespaces encountered
|
||||
while parsing the document.
|
||||
|
||||
This might be useful later on when creating CSS selectors.
|
||||
|
||||
This will track (almost) all namespaces, even ones that were
|
||||
only in scope for part of the document. If two namespaces have
|
||||
the same prefix, only the first one encountered will be
|
||||
tracked. Un-prefixed namespaces are not tracked.
|
||||
|
||||
:param mapping: A dictionary mapping namespace prefixes to URIs.
|
||||
"""
|
||||
assert self.soup is not None
|
||||
for key, value in list(mapping.items()):
|
||||
# This is 'if key' and not 'if key is not None' because we
|
||||
# don't track un-prefixed namespaces. Soupselect will
|
||||
# treat an un-prefixed namespace as the default, which
|
||||
# causes confusion in some cases.
|
||||
if key and key not in self.soup._namespaces:
|
||||
# Let the BeautifulSoup object know about a new namespace.
|
||||
# If there are multiple namespaces defined with the same
|
||||
# prefix, the first one in the document takes precedence.
|
||||
self.soup._namespaces[key] = value
|
||||
|
||||
def default_parser(self, encoding: Optional[_Encoding]) -> _ParserOrParserClass:
|
||||
"""Find the default parser for the given encoding.
|
||||
|
||||
:return: Either a parser object or a class, which
|
||||
will be instantiated with default arguments.
|
||||
"""
|
||||
if self._default_parser is not None:
|
||||
return self._default_parser
|
||||
return self.DEFAULT_PARSER_CLASS(target=self, recover=True, encoding=encoding)
|
||||
|
||||
def parser_for(self, encoding: Optional[_Encoding]) -> _LXMLParser:
|
||||
"""Instantiate an appropriate parser for the given encoding.
|
||||
|
||||
:param encoding: A string.
|
||||
:return: A parser object such as an `etree.XMLParser`.
|
||||
"""
|
||||
# Use the default parser.
|
||||
parser = self.default_parser(encoding)
|
||||
|
||||
if callable(parser):
|
||||
# Instantiate the parser with default arguments
|
||||
parser = parser(target=self, recover=True, encoding=encoding)
|
||||
return parser
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
parser: Optional[etree.XMLParser] = None,
|
||||
empty_element_tags: Optional[Set[str]] = None,
|
||||
**kwargs: Any,
|
||||
):
|
||||
# TODO: Issue a warning if parser is present but not a
|
||||
# callable, since that means there's no way to create new
|
||||
# parsers for different encodings.
|
||||
self._default_parser = parser
|
||||
self.soup = None
|
||||
self.nsmaps = [self.DEFAULT_NSMAPS_INVERTED]
|
||||
self.active_namespace_prefixes = [dict(self.DEFAULT_NSMAPS)]
|
||||
if "attribute_dict_class" not in kwargs:
|
||||
kwargs["attribute_dict_class"] = XMLAttributeDict
|
||||
super(LXMLTreeBuilderForXML, self).__init__(**kwargs)
|
||||
|
||||
def _getNsTag(self, tag: str) -> Tuple[Optional[str], str]:
|
||||
# Split the namespace URL out of a fully-qualified lxml tag
|
||||
# name. Copied from lxml's src/lxml/sax.py.
|
||||
if tag[0] == "{":
|
||||
namespace, name = tag[1:].split("}", 1)
|
||||
return (namespace, name)
|
||||
else:
|
||||
return (None, tag)
|
||||
|
||||
def prepare_markup(
|
||||
self,
|
||||
markup: _RawMarkup,
|
||||
user_specified_encoding: Optional[_Encoding] = None,
|
||||
document_declared_encoding: Optional[_Encoding] = None,
|
||||
exclude_encodings: Optional[_Encodings] = None,
|
||||
) -> Iterable[
|
||||
Tuple[Union[str, bytes], Optional[_Encoding], Optional[_Encoding], bool]
|
||||
]:
|
||||
"""Run any preliminary steps necessary to make incoming markup
|
||||
acceptable to the parser.
|
||||
|
||||
lxml really wants to get a bytestring and convert it to
|
||||
Unicode itself. So instead of using UnicodeDammit to convert
|
||||
the bytestring to Unicode using different encodings, this
|
||||
implementation uses EncodingDetector to iterate over the
|
||||
encodings, and tell lxml to try to parse the document as each
|
||||
one in turn.
|
||||
|
||||
:param markup: Some markup -- hopefully a bytestring.
|
||||
:param user_specified_encoding: The user asked to try this encoding.
|
||||
:param document_declared_encoding: The markup itself claims to be
|
||||
in this encoding.
|
||||
:param exclude_encodings: The user asked _not_ to try any of
|
||||
these encodings.
|
||||
|
||||
:yield: A series of 4-tuples: (markup, encoding, declared encoding,
|
||||
has undergone character replacement)
|
||||
|
||||
Each 4-tuple represents a strategy for converting the
|
||||
document to Unicode and parsing it. Each strategy will be tried
|
||||
in turn.
|
||||
"""
|
||||
is_html = not self.is_xml
|
||||
if is_html:
|
||||
self.processing_instruction_class = ProcessingInstruction
|
||||
# We're in HTML mode, so if we're given XML, that's worth
|
||||
# noting.
|
||||
DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml(markup, stacklevel=3)
|
||||
else:
|
||||
self.processing_instruction_class = XMLProcessingInstruction
|
||||
|
||||
if isinstance(markup, str):
|
||||
# We were given Unicode. Maybe lxml can parse Unicode on
|
||||
# this system?
|
||||
|
||||
# TODO: This is a workaround for
|
||||
# https://bugs.launchpad.net/lxml/+bug/1948551.
|
||||
# We can remove it once the upstream issue is fixed.
|
||||
if len(markup) > 0 and markup[0] == "\N{BYTE ORDER MARK}":
|
||||
markup = markup[1:]
|
||||
yield markup, None, document_declared_encoding, False
|
||||
|
||||
if isinstance(markup, str):
|
||||
# No, apparently not. Convert the Unicode to UTF-8 and
|
||||
# tell lxml to parse it as UTF-8.
|
||||
yield (markup.encode("utf8"), "utf8", document_declared_encoding, False)
|
||||
|
||||
# Since the document was Unicode in the first place, there
|
||||
# is no need to try any more strategies; we know this will
|
||||
# work.
|
||||
return
|
||||
|
||||
known_definite_encodings: List[_Encoding] = []
|
||||
if user_specified_encoding:
|
||||
# This was provided by the end-user; treat it as a known
|
||||
# definite encoding per the algorithm laid out in the
|
||||
# HTML5 spec. (See the EncodingDetector class for
|
||||
# details.)
|
||||
known_definite_encodings.append(user_specified_encoding)
|
||||
|
||||
user_encodings: List[_Encoding] = []
|
||||
if document_declared_encoding:
|
||||
# This was found in the document; treat it as a slightly
|
||||
# lower-priority user encoding.
|
||||
user_encodings.append(document_declared_encoding)
|
||||
|
||||
detector = EncodingDetector(
|
||||
markup,
|
||||
known_definite_encodings=known_definite_encodings,
|
||||
user_encodings=user_encodings,
|
||||
is_html=is_html,
|
||||
exclude_encodings=exclude_encodings,
|
||||
)
|
||||
for encoding in detector.encodings:
|
||||
yield (detector.markup, encoding, document_declared_encoding, False)
|
||||
|
||||
def feed(self, markup: _RawMarkup) -> None:
|
||||
io: Union[BytesIO, StringIO]
|
||||
if isinstance(markup, bytes):
|
||||
io = BytesIO(markup)
|
||||
elif isinstance(markup, str):
|
||||
io = StringIO(markup)
|
||||
|
||||
# initialize_soup is called before feed, so we know this
|
||||
# is not None.
|
||||
assert self.soup is not None
|
||||
|
||||
# Call feed() at least once, even if the markup is empty,
|
||||
# or the parser won't be initialized.
|
||||
data = io.read(self.CHUNK_SIZE)
|
||||
try:
|
||||
self.parser = self.parser_for(self.soup.original_encoding)
|
||||
self.parser.feed(data)
|
||||
while len(data) != 0:
|
||||
# Now call feed() on the rest of the data, chunk by chunk.
|
||||
data = io.read(self.CHUNK_SIZE)
|
||||
if len(data) != 0:
|
||||
self.parser.feed(data)
|
||||
self.parser.close()
|
||||
except (UnicodeDecodeError, LookupError, etree.ParserError) as e:
|
||||
raise ParserRejectedMarkup(e)
|
||||
|
||||
def close(self) -> None:
|
||||
self.nsmaps = [self.DEFAULT_NSMAPS_INVERTED]
|
||||
|
||||
def start(
|
||||
self,
|
||||
tag: str | bytes,
|
||||
attrs: Dict[str | bytes, str | bytes],
|
||||
nsmap: _NamespaceMapping = {},
|
||||
) -> None:
|
||||
# This is called by lxml code as a result of calling
|
||||
# BeautifulSoup.feed(), and we know self.soup is set by the time feed()
|
||||
# is called.
|
||||
assert self.soup is not None
|
||||
assert isinstance(tag, str)
|
||||
|
||||
# We need to recreate the attribute dict for three
|
||||
# reasons. First, for type checking, so we can assert there
|
||||
# are no bytestrings in the keys or values. Second, because we
|
||||
# need a mutable dict--lxml might send us an immutable
|
||||
# dictproxy. Third, so we can handle namespaced attribute
|
||||
# names by converting the keys to NamespacedAttributes.
|
||||
new_attrs: Dict[Union[str, NamespacedAttribute], str] = (
|
||||
self.attribute_dict_class()
|
||||
)
|
||||
for k, v in attrs.items():
|
||||
assert isinstance(k, str)
|
||||
assert isinstance(v, str)
|
||||
new_attrs[k] = v
|
||||
|
||||
nsprefix: Optional[_NamespacePrefix] = None
|
||||
namespace: Optional[_NamespaceURL] = None
|
||||
# Invert each namespace map as it comes in.
|
||||
if len(nsmap) == 0 and len(self.nsmaps) > 1:
|
||||
# There are no new namespaces for this tag, but
|
||||
# non-default namespaces are in play, so we need a
|
||||
# separate tag stack to know when they end.
|
||||
self.nsmaps.append(None)
|
||||
elif len(nsmap) > 0:
|
||||
# A new namespace mapping has come into play.
|
||||
|
||||
# First, Let the BeautifulSoup object know about it.
|
||||
self._register_namespaces(nsmap)
|
||||
|
||||
# Then, add it to our running list of inverted namespace
|
||||
# mappings.
|
||||
self.nsmaps.append(_invert(nsmap))
|
||||
|
||||
# The currently active namespace prefixes have
|
||||
# changed. Calculate the new mapping so it can be stored
|
||||
# with all Tag objects created while these prefixes are in
|
||||
# scope.
|
||||
current_mapping = dict(self.active_namespace_prefixes[-1])
|
||||
current_mapping.update(nsmap)
|
||||
|
||||
# We should not track un-prefixed namespaces as we can only hold one
|
||||
# and it will be recognized as the default namespace by soupsieve,
|
||||
# which may be confusing in some situations.
|
||||
if "" in current_mapping:
|
||||
del current_mapping[""]
|
||||
self.active_namespace_prefixes.append(current_mapping)
|
||||
|
||||
# Also treat the namespace mapping as a set of attributes on the
|
||||
# tag, so we can recreate it later.
|
||||
for prefix, namespace in list(nsmap.items()):
|
||||
attribute = NamespacedAttribute(
|
||||
"xmlns", prefix, "http://www.w3.org/2000/xmlns/"
|
||||
)
|
||||
new_attrs[attribute] = namespace
|
||||
|
||||
# Namespaces are in play. Find any attributes that came in
|
||||
# from lxml with namespaces attached to their names, and
|
||||
# turn then into NamespacedAttribute objects.
|
||||
final_attrs: AttributeDict = self.attribute_dict_class()
|
||||
for attr, value in list(new_attrs.items()):
|
||||
namespace, attr = self._getNsTag(attr)
|
||||
if namespace is None:
|
||||
final_attrs[attr] = value
|
||||
else:
|
||||
nsprefix = self._prefix_for_namespace(namespace)
|
||||
attr = NamespacedAttribute(nsprefix, attr, namespace)
|
||||
final_attrs[attr] = value
|
||||
|
||||
namespace, tag = self._getNsTag(tag)
|
||||
nsprefix = self._prefix_for_namespace(namespace)
|
||||
self.soup.handle_starttag(
|
||||
tag,
|
||||
namespace,
|
||||
nsprefix,
|
||||
final_attrs,
|
||||
namespaces=self.active_namespace_prefixes[-1],
|
||||
)
|
||||
|
||||
def _prefix_for_namespace(
|
||||
self, namespace: Optional[_NamespaceURL]
|
||||
) -> Optional[_NamespacePrefix]:
|
||||
"""Find the currently active prefix for the given namespace."""
|
||||
if namespace is None:
|
||||
return None
|
||||
for inverted_nsmap in reversed(self.nsmaps):
|
||||
if inverted_nsmap is not None and namespace in inverted_nsmap:
|
||||
return inverted_nsmap[namespace]
|
||||
return None
|
||||
|
||||
def end(self, name: str | bytes) -> None:
|
||||
assert self.soup is not None
|
||||
assert isinstance(name, str)
|
||||
self.soup.endData()
|
||||
namespace, name = self._getNsTag(name)
|
||||
nsprefix = None
|
||||
if namespace is not None:
|
||||
for inverted_nsmap in reversed(self.nsmaps):
|
||||
if inverted_nsmap is not None and namespace in inverted_nsmap:
|
||||
nsprefix = inverted_nsmap[namespace]
|
||||
break
|
||||
self.soup.handle_endtag(name, nsprefix)
|
||||
if len(self.nsmaps) > 1:
|
||||
# This tag, or one of its parents, introduced a namespace
|
||||
# mapping, so pop it off the stack.
|
||||
out_of_scope_nsmap = self.nsmaps.pop()
|
||||
|
||||
if out_of_scope_nsmap is not None:
|
||||
# This tag introduced a namespace mapping which is no
|
||||
# longer in scope. Recalculate the currently active
|
||||
# namespace prefixes.
|
||||
self.active_namespace_prefixes.pop()
|
||||
|
||||
def pi(self, target: str, data: str) -> None:
|
||||
assert self.soup is not None
|
||||
self.soup.endData()
|
||||
data = target + " " + data
|
||||
self.soup.handle_data(data)
|
||||
self.soup.endData(self.processing_instruction_class)
|
||||
|
||||
def data(self, data: str | bytes) -> None:
|
||||
assert self.soup is not None
|
||||
assert isinstance(data, str)
|
||||
self.soup.handle_data(data)
|
||||
|
||||
def doctype(self, name: str, pubid: str, system: str) -> None:
|
||||
assert self.soup is not None
|
||||
self.soup.endData()
|
||||
doctype_string = Doctype._string_for_name_and_ids(name, pubid, system)
|
||||
self.soup.handle_data(doctype_string)
|
||||
self.soup.endData(containerClass=Doctype)
|
||||
|
||||
def comment(self, text: str | bytes) -> None:
|
||||
"Handle comments as Comment objects."
|
||||
assert self.soup is not None
|
||||
assert isinstance(text, str)
|
||||
self.soup.endData()
|
||||
self.soup.handle_data(text)
|
||||
self.soup.endData(Comment)
|
||||
|
||||
def test_fragment_to_document(self, fragment: str) -> str:
|
||||
"""See `TreeBuilder`."""
|
||||
return '<?xml version="1.0" encoding="utf-8"?>\n%s' % fragment
|
||||
|
||||
|
||||
class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
|
||||
NAME: str = LXML
|
||||
ALTERNATE_NAMES: Iterable[str] = ["lxml-html"]
|
||||
|
||||
features: Iterable[str] = list(ALTERNATE_NAMES) + [NAME, HTML, FAST, PERMISSIVE]
|
||||
is_xml: bool = False
|
||||
|
||||
def default_parser(self, encoding: Optional[_Encoding]) -> _ParserOrParserClass:
|
||||
return etree.HTMLParser
|
||||
|
||||
def feed(self, markup: _RawMarkup) -> None:
|
||||
# We know self.soup is set by the time feed() is called.
|
||||
assert self.soup is not None
|
||||
encoding = self.soup.original_encoding
|
||||
try:
|
||||
self.parser = self.parser_for(encoding)
|
||||
self.parser.feed(markup)
|
||||
self.parser.close()
|
||||
except (UnicodeDecodeError, LookupError, etree.ParserError) as e:
|
||||
raise ParserRejectedMarkup(e)
|
||||
|
||||
def test_fragment_to_document(self, fragment: str) -> str:
|
||||
"""See `TreeBuilder`."""
|
||||
return "<html><body>%s</body></html>" % fragment
|
||||
338
gui/lib/python3.12/site-packages/bs4/css.py
Normal file
338
gui/lib/python3.12/site-packages/bs4/css.py
Normal file
@ -0,0 +1,338 @@
|
||||
"""Integration code for CSS selectors using `Soup Sieve <https://facelessuser.github.io/soupsieve/>`_ (pypi: ``soupsieve``).
|
||||
|
||||
Acquire a `CSS` object through the `element.Tag.css` attribute of
|
||||
the starting point of your CSS selector, or (if you want to run a
|
||||
selector against the entire document) of the `BeautifulSoup` object
|
||||
itself.
|
||||
|
||||
The main advantage of doing this instead of using ``soupsieve``
|
||||
functions is that you don't need to keep passing the `element.Tag` to be
|
||||
selected against, since the `CSS` object is permanently scoped to that
|
||||
`element.Tag`.
|
||||
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from types import ModuleType
|
||||
from typing import (
|
||||
Any,
|
||||
cast,
|
||||
Iterable,
|
||||
Iterator,
|
||||
Optional,
|
||||
TYPE_CHECKING,
|
||||
)
|
||||
import warnings
|
||||
from bs4._typing import _NamespaceMapping
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from soupsieve import SoupSieve
|
||||
from bs4 import element
|
||||
from bs4.element import ResultSet, Tag
|
||||
|
||||
soupsieve: Optional[ModuleType]
|
||||
try:
|
||||
import soupsieve
|
||||
except ImportError:
|
||||
soupsieve = None
|
||||
warnings.warn(
|
||||
"The soupsieve package is not installed. CSS selectors cannot be used."
|
||||
)
|
||||
|
||||
|
||||
class CSS(object):
|
||||
"""A proxy object against the ``soupsieve`` library, to simplify its
|
||||
CSS selector API.
|
||||
|
||||
You don't need to instantiate this class yourself; instead, use
|
||||
`element.Tag.css`.
|
||||
|
||||
:param tag: All CSS selectors run by this object will use this as
|
||||
their starting point.
|
||||
|
||||
:param api: An optional drop-in replacement for the ``soupsieve`` module,
|
||||
intended for use in unit tests.
|
||||
"""
|
||||
|
||||
def __init__(self, tag: element.Tag, api: Optional[ModuleType] = None):
|
||||
if api is None:
|
||||
api = soupsieve
|
||||
if api is None:
|
||||
raise NotImplementedError(
|
||||
"Cannot execute CSS selectors because the soupsieve package is not installed."
|
||||
)
|
||||
self.api = api
|
||||
self.tag = tag
|
||||
|
||||
def escape(self, ident: str) -> str:
|
||||
"""Escape a CSS identifier.
|
||||
|
||||
This is a simple wrapper around `soupsieve.escape() <https://facelessuser.github.io/soupsieve/api/#soupsieveescape>`_. See the
|
||||
documentation for that function for more information.
|
||||
"""
|
||||
if soupsieve is None:
|
||||
raise NotImplementedError(
|
||||
"Cannot escape CSS identifiers because the soupsieve package is not installed."
|
||||
)
|
||||
return cast(str, self.api.escape(ident))
|
||||
|
||||
def _ns(
|
||||
self, ns: Optional[_NamespaceMapping], select: str
|
||||
) -> Optional[_NamespaceMapping]:
|
||||
"""Normalize a dictionary of namespaces."""
|
||||
if not isinstance(select, self.api.SoupSieve) and ns is None:
|
||||
# If the selector is a precompiled pattern, it already has
|
||||
# a namespace context compiled in, which cannot be
|
||||
# replaced.
|
||||
ns = self.tag._namespaces
|
||||
return ns
|
||||
|
||||
def _rs(self, results: Iterable[Tag]) -> ResultSet[Tag]:
|
||||
"""Normalize a list of results to a py:class:`ResultSet`.
|
||||
|
||||
A py:class:`ResultSet` is more consistent with the rest of
|
||||
Beautiful Soup's API, and :py:meth:`ResultSet.__getattr__` has
|
||||
a helpful error message if you try to treat a list of results
|
||||
as a single result (a common mistake).
|
||||
"""
|
||||
# Import here to avoid circular import
|
||||
from bs4 import ResultSet
|
||||
|
||||
return ResultSet(None, results)
|
||||
|
||||
def compile(
|
||||
self,
|
||||
select: str,
|
||||
namespaces: Optional[_NamespaceMapping] = None,
|
||||
flags: int = 0,
|
||||
**kwargs: Any,
|
||||
) -> SoupSieve:
|
||||
"""Pre-compile a selector and return the compiled object.
|
||||
|
||||
:param selector: A CSS selector.
|
||||
|
||||
:param namespaces: A dictionary mapping namespace prefixes
|
||||
used in the CSS selector to namespace URIs. By default,
|
||||
Beautiful Soup will use the prefixes it encountered while
|
||||
parsing the document.
|
||||
|
||||
:param flags: Flags to be passed into Soup Sieve's
|
||||
`soupsieve.compile() <https://facelessuser.github.io/soupsieve/api/#soupsievecompile>`_ method.
|
||||
|
||||
:param kwargs: Keyword arguments to be passed into Soup Sieve's
|
||||
`soupsieve.compile() <https://facelessuser.github.io/soupsieve/api/#soupsievecompile>`_ method.
|
||||
|
||||
:return: A precompiled selector object.
|
||||
:rtype: soupsieve.SoupSieve
|
||||
"""
|
||||
return self.api.compile(select, self._ns(namespaces, select), flags, **kwargs)
|
||||
|
||||
def select_one(
|
||||
self,
|
||||
select: str,
|
||||
namespaces: Optional[_NamespaceMapping] = None,
|
||||
flags: int = 0,
|
||||
**kwargs: Any,
|
||||
) -> element.Tag | None:
|
||||
"""Perform a CSS selection operation on the current Tag and return the
|
||||
first result, if any.
|
||||
|
||||
This uses the Soup Sieve library. For more information, see
|
||||
that library's documentation for the `soupsieve.select_one() <https://facelessuser.github.io/soupsieve/api/#soupsieveselect_one>`_ method.
|
||||
|
||||
:param selector: A CSS selector.
|
||||
|
||||
:param namespaces: A dictionary mapping namespace prefixes
|
||||
used in the CSS selector to namespace URIs. By default,
|
||||
Beautiful Soup will use the prefixes it encountered while
|
||||
parsing the document.
|
||||
|
||||
:param flags: Flags to be passed into Soup Sieve's
|
||||
`soupsieve.select_one() <https://facelessuser.github.io/soupsieve/api/#soupsieveselect_one>`_ method.
|
||||
|
||||
:param kwargs: Keyword arguments to be passed into Soup Sieve's
|
||||
`soupsieve.select_one() <https://facelessuser.github.io/soupsieve/api/#soupsieveselect_one>`_ method.
|
||||
"""
|
||||
return self.api.select_one(
|
||||
select, self.tag, self._ns(namespaces, select), flags, **kwargs
|
||||
)
|
||||
|
||||
def select(
|
||||
self,
|
||||
select: str,
|
||||
namespaces: Optional[_NamespaceMapping] = None,
|
||||
limit: int = 0,
|
||||
flags: int = 0,
|
||||
**kwargs: Any,
|
||||
) -> ResultSet[element.Tag]:
|
||||
"""Perform a CSS selection operation on the current `element.Tag`.
|
||||
|
||||
This uses the Soup Sieve library. For more information, see
|
||||
that library's documentation for the `soupsieve.select() <https://facelessuser.github.io/soupsieve/api/#soupsieveselect>`_ method.
|
||||
|
||||
:param selector: A CSS selector.
|
||||
|
||||
:param namespaces: A dictionary mapping namespace prefixes
|
||||
used in the CSS selector to namespace URIs. By default,
|
||||
Beautiful Soup will pass in the prefixes it encountered while
|
||||
parsing the document.
|
||||
|
||||
:param limit: After finding this number of results, stop looking.
|
||||
|
||||
:param flags: Flags to be passed into Soup Sieve's
|
||||
`soupsieve.select() <https://facelessuser.github.io/soupsieve/api/#soupsieveselect>`_ method.
|
||||
|
||||
:param kwargs: Keyword arguments to be passed into Soup Sieve's
|
||||
`soupsieve.select() <https://facelessuser.github.io/soupsieve/api/#soupsieveselect>`_ method.
|
||||
"""
|
||||
if limit is None:
|
||||
limit = 0
|
||||
|
||||
return self._rs(
|
||||
self.api.select(
|
||||
select, self.tag, self._ns(namespaces, select), limit, flags, **kwargs
|
||||
)
|
||||
)
|
||||
|
||||
def iselect(
|
||||
self,
|
||||
select: str,
|
||||
namespaces: Optional[_NamespaceMapping] = None,
|
||||
limit: int = 0,
|
||||
flags: int = 0,
|
||||
**kwargs: Any,
|
||||
) -> Iterator[element.Tag]:
|
||||
"""Perform a CSS selection operation on the current `element.Tag`.
|
||||
|
||||
This uses the Soup Sieve library. For more information, see
|
||||
that library's documentation for the `soupsieve.iselect()
|
||||
<https://facelessuser.github.io/soupsieve/api/#soupsieveiselect>`_
|
||||
method. It is the same as select(), but it returns a generator
|
||||
instead of a list.
|
||||
|
||||
:param selector: A string containing a CSS selector.
|
||||
|
||||
:param namespaces: A dictionary mapping namespace prefixes
|
||||
used in the CSS selector to namespace URIs. By default,
|
||||
Beautiful Soup will pass in the prefixes it encountered while
|
||||
parsing the document.
|
||||
|
||||
:param limit: After finding this number of results, stop looking.
|
||||
|
||||
:param flags: Flags to be passed into Soup Sieve's
|
||||
`soupsieve.iselect() <https://facelessuser.github.io/soupsieve/api/#soupsieveiselect>`_ method.
|
||||
|
||||
:param kwargs: Keyword arguments to be passed into Soup Sieve's
|
||||
`soupsieve.iselect() <https://facelessuser.github.io/soupsieve/api/#soupsieveiselect>`_ method.
|
||||
"""
|
||||
return self.api.iselect(
|
||||
select, self.tag, self._ns(namespaces, select), limit, flags, **kwargs
|
||||
)
|
||||
|
||||
def closest(
|
||||
self,
|
||||
select: str,
|
||||
namespaces: Optional[_NamespaceMapping] = None,
|
||||
flags: int = 0,
|
||||
**kwargs: Any,
|
||||
) -> Optional[element.Tag]:
|
||||
"""Find the `element.Tag` closest to this one that matches the given selector.
|
||||
|
||||
This uses the Soup Sieve library. For more information, see
|
||||
that library's documentation for the `soupsieve.closest()
|
||||
<https://facelessuser.github.io/soupsieve/api/#soupsieveclosest>`_
|
||||
method.
|
||||
|
||||
:param selector: A string containing a CSS selector.
|
||||
|
||||
:param namespaces: A dictionary mapping namespace prefixes
|
||||
used in the CSS selector to namespace URIs. By default,
|
||||
Beautiful Soup will pass in the prefixes it encountered while
|
||||
parsing the document.
|
||||
|
||||
:param flags: Flags to be passed into Soup Sieve's
|
||||
`soupsieve.closest() <https://facelessuser.github.io/soupsieve/api/#soupsieveclosest>`_ method.
|
||||
|
||||
:param kwargs: Keyword arguments to be passed into Soup Sieve's
|
||||
`soupsieve.closest() <https://facelessuser.github.io/soupsieve/api/#soupsieveclosest>`_ method.
|
||||
|
||||
"""
|
||||
return self.api.closest(
|
||||
select, self.tag, self._ns(namespaces, select), flags, **kwargs
|
||||
)
|
||||
|
||||
def match(
|
||||
self,
|
||||
select: str,
|
||||
namespaces: Optional[_NamespaceMapping] = None,
|
||||
flags: int = 0,
|
||||
**kwargs: Any,
|
||||
) -> bool:
|
||||
"""Check whether or not this `element.Tag` matches the given CSS selector.
|
||||
|
||||
This uses the Soup Sieve library. For more information, see
|
||||
that library's documentation for the `soupsieve.match()
|
||||
<https://facelessuser.github.io/soupsieve/api/#soupsievematch>`_
|
||||
method.
|
||||
|
||||
:param: a CSS selector.
|
||||
|
||||
:param namespaces: A dictionary mapping namespace prefixes
|
||||
used in the CSS selector to namespace URIs. By default,
|
||||
Beautiful Soup will pass in the prefixes it encountered while
|
||||
parsing the document.
|
||||
|
||||
:param flags: Flags to be passed into Soup Sieve's
|
||||
`soupsieve.match()
|
||||
<https://facelessuser.github.io/soupsieve/api/#soupsievematch>`_
|
||||
method.
|
||||
|
||||
:param kwargs: Keyword arguments to be passed into SoupSieve's
|
||||
`soupsieve.match()
|
||||
<https://facelessuser.github.io/soupsieve/api/#soupsievematch>`_
|
||||
method.
|
||||
"""
|
||||
return cast(
|
||||
bool,
|
||||
self.api.match(
|
||||
select, self.tag, self._ns(namespaces, select), flags, **kwargs
|
||||
),
|
||||
)
|
||||
|
||||
def filter(
|
||||
self,
|
||||
select: str,
|
||||
namespaces: Optional[_NamespaceMapping] = None,
|
||||
flags: int = 0,
|
||||
**kwargs: Any,
|
||||
) -> ResultSet[element.Tag]:
|
||||
"""Filter this `element.Tag`'s direct children based on the given CSS selector.
|
||||
|
||||
This uses the Soup Sieve library. It works the same way as
|
||||
passing a `element.Tag` into that library's `soupsieve.filter()
|
||||
<https://facelessuser.github.io/soupsieve/api/#soupsievefilter>`_
|
||||
method. For more information, see the documentation for
|
||||
`soupsieve.filter()
|
||||
<https://facelessuser.github.io/soupsieve/api/#soupsievefilter>`_.
|
||||
|
||||
:param namespaces: A dictionary mapping namespace prefixes
|
||||
used in the CSS selector to namespace URIs. By default,
|
||||
Beautiful Soup will pass in the prefixes it encountered while
|
||||
parsing the document.
|
||||
|
||||
:param flags: Flags to be passed into Soup Sieve's
|
||||
`soupsieve.filter()
|
||||
<https://facelessuser.github.io/soupsieve/api/#soupsievefilter>`_
|
||||
method.
|
||||
|
||||
:param kwargs: Keyword arguments to be passed into SoupSieve's
|
||||
`soupsieve.filter()
|
||||
<https://facelessuser.github.io/soupsieve/api/#soupsievefilter>`_
|
||||
method.
|
||||
"""
|
||||
return self._rs(
|
||||
self.api.filter(
|
||||
select, self.tag, self._ns(namespaces, select), flags, **kwargs
|
||||
)
|
||||
)
|
||||
1408
gui/lib/python3.12/site-packages/bs4/dammit.py
Normal file
1408
gui/lib/python3.12/site-packages/bs4/dammit.py
Normal file
File diff suppressed because it is too large
Load Diff
268
gui/lib/python3.12/site-packages/bs4/diagnose.py
Normal file
268
gui/lib/python3.12/site-packages/bs4/diagnose.py
Normal file
@ -0,0 +1,268 @@
|
||||
"""Diagnostic functions, mainly for use when doing tech support."""
|
||||
|
||||
# Use of this source code is governed by the MIT license.
|
||||
__license__ = "MIT"
|
||||
|
||||
import cProfile
|
||||
from io import BytesIO
|
||||
from html.parser import HTMLParser
|
||||
import bs4
|
||||
from bs4 import BeautifulSoup, __version__
|
||||
from bs4.builder import builder_registry
|
||||
from typing import (
|
||||
Any,
|
||||
IO,
|
||||
List,
|
||||
Optional,
|
||||
Tuple,
|
||||
TYPE_CHECKING,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from bs4._typing import _IncomingMarkup
|
||||
|
||||
import pstats
|
||||
import random
|
||||
import tempfile
|
||||
import time
|
||||
import traceback
|
||||
import sys
|
||||
|
||||
|
||||
def diagnose(data: "_IncomingMarkup") -> None:
|
||||
"""Diagnostic suite for isolating common problems.
|
||||
|
||||
:param data: Some markup that needs to be explained.
|
||||
:return: None; diagnostics are printed to standard output.
|
||||
"""
|
||||
print(("Diagnostic running on Beautiful Soup %s" % __version__))
|
||||
print(("Python version %s" % sys.version))
|
||||
|
||||
basic_parsers = ["html.parser", "html5lib", "lxml"]
|
||||
for name in basic_parsers:
|
||||
for builder in builder_registry.builders:
|
||||
if name in builder.features:
|
||||
break
|
||||
else:
|
||||
basic_parsers.remove(name)
|
||||
print(
|
||||
("I noticed that %s is not installed. Installing it may help." % name)
|
||||
)
|
||||
|
||||
if "lxml" in basic_parsers:
|
||||
basic_parsers.append("lxml-xml")
|
||||
try:
|
||||
from lxml import etree
|
||||
|
||||
print(("Found lxml version %s" % ".".join(map(str, etree.LXML_VERSION))))
|
||||
except ImportError:
|
||||
print("lxml is not installed or couldn't be imported.")
|
||||
|
||||
if "html5lib" in basic_parsers:
|
||||
try:
|
||||
import html5lib
|
||||
|
||||
print(("Found html5lib version %s" % html5lib.__version__))
|
||||
except ImportError:
|
||||
print("html5lib is not installed or couldn't be imported.")
|
||||
|
||||
if hasattr(data, "read"):
|
||||
data = data.read()
|
||||
|
||||
for parser in basic_parsers:
|
||||
print(("Trying to parse your markup with %s" % parser))
|
||||
success = False
|
||||
try:
|
||||
soup = BeautifulSoup(data, features=parser)
|
||||
success = True
|
||||
except Exception:
|
||||
print(("%s could not parse the markup." % parser))
|
||||
traceback.print_exc()
|
||||
if success:
|
||||
print(("Here's what %s did with the markup:" % parser))
|
||||
print((soup.prettify()))
|
||||
|
||||
print(("-" * 80))
|
||||
|
||||
|
||||
def lxml_trace(data: "_IncomingMarkup", html: bool = True, **kwargs: Any) -> None:
|
||||
"""Print out the lxml events that occur during parsing.
|
||||
|
||||
This lets you see how lxml parses a document when no Beautiful
|
||||
Soup code is running. You can use this to determine whether
|
||||
an lxml-specific problem is in Beautiful Soup's lxml tree builders
|
||||
or in lxml itself.
|
||||
|
||||
:param data: Some markup.
|
||||
:param html: If True, markup will be parsed with lxml's HTML parser.
|
||||
if False, lxml's XML parser will be used.
|
||||
"""
|
||||
from lxml import etree
|
||||
|
||||
recover = kwargs.pop("recover", True)
|
||||
if isinstance(data, str):
|
||||
data = data.encode("utf8")
|
||||
if not isinstance(data, IO):
|
||||
reader = BytesIO(data)
|
||||
for event, element in etree.iterparse(reader, html=html, recover=recover, **kwargs):
|
||||
print(("%s, %4s, %s" % (event, element.tag, element.text)))
|
||||
|
||||
|
||||
class AnnouncingParser(HTMLParser):
|
||||
"""Subclass of HTMLParser that announces parse events, without doing
|
||||
anything else.
|
||||
|
||||
You can use this to get a picture of how html.parser sees a given
|
||||
document. The easiest way to do this is to call `htmlparser_trace`.
|
||||
"""
|
||||
|
||||
def _p(self, s: str) -> None:
|
||||
print(s)
|
||||
|
||||
def handle_starttag(
|
||||
self,
|
||||
name: str,
|
||||
attrs: List[Tuple[str, Optional[str]]],
|
||||
handle_empty_element: bool = True,
|
||||
) -> None:
|
||||
self._p(f"{name} {attrs} START")
|
||||
|
||||
def handle_endtag(self, name: str, check_already_closed: bool = True) -> None:
|
||||
self._p("%s END" % name)
|
||||
|
||||
def handle_data(self, data: str) -> None:
|
||||
self._p("%s DATA" % data)
|
||||
|
||||
def handle_charref(self, name: str) -> None:
|
||||
self._p("%s CHARREF" % name)
|
||||
|
||||
def handle_entityref(self, name: str) -> None:
|
||||
self._p("%s ENTITYREF" % name)
|
||||
|
||||
def handle_comment(self, data: str) -> None:
|
||||
self._p("%s COMMENT" % data)
|
||||
|
||||
def handle_decl(self, data: str) -> None:
|
||||
self._p("%s DECL" % data)
|
||||
|
||||
def unknown_decl(self, data: str) -> None:
|
||||
self._p("%s UNKNOWN-DECL" % data)
|
||||
|
||||
def handle_pi(self, data: str) -> None:
|
||||
self._p("%s PI" % data)
|
||||
|
||||
|
||||
def htmlparser_trace(data: str) -> None:
|
||||
"""Print out the HTMLParser events that occur during parsing.
|
||||
|
||||
This lets you see how HTMLParser parses a document when no
|
||||
Beautiful Soup code is running.
|
||||
|
||||
:param data: Some markup.
|
||||
"""
|
||||
parser = AnnouncingParser()
|
||||
parser.feed(data)
|
||||
|
||||
|
||||
_vowels: str = "aeiou"
|
||||
_consonants: str = "bcdfghjklmnpqrstvwxyz"
|
||||
|
||||
|
||||
def rword(length: int = 5) -> str:
|
||||
"""Generate a random word-like string.
|
||||
|
||||
:meta private:
|
||||
"""
|
||||
s = ""
|
||||
for i in range(length):
|
||||
if i % 2 == 0:
|
||||
t = _consonants
|
||||
else:
|
||||
t = _vowels
|
||||
s += random.choice(t)
|
||||
return s
|
||||
|
||||
|
||||
def rsentence(length: int = 4) -> str:
|
||||
"""Generate a random sentence-like string.
|
||||
|
||||
:meta private:
|
||||
"""
|
||||
return " ".join(rword(random.randint(4, 9)) for i in range(length))
|
||||
|
||||
|
||||
def rdoc(num_elements: int = 1000) -> str:
|
||||
"""Randomly generate an invalid HTML document.
|
||||
|
||||
:meta private:
|
||||
"""
|
||||
tag_names = ["p", "div", "span", "i", "b", "script", "table"]
|
||||
elements = []
|
||||
for i in range(num_elements):
|
||||
choice = random.randint(0, 3)
|
||||
if choice == 0:
|
||||
# New tag.
|
||||
tag_name = random.choice(tag_names)
|
||||
elements.append("<%s>" % tag_name)
|
||||
elif choice == 1:
|
||||
elements.append(rsentence(random.randint(1, 4)))
|
||||
elif choice == 2:
|
||||
# Close a tag.
|
||||
tag_name = random.choice(tag_names)
|
||||
elements.append("</%s>" % tag_name)
|
||||
return "<html>" + "\n".join(elements) + "</html>"
|
||||
|
||||
|
||||
def benchmark_parsers(num_elements: int = 100000) -> None:
|
||||
"""Very basic head-to-head performance benchmark."""
|
||||
print(("Comparative parser benchmark on Beautiful Soup %s" % __version__))
|
||||
data = rdoc(num_elements)
|
||||
print(("Generated a large invalid HTML document (%d bytes)." % len(data)))
|
||||
|
||||
for parser_name in ["lxml", ["lxml", "html"], "html5lib", "html.parser"]:
|
||||
success = False
|
||||
try:
|
||||
a = time.time()
|
||||
BeautifulSoup(data, parser_name)
|
||||
b = time.time()
|
||||
success = True
|
||||
except Exception:
|
||||
print(("%s could not parse the markup." % parser_name))
|
||||
traceback.print_exc()
|
||||
if success:
|
||||
print(("BS4+%s parsed the markup in %.2fs." % (parser_name, b - a)))
|
||||
|
||||
from lxml import etree
|
||||
|
||||
a = time.time()
|
||||
etree.HTML(data)
|
||||
b = time.time()
|
||||
print(("Raw lxml parsed the markup in %.2fs." % (b - a)))
|
||||
|
||||
import html5lib
|
||||
|
||||
parser = html5lib.HTMLParser()
|
||||
a = time.time()
|
||||
parser.parse(data)
|
||||
b = time.time()
|
||||
print(("Raw html5lib parsed the markup in %.2fs." % (b - a)))
|
||||
|
||||
|
||||
def profile(num_elements: int = 100000, parser: str = "lxml") -> None:
|
||||
"""Use Python's profiler on a randomly generated document."""
|
||||
filehandle = tempfile.NamedTemporaryFile()
|
||||
filename = filehandle.name
|
||||
|
||||
data = rdoc(num_elements)
|
||||
vars = dict(bs4=bs4, data=data, parser=parser)
|
||||
cProfile.runctx("bs4.BeautifulSoup(data, parser)", vars, vars, filename)
|
||||
|
||||
stats = pstats.Stats(filename)
|
||||
# stats.strip_dirs()
|
||||
stats.sort_stats("cumulative")
|
||||
stats.print_stats("_html5lib|bs4", 50)
|
||||
|
||||
|
||||
# If this file is run as a script, standard input is diagnosed.
|
||||
if __name__ == "__main__":
|
||||
diagnose(sys.stdin.read())
|
||||
2886
gui/lib/python3.12/site-packages/bs4/element.py
Normal file
2886
gui/lib/python3.12/site-packages/bs4/element.py
Normal file
File diff suppressed because it is too large
Load Diff
28
gui/lib/python3.12/site-packages/bs4/exceptions.py
Normal file
28
gui/lib/python3.12/site-packages/bs4/exceptions.py
Normal file
@ -0,0 +1,28 @@
|
||||
"""Exceptions defined by Beautiful Soup itself."""
|
||||
|
||||
from typing import Union
|
||||
|
||||
|
||||
class StopParsing(Exception):
|
||||
"""Exception raised by a TreeBuilder if it's unable to continue parsing."""
|
||||
|
||||
|
||||
class FeatureNotFound(ValueError):
|
||||
"""Exception raised by the BeautifulSoup constructor if no parser with the
|
||||
requested features is found.
|
||||
"""
|
||||
|
||||
|
||||
class ParserRejectedMarkup(Exception):
|
||||
"""An Exception to be raised when the underlying parser simply
|
||||
refuses to parse the given markup.
|
||||
"""
|
||||
|
||||
def __init__(self, message_or_exception: Union[str, Exception]):
|
||||
"""Explain why the parser rejected the given markup, either
|
||||
with a textual explanation or another exception.
|
||||
"""
|
||||
if isinstance(message_or_exception, Exception):
|
||||
e = message_or_exception
|
||||
message_or_exception = "%s: %s" % (e.__class__.__name__, str(e))
|
||||
super(ParserRejectedMarkup, self).__init__(message_or_exception)
|
||||
755
gui/lib/python3.12/site-packages/bs4/filter.py
Normal file
755
gui/lib/python3.12/site-packages/bs4/filter.py
Normal file
@ -0,0 +1,755 @@
|
||||
from __future__ import annotations
|
||||
from collections import defaultdict
|
||||
import re
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
cast,
|
||||
Dict,
|
||||
Iterator,
|
||||
Iterable,
|
||||
List,
|
||||
Optional,
|
||||
Sequence,
|
||||
Type,
|
||||
Union,
|
||||
)
|
||||
import warnings
|
||||
|
||||
from bs4._deprecation import _deprecated
|
||||
from bs4.element import (
|
||||
AttributeDict,
|
||||
NavigableString,
|
||||
PageElement,
|
||||
ResultSet,
|
||||
Tag,
|
||||
)
|
||||
from bs4._typing import (
|
||||
_AtMostOneElement,
|
||||
_AttributeValue,
|
||||
_OneElement,
|
||||
_PageElementMatchFunction,
|
||||
_QueryResults,
|
||||
_RawAttributeValues,
|
||||
_RegularExpressionProtocol,
|
||||
_StrainableAttribute,
|
||||
_StrainableElement,
|
||||
_StrainableString,
|
||||
_StringMatchFunction,
|
||||
_TagMatchFunction,
|
||||
)
|
||||
|
||||
|
||||
class ElementFilter(object):
|
||||
"""`ElementFilter` encapsulates the logic necessary to decide:
|
||||
|
||||
1. whether a `PageElement` (a `Tag` or a `NavigableString`) matches a
|
||||
user-specified query.
|
||||
|
||||
2. whether a given sequence of markup found during initial parsing
|
||||
should be turned into a `PageElement` at all, or simply discarded.
|
||||
|
||||
The base class is the simplest `ElementFilter`. By default, it
|
||||
matches everything and allows all markup to become `PageElement`
|
||||
objects. You can make it more selective by passing in a
|
||||
user-defined match function, or defining a subclass.
|
||||
|
||||
Most users of Beautiful Soup will never need to use
|
||||
`ElementFilter`, or its more capable subclass
|
||||
`SoupStrainer`. Instead, they will use methods like
|
||||
:py:meth:`Tag.find`, which will convert their arguments into
|
||||
`SoupStrainer` objects and run them against the tree.
|
||||
|
||||
However, if you find yourself wanting to treat the arguments to
|
||||
Beautiful Soup's find_*() methods as first-class objects, those
|
||||
objects will be `SoupStrainer` objects. You can create them
|
||||
yourself and then make use of functions like
|
||||
`ElementFilter.filter()`.
|
||||
"""
|
||||
|
||||
match_function: Optional[_PageElementMatchFunction]
|
||||
|
||||
def __init__(self, match_function: Optional[_PageElementMatchFunction] = None):
|
||||
"""Pass in a match function to easily customize the behavior of
|
||||
`ElementFilter.match` without needing to subclass.
|
||||
|
||||
:param match_function: A function that takes a `PageElement`
|
||||
and returns `True` if that `PageElement` matches some criteria.
|
||||
"""
|
||||
self.match_function = match_function
|
||||
|
||||
@property
|
||||
def includes_everything(self) -> bool:
|
||||
"""Does this `ElementFilter` obviously include everything? If so,
|
||||
the filter process can be made much faster.
|
||||
|
||||
The `ElementFilter` might turn out to include everything even
|
||||
if this returns `False`, but it won't include everything in an
|
||||
obvious way.
|
||||
|
||||
The base `ElementFilter` implementation includes things based on
|
||||
the match function, so includes_everything is only true if
|
||||
there is no match function.
|
||||
"""
|
||||
return not self.match_function
|
||||
|
||||
@property
|
||||
def excludes_everything(self) -> bool:
|
||||
"""Does this `ElementFilter` obviously exclude everything? If
|
||||
so, Beautiful Soup will issue a warning if you try to use it
|
||||
when parsing a document.
|
||||
|
||||
The `ElementFilter` might turn out to exclude everything even
|
||||
if this returns `False`, but it won't exclude everything in an
|
||||
obvious way.
|
||||
|
||||
The base `ElementFilter` implementation excludes things based
|
||||
on a match function we can't inspect, so excludes_everything
|
||||
is always false.
|
||||
"""
|
||||
return False
|
||||
|
||||
def match(self, element: PageElement, _known_rules:bool=False) -> bool:
|
||||
"""Does the given PageElement match the rules set down by this
|
||||
ElementFilter?
|
||||
|
||||
The base implementation delegates to the function passed in to
|
||||
the constructor.
|
||||
|
||||
:param _known_rules: Defined for compatibility with
|
||||
SoupStrainer._match(). Used more for consistency than because
|
||||
we need the performance optimization.
|
||||
"""
|
||||
if not _known_rules and self.includes_everything:
|
||||
return True
|
||||
if not self.match_function:
|
||||
return True
|
||||
return self.match_function(element)
|
||||
|
||||
def filter(self, generator: Iterator[PageElement]) -> Iterator[_OneElement]:
|
||||
"""The most generic search method offered by Beautiful Soup.
|
||||
|
||||
Acts like Python's built-in `filter`, using
|
||||
`ElementFilter.match` as the filtering function.
|
||||
"""
|
||||
# If there are no rules at all, don't bother filtering. Let
|
||||
# anything through.
|
||||
if self.includes_everything:
|
||||
for i in generator:
|
||||
yield i
|
||||
while True:
|
||||
try:
|
||||
i = next(generator)
|
||||
except StopIteration:
|
||||
break
|
||||
if i:
|
||||
if self.match(i, _known_rules=True):
|
||||
yield cast("_OneElement", i)
|
||||
|
||||
def find(self, generator: Iterator[PageElement]) -> _AtMostOneElement:
|
||||
"""A lower-level equivalent of :py:meth:`Tag.find`.
|
||||
|
||||
You can pass in your own generator for iterating over
|
||||
`PageElement` objects. The first one that matches this
|
||||
`ElementFilter` will be returned.
|
||||
|
||||
:param generator: A way of iterating over `PageElement`
|
||||
objects.
|
||||
"""
|
||||
for match in self.filter(generator):
|
||||
return match
|
||||
return None
|
||||
|
||||
def find_all(
|
||||
self, generator: Iterator[PageElement], limit: Optional[int] = None
|
||||
) -> _QueryResults:
|
||||
"""A lower-level equivalent of :py:meth:`Tag.find_all`.
|
||||
|
||||
You can pass in your own generator for iterating over
|
||||
`PageElement` objects. Only elements that match this
|
||||
`ElementFilter` will be returned in the :py:class:`ResultSet`.
|
||||
|
||||
:param generator: A way of iterating over `PageElement`
|
||||
objects.
|
||||
|
||||
:param limit: Stop looking after finding this many results.
|
||||
"""
|
||||
results: _QueryResults = ResultSet(self)
|
||||
for match in self.filter(generator):
|
||||
results.append(match)
|
||||
if limit is not None and len(results) >= limit:
|
||||
break
|
||||
return results
|
||||
|
||||
def allow_tag_creation(
|
||||
self, nsprefix: Optional[str], name: str, attrs: Optional[_RawAttributeValues]
|
||||
) -> bool:
|
||||
"""Based on the name and attributes of a tag, see whether this
|
||||
`ElementFilter` will allow a `Tag` object to even be created.
|
||||
|
||||
By default, all tags are parsed. To change this, subclass
|
||||
`ElementFilter`.
|
||||
|
||||
:param name: The name of the prospective tag.
|
||||
:param attrs: The attributes of the prospective tag.
|
||||
"""
|
||||
return True
|
||||
|
||||
def allow_string_creation(self, string: str) -> bool:
|
||||
"""Based on the content of a string, see whether this
|
||||
`ElementFilter` will allow a `NavigableString` object based on
|
||||
this string to be added to the parse tree.
|
||||
|
||||
By default, all strings are processed into `NavigableString`
|
||||
objects. To change this, subclass `ElementFilter`.
|
||||
|
||||
:param str: The string under consideration.
|
||||
"""
|
||||
return True
|
||||
|
||||
|
||||
class MatchRule(object):
|
||||
"""Each MatchRule encapsulates the logic behind a single argument
|
||||
passed in to one of the Beautiful Soup find* methods.
|
||||
"""
|
||||
|
||||
string: Optional[str]
|
||||
pattern: Optional[_RegularExpressionProtocol]
|
||||
present: Optional[bool]
|
||||
exclude_everything: Optional[bool]
|
||||
# TODO-TYPING: All MatchRule objects also have an attribute
|
||||
# ``function``, but the type of the function depends on the
|
||||
# subclass.
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
string: Optional[Union[str, bytes]] = None,
|
||||
pattern: Optional[_RegularExpressionProtocol] = None,
|
||||
function: Optional[Callable] = None,
|
||||
present: Optional[bool] = None,
|
||||
exclude_everything: Optional[bool] = None
|
||||
):
|
||||
if isinstance(string, bytes):
|
||||
string = string.decode("utf8")
|
||||
self.string = string
|
||||
if isinstance(pattern, bytes):
|
||||
self.pattern = re.compile(pattern.decode("utf8"))
|
||||
elif isinstance(pattern, str):
|
||||
self.pattern = re.compile(pattern)
|
||||
else:
|
||||
self.pattern = pattern
|
||||
self.function = function
|
||||
self.present = present
|
||||
self.exclude_everything = exclude_everything
|
||||
|
||||
values = [
|
||||
x
|
||||
for x in (self.string, self.pattern, self.function, self.present, self.exclude_everything)
|
||||
if x is not None
|
||||
]
|
||||
if len(values) == 0:
|
||||
raise ValueError(
|
||||
"Either string, pattern, function, present, or exclude_everything must be provided."
|
||||
)
|
||||
if len(values) > 1:
|
||||
raise ValueError(
|
||||
"At most one of string, pattern, function, present, and exclude_everything must be provided."
|
||||
)
|
||||
|
||||
def _base_match(self, string: Optional[str]) -> Optional[bool]:
|
||||
"""Run the 'cheap' portion of a match, trying to get an answer without
|
||||
calling a potentially expensive custom function.
|
||||
|
||||
:return: True or False if we have a (positive or negative)
|
||||
match; None if we need to keep trying.
|
||||
"""
|
||||
# self.exclude_everything matches nothing.
|
||||
if self.exclude_everything:
|
||||
return False
|
||||
|
||||
# self.present==True matches everything except None.
|
||||
if self.present is True:
|
||||
return string is not None
|
||||
|
||||
# self.present==False matches _only_ None.
|
||||
if self.present is False:
|
||||
return string is None
|
||||
|
||||
# self.string does an exact string match.
|
||||
if self.string is not None:
|
||||
# print(f"{self.string} ?= {string}")
|
||||
return self.string == string
|
||||
|
||||
# self.pattern does a regular expression search.
|
||||
if self.pattern is not None:
|
||||
# print(f"{self.pattern} ?~ {string}")
|
||||
if string is None:
|
||||
return False
|
||||
return self.pattern.search(string) is not None
|
||||
|
||||
return None
|
||||
|
||||
def matches_string(self, string: Optional[str]) -> bool:
|
||||
_base_result = self._base_match(string)
|
||||
if _base_result is not None:
|
||||
# No need to invoke the test function.
|
||||
return _base_result
|
||||
if self.function is not None and not self.function(string):
|
||||
# print(f"{self.function}({string}) == False")
|
||||
return False
|
||||
return True
|
||||
|
||||
def __repr__(self) -> str:
|
||||
cls = type(self).__name__
|
||||
return f"<{cls} string={self.string} pattern={self.pattern} function={self.function} present={self.present}>"
|
||||
|
||||
def __eq__(self, other: Any) -> bool:
|
||||
return (
|
||||
isinstance(other, MatchRule)
|
||||
and self.string == other.string
|
||||
and self.pattern == other.pattern
|
||||
and self.function == other.function
|
||||
and self.present == other.present
|
||||
)
|
||||
|
||||
|
||||
class TagNameMatchRule(MatchRule):
|
||||
"""A MatchRule implementing the rules for matches against tag name."""
|
||||
|
||||
function: Optional[_TagMatchFunction]
|
||||
|
||||
def matches_tag(self, tag: Tag) -> bool:
|
||||
base_value = self._base_match(tag.name)
|
||||
if base_value is not None:
|
||||
return base_value
|
||||
|
||||
# The only remaining possibility is that the match is determined
|
||||
# by a function call. Call the function.
|
||||
function = cast(_TagMatchFunction, self.function)
|
||||
if function(tag):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
class AttributeValueMatchRule(MatchRule):
|
||||
"""A MatchRule implementing the rules for matches against attribute value."""
|
||||
|
||||
function: Optional[_StringMatchFunction]
|
||||
|
||||
|
||||
class StringMatchRule(MatchRule):
|
||||
"""A MatchRule implementing the rules for matches against a NavigableString."""
|
||||
|
||||
function: Optional[_StringMatchFunction]
|
||||
|
||||
|
||||
class SoupStrainer(ElementFilter):
|
||||
"""The `ElementFilter` subclass used internally by Beautiful Soup.
|
||||
|
||||
A `SoupStrainer` encapsulates the logic necessary to perform the
|
||||
kind of matches supported by methods such as
|
||||
:py:meth:`Tag.find`. `SoupStrainer` objects are primarily created
|
||||
internally, but you can create one yourself and pass it in as
|
||||
``parse_only`` to the `BeautifulSoup` constructor, to parse a
|
||||
subset of a large document.
|
||||
|
||||
Internally, `SoupStrainer` objects work by converting the
|
||||
constructor arguments into `MatchRule` objects. Incoming
|
||||
tags/markup are matched against those rules.
|
||||
|
||||
:param name: One or more restrictions on the tags found in a document.
|
||||
|
||||
:param attrs: A dictionary that maps attribute names to
|
||||
restrictions on tags that use those attributes.
|
||||
|
||||
:param string: One or more restrictions on the strings found in a
|
||||
document.
|
||||
|
||||
:param kwargs: A dictionary that maps attribute names to restrictions
|
||||
on tags that use those attributes. These restrictions are additive to
|
||||
any specified in ``attrs``.
|
||||
|
||||
"""
|
||||
|
||||
name_rules: List[TagNameMatchRule]
|
||||
attribute_rules: Dict[str, List[AttributeValueMatchRule]]
|
||||
string_rules: List[StringMatchRule]
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
name: Optional[_StrainableElement] = None,
|
||||
attrs: Dict[str, _StrainableAttribute] = {},
|
||||
string: Optional[_StrainableString] = None,
|
||||
**kwargs: _StrainableAttribute,
|
||||
):
|
||||
if string is None and "text" in kwargs:
|
||||
string = cast(Optional[_StrainableString], kwargs.pop("text"))
|
||||
warnings.warn(
|
||||
"As of version 4.11.0, the 'text' argument to the SoupStrainer constructor is deprecated. Use 'string' instead.",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
|
||||
if name is None and not attrs and not string and not kwargs:
|
||||
# Special case for backwards compatibility. Instantiating
|
||||
# a SoupStrainer with no arguments whatsoever gets you one
|
||||
# that matches all Tags, and only Tags.
|
||||
self.name_rules = [TagNameMatchRule(present=True)]
|
||||
else:
|
||||
self.name_rules = cast(
|
||||
List[TagNameMatchRule], list(self._make_match_rules(name, TagNameMatchRule))
|
||||
)
|
||||
self.attribute_rules = defaultdict(list)
|
||||
|
||||
if not isinstance(attrs, dict):
|
||||
# Passing something other than a dictionary as attrs is
|
||||
# sugar for matching that thing against the 'class'
|
||||
# attribute.
|
||||
attrs = {"class": attrs}
|
||||
|
||||
for attrdict in attrs, kwargs:
|
||||
for attr, value in attrdict.items():
|
||||
if attr == "class_" and attrdict is kwargs:
|
||||
# If you pass in 'class_' as part of kwargs, it's
|
||||
# because class is a Python reserved word. If you
|
||||
# pass it in as part of the attrs dict, it's
|
||||
# because you really are looking for an attribute
|
||||
# called 'class_'.
|
||||
attr = "class"
|
||||
|
||||
if value is None:
|
||||
value = False
|
||||
for rule_obj in self._make_match_rules(value, AttributeValueMatchRule):
|
||||
self.attribute_rules[attr].append(
|
||||
cast(AttributeValueMatchRule, rule_obj)
|
||||
)
|
||||
|
||||
self.string_rules = cast(
|
||||
List[StringMatchRule], list(self._make_match_rules(string, StringMatchRule))
|
||||
)
|
||||
|
||||
#: DEPRECATED 4.13.0: You shouldn't need to check this under
|
||||
#: any name (.string or .text), and if you do, you're probably
|
||||
#: not taking into account all of the types of values this
|
||||
#: variable might have. Look at the .string_rules list instead.
|
||||
self.__string = string
|
||||
|
||||
@property
|
||||
def includes_everything(self) -> bool:
|
||||
"""Check whether the provided rules will obviously include
|
||||
everything. (They might include everything even if this returns `False`,
|
||||
but not in an obvious way.)
|
||||
"""
|
||||
return not self.name_rules and not self.string_rules and not self.attribute_rules
|
||||
|
||||
@property
|
||||
def excludes_everything(self) -> bool:
|
||||
"""Check whether the provided rules will obviously exclude
|
||||
everything. (They might exclude everything even if this returns `False`,
|
||||
but not in an obvious way.)
|
||||
"""
|
||||
if (self.string_rules and (self.name_rules or self.attribute_rules)):
|
||||
# This is self-contradictory, so the rules exclude everything.
|
||||
return True
|
||||
|
||||
# If there's a rule that ended up treated as an "exclude everything"
|
||||
# rule due to creating a logical inconsistency, then the rules
|
||||
# exclude everything.
|
||||
if any(x.exclude_everything for x in self.string_rules):
|
||||
return True
|
||||
if any(x.exclude_everything for x in self.name_rules):
|
||||
return True
|
||||
for ruleset in self.attribute_rules.values():
|
||||
if any(x.exclude_everything for x in ruleset):
|
||||
return True
|
||||
return False
|
||||
|
||||
@property
|
||||
def string(self) -> Optional[_StrainableString]:
|
||||
":meta private:"
|
||||
warnings.warn(
|
||||
"Access to deprecated property string. (Look at .string_rules instead) -- Deprecated since version 4.13.0.",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
return self.__string
|
||||
|
||||
@property
|
||||
def text(self) -> Optional[_StrainableString]:
|
||||
":meta private:"
|
||||
warnings.warn(
|
||||
"Access to deprecated property text. (Look at .string_rules instead) -- Deprecated since version 4.13.0.",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
return self.__string
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return f"<{self.__class__.__name__} name={self.name_rules} attrs={self.attribute_rules} string={self.string_rules}>"
|
||||
|
||||
@classmethod
|
||||
def _make_match_rules(
|
||||
cls,
|
||||
obj: Optional[Union[_StrainableElement, _StrainableAttribute]],
|
||||
rule_class: Type[MatchRule],
|
||||
) -> Iterator[MatchRule]:
|
||||
"""Convert a vaguely-specific 'object' into one or more well-defined
|
||||
`MatchRule` objects.
|
||||
|
||||
:param obj: Some kind of object that corresponds to one or more
|
||||
matching rules.
|
||||
:param rule_class: Create instances of this `MatchRule` subclass.
|
||||
"""
|
||||
if obj is None:
|
||||
return
|
||||
if isinstance(obj, (str, bytes)):
|
||||
yield rule_class(string=obj)
|
||||
elif isinstance(obj, bool):
|
||||
yield rule_class(present=obj)
|
||||
elif callable(obj):
|
||||
yield rule_class(function=obj)
|
||||
elif isinstance(obj, _RegularExpressionProtocol):
|
||||
yield rule_class(pattern=obj)
|
||||
elif hasattr(obj, "__iter__"):
|
||||
if not obj:
|
||||
# The attribute is being matched against the null set,
|
||||
# which means it should exclude everything.
|
||||
yield rule_class(exclude_everything=True)
|
||||
for o in obj:
|
||||
if not isinstance(o, (bytes, str)) and hasattr(o, "__iter__"):
|
||||
# This is almost certainly the user's
|
||||
# mistake. This list contains another list, which
|
||||
# opens up the possibility of infinite
|
||||
# self-reference. In the interests of avoiding
|
||||
# infinite recursion, we'll treat this as an
|
||||
# impossible match and issue a rule that excludes
|
||||
# everything, rather than looking inside.
|
||||
warnings.warn(
|
||||
f"Ignoring nested list {o} to avoid the possibility of infinite recursion.",
|
||||
stacklevel=5,
|
||||
)
|
||||
yield rule_class(exclude_everything=True)
|
||||
continue
|
||||
for x in cls._make_match_rules(o, rule_class):
|
||||
yield x
|
||||
else:
|
||||
yield rule_class(string=str(obj))
|
||||
|
||||
def matches_tag(self, tag: Tag) -> bool:
    """Do the rules of this `SoupStrainer` trigger a match against the
    given `Tag`?

    If there are any name rules, at least one must match the `Tag`
    itself or its prefixed name. If there are attribute rules for a
    given attribute, at least one must match that attribute's value.
    If there are string rules, at least one must match `Tag.string` --
    but a `SoupStrainer` containing *only* string rules can match a
    `NavigableString`, never a `Tag`.
    """
    # A strainer defined purely in terms of string rules can never
    # match a Tag on its own.
    if not (self.name_rules or self.attribute_rules):
        return False

    # Fast path for the very common case: the caller wants one
    # specific literal tag name, and this (unprefixed) tag has a
    # different name.
    if (
        not tag.prefix
        and len(self.name_rules) == 1
        and self.name_rules[0].string is not None
        and tag.name != self.name_rules[0].string
    ):
        return False

    # A name rule may match either the Tag object itself or the
    # "prefix:name" form of its name.
    prefixed = f"{tag.prefix}:{tag.name}" if tag.prefix else None

    if self.name_rules and not any(
        rule.matches_tag(tag)
        or (prefixed is not None and rule.matches_string(prefixed))
        for rule in self.name_rules
    ):
        return False

    # Every attribute that has rules must be matched by at least one
    # of its rules.
    for attr_name, attr_rules in self.attribute_rules.items():
        if not self._attribute_match(tag.get(attr_name, None), attr_rules):
            return False

    # Finally, any string rules must match the tag's string.
    if self.string_rules:
        tag_string = tag.string
        if tag_string is None:
            return False
        if not self.matches_any_string_rule(tag_string):
            return False
    return True
def _attribute_match(
    self,
    attr_value: Optional[_AttributeValue],
    rules: Iterable[AttributeValueMatchRule],
) -> bool:
    """True if `attr_value` satisfies at least one of `rules`.

    A multi-valued attribute (a list, e.g. ``class``) matches if any
    single value matches; failing that, the values are joined with
    spaces and matched again as one string.
    """
    candidates: Sequence[Optional[str]]
    if isinstance(attr_value, list):
        candidates = attr_value
    else:
        candidates = [cast(str, attr_value)]

    def _any_match(values: Sequence[Optional[str]]) -> bool:
        # Rule order is preserved: each rule is tried against every
        # candidate value before moving on to the next rule.
        return any(
            rule.matches_string(value) for rule in rules for value in values
        )

    matched = _any_match(candidates)
    if not matched and len(candidates) > 1:
        # None never appears inside a multi-valued attribute
        # (a lone None attr_value became a single-element list above,
        # so len > 1 rules it out), making this cast safe. Retry,
        # treating the whole list as one space-joined string.
        joined = " ".join(cast(Sequence[str], candidates))
        matched = _any_match([joined])
    return matched
def allow_tag_creation(
    self, nsprefix: Optional[str], name: str, attrs: Optional[_RawAttributeValues]
) -> bool:
    """Based on the name and attributes of a tag, see whether this
    `SoupStrainer` will allow a `Tag` object to even be created.

    :param nsprefix: The namespace prefix of the prospective tag, if any.
    :param name: The name of the prospective tag.
    :param attrs: The attributes of the prospective tag.
    """
    if self.string_rules:
        # String rules can only be evaluated after the tag and all of
        # its contents have been parsed, so a strainer with string
        # rules cannot be used to manage tag creation.
        return False

    prefixed_name = f"{nsprefix}:{name}" if nsprefix else None

    if self.name_rules:
        # At least one name rule must match either form of the name.
        found = False
        for name_rule in self.name_rules:
            for candidate in (name, prefixed_name):
                if candidate is not None and name_rule.matches_string(candidate):
                    found = True
                    break
        if not found:
            return False

    # Each attribute that has rules must satisfy at least one of them.
    if attrs is None:
        attrs = AttributeDict()
    for attr, attr_rules in self.attribute_rules.items():
        if not self._attribute_match(attrs.get(attr), attr_rules):
            return False

    return True
def allow_string_creation(self, string: str) -> bool:
    """Based on the content of a markup string, see whether this
    `SoupStrainer` will allow it to be instantiated as a
    `NavigableString` object, or whether it should be ignored.
    """
    if self.name_rules or self.attribute_rules:
        # Name/attribute rules mean this strainer targets tags with
        # certain properties; a bare string can never satisfy them.
        return False
    if not self.string_rules:
        # No string rules at all: every string is allowed through.
        return True
    return self.matches_any_string_rule(string)
def matches_any_string_rule(self, string: str) -> bool:
    """See whether the content of `string` matches any of this
    `SoupStrainer`'s string rules.

    Vacuously true when there are no string rules at all.
    """
    if not self.string_rules:
        return True
    return any(rule.matches_string(string) for rule in self.string_rules)
def match(self, element: PageElement, _known_rules: bool = False) -> bool:
    """Does the given `PageElement` match the rules set down by this
    `SoupStrainer`?

    The find_* methods rely heavily on this method to find matches.

    :param element: A `PageElement`.
    :param _known_rules: Set to True in the common case where the
        caller already checked that this SoupStrainer has at least one
        rule that might exclude a PageElement; it skips the otherwise
        mandatory `includes_everything` check.
    :return: True if `element` matches this strainer's rules,
        False otherwise.
    """
    # With no rules at all, everything gets through.
    if not _known_rules and self.includes_everything:
        return True
    if isinstance(element, Tag):
        return self.matches_tag(element)
    assert isinstance(element, NavigableString)
    if self.name_rules or self.attribute_rules:
        # A NavigableString can only match a SoupStrainer that defines
        # no name or attribute rules.
        return False
    # Then it comes down to the string rules alone.
    return self.matches_any_string_rule(element)
@_deprecated("allow_tag_creation", "4.13.0")
|
||||
def search_tag(self, name: str, attrs: Optional[_RawAttributeValues]) -> bool:
|
||||
"""A less elegant version of `allow_tag_creation`. Deprecated as of 4.13.0"""
|
||||
":meta private:"
|
||||
return self.allow_tag_creation(None, name, attrs)
|
||||
|
||||
@_deprecated("match", "4.13.0")
|
||||
def search(self, element: PageElement) -> Optional[PageElement]:
|
||||
"""A less elegant version of match(). Deprecated as of 4.13.0.
|
||||
|
||||
:meta private:
|
||||
"""
|
||||
return element if self.match(element) else None
|
||||
276
gui/lib/python3.12/site-packages/bs4/formatter.py
Normal file
276
gui/lib/python3.12/site-packages/bs4/formatter.py
Normal file
@ -0,0 +1,276 @@
|
||||
from __future__ import annotations
|
||||
from typing import Callable, Dict, Iterable, Optional, Set, Tuple, TYPE_CHECKING, Union
|
||||
from typing_extensions import TypeAlias
|
||||
from bs4.dammit import EntitySubstitution
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from bs4._typing import _AttributeValue
|
||||
|
||||
|
||||
class Formatter(EntitySubstitution):
    """Describes a strategy to use when outputting a parse tree to a string.

    Some parts of this strategy come from the distinction between
    HTML4, HTML5, and XML. Others are configurable by the user.

    Formatters are passed in as the `formatter` argument to methods
    like `bs4.element.Tag.encode`. Most people won't need to think
    about formatters, and most people who do can pass in one of these
    predefined strings as `formatter` rather than making a new
    Formatter object:

    For HTML documents:
     * 'html' - HTML entity substitution for generic HTML documents. (default)
     * 'html5' - HTML entity substitution for HTML5 documents, as
       well as some optimizations in the way tags are rendered.
     * 'html5-4.12.0' - The version of the 'html5' formatter used prior to
       Beautiful Soup 4.13.0.
     * 'minimal' - Only make the substitutions necessary to guarantee
       valid HTML.
     * None - Do not perform any substitution. This will be faster
       but may result in invalid markup.

    For XML documents:
     * 'html' - Entity substitution for XHTML documents.
     * 'minimal' - Only make the substitutions necessary to guarantee
       valid XML. (default)
     * None - Do not perform any substitution. This will be faster
       but may result in invalid markup.
    """

    #: Constant name denoting HTML markup
    HTML: str = "html"

    #: Constant name denoting XML markup
    XML: str = "xml"

    #: Default values for the various constructor options when the
    #: markup language is HTML.
    HTML_DEFAULTS: Dict[str, Set[str]] = {
        "cdata_containing_tags": {"script", "style"},
    }

    language: Optional[str]  #: :meta private:
    entity_substitution: Optional[_EntitySubstitutionFunction]  #: :meta private:
    void_element_close_prefix: str  #: :meta private:
    cdata_containing_tags: Set[str]  #: :meta private:
    indent: str  #: :meta private:

    #: If this is set to true by the constructor, then attributes whose
    #: values are set to the empty string will be treated as HTML
    #: boolean attributes. (Attributes whose value is None are always
    #: rendered this way.)
    empty_attributes_are_booleans: bool

    def _default(
        self, language: str, value: Optional[Set[str]], kwarg: str
    ) -> Set[str]:
        # An explicitly provided value always wins; otherwise XML
        # defaults everything to the empty set, and HTML reads the
        # default out of HTML_DEFAULTS.
        if value is not None:
            return value
        if language == self.XML:
            return set()
        return self.HTML_DEFAULTS[kwarg]

    def __init__(
        self,
        language: Optional[str] = None,
        entity_substitution: Optional[_EntitySubstitutionFunction] = None,
        void_element_close_prefix: str = "/",
        cdata_containing_tags: Optional[Set[str]] = None,
        empty_attributes_are_booleans: bool = False,
        indent: Union[int, str] = 1,
    ):
        r"""Constructor.

        :param language: This should be `Formatter.XML` if you are formatting
           XML markup and `Formatter.HTML` if you are formatting HTML markup.

        :param entity_substitution: A function to call to replace special
           characters with XML/HTML entities. For examples, see
           bs4.dammit.EntitySubstitution.substitute_html and substitute_xml.
        :param void_element_close_prefix: By default, void elements
           are represented as <tag/> (XML rules) rather than <tag>
           (HTML rules). To get <tag>, pass in the empty string.
        :param cdata_containing_tags: The set of tags that are defined
           as containing CDATA in this dialect. For example, in HTML,
           <script> and <style> tags are defined as containing CDATA,
           and their contents should not be formatted.
        :param empty_attributes_are_booleans: If this is set to true,
           then attributes whose values are set to the empty string
           will be treated as `HTML boolean
           attributes<https://dev.w3.org/html5/spec-LC/common-microsyntaxes.html#boolean-attributes>`_. (Attributes
           whose value is None are always rendered this way.)
        :param indent: If indent is a non-negative integer or string,
           then the contents of elements will be indented
           appropriately when pretty-printing. An indent level of 0,
           negative, or "" will only insert newlines. Using a
           positive integer indent indents that many spaces per
           level. If indent is a string (such as "\t"), that string
           is used to indent each level. The default behavior is to
           indent one space per level.
        """
        self.language = language or self.HTML
        self.entity_substitution = entity_substitution
        self.void_element_close_prefix = void_element_close_prefix
        self.cdata_containing_tags = self._default(
            self.language, cdata_containing_tags, "cdata_containing_tags"
        )
        self.empty_attributes_are_booleans = empty_attributes_are_booleans
        # Normalize `indent` into a literal indentation string:
        # None / negative ints collapse to "", ints become that many
        # spaces, strings pass through, anything else falls back to a
        # single space.
        if indent is None:
            indent = 0
        if isinstance(indent, int):
            self.indent = " " * max(indent, 0)
        elif isinstance(indent, str):
            self.indent = indent
        else:
            self.indent = " "

    def substitute(self, ns: str) -> str:
        """Process a string that needs to undergo entity substitution.
        This may be a string encountered in an attribute value or as
        text.

        :param ns: A string.
        :return: The same string but with certain characters replaced by named
           or numeric entities.
        """
        if not self.entity_substitution:
            return ns
        from .element import NavigableString

        inside_cdata_tag = (
            isinstance(ns, NavigableString)
            and ns.parent is not None
            and ns.parent.name in self.cdata_containing_tags
        )
        if inside_cdata_tag:
            # The contents of CDATA-containing tags are emitted verbatim.
            return ns
        return self.entity_substitution(ns)

    def attribute_value(self, value: str) -> str:
        """Process the value of an attribute.

        :param value: A string.
        :return: A string with certain characters replaced by named
           or numeric entities.
        """
        return self.substitute(value)

    def attributes(
        self, tag: bs4.element.Tag
    ) -> Iterable[Tuple[str, Optional[_AttributeValue]]]:
        """Reorder a tag's attributes however you want.

        By default, attributes are sorted alphabetically. This makes
        behavior consistent between Python 2 and Python 3, and preserves
        backwards compatibility with older versions of Beautiful Soup.

        If `empty_attributes_are_booleans` is True, then
        attributes whose values are set to the empty string will be
        treated as boolean attributes.
        """
        if tag.attrs is None:
            return []

        pairs: list = []
        for k, v in tag.attrs.items():
            if self.empty_attributes_are_booleans and v == "":
                v = None
            pairs.append((k, v))
        return sorted(pairs)
class HTMLFormatter(Formatter):
    """A generic Formatter for HTML."""

    #: Named formatter instances, keyed by the alias strings users
    #: pass as the ``formatter`` argument.
    REGISTRY: Dict[Optional[str], HTMLFormatter] = {}

    def __init__(
        self,
        entity_substitution: Optional[_EntitySubstitutionFunction] = None,
        void_element_close_prefix: str = "/",
        cdata_containing_tags: Optional[Set[str]] = None,
        empty_attributes_are_booleans: bool = False,
        indent: Union[int, str] = 1,
    ):
        # Same options as Formatter, with the language pinned to HTML.
        super().__init__(
            self.HTML,
            entity_substitution,
            void_element_close_prefix,
            cdata_containing_tags,
            empty_attributes_are_booleans,
            indent=indent,
        )
class XMLFormatter(Formatter):
    """A generic Formatter for XML."""

    #: Named formatter instances, keyed by the alias strings users
    #: pass as the ``formatter`` argument.
    REGISTRY: Dict[Optional[str], XMLFormatter] = {}

    def __init__(
        self,
        entity_substitution: Optional[_EntitySubstitutionFunction] = None,
        void_element_close_prefix: str = "/",
        cdata_containing_tags: Optional[Set[str]] = None,
        empty_attributes_are_booleans: bool = False,
        indent: Union[int, str] = 1,
    ):
        # Same options as Formatter, with the language pinned to XML.
        super().__init__(
            self.XML,
            entity_substitution,
            void_element_close_prefix,
            cdata_containing_tags,
            empty_attributes_are_booleans,
            indent=indent,
        )
# Set up aliases for the default formatters.
HTMLFormatter.REGISTRY["html"] = HTMLFormatter(
    entity_substitution=EntitySubstitution.substitute_html
)

# HTML5 rendering: HTML5-specific entity substitution, no "/" on void
# elements, and empty-valued attributes rendered as booleans.
HTMLFormatter.REGISTRY["html5"] = HTMLFormatter(
    entity_substitution=EntitySubstitution.substitute_html5,
    void_element_close_prefix="",
    empty_attributes_are_booleans=True,
)
# The pre-4.13.0 behavior of the 'html5' formatter.
HTMLFormatter.REGISTRY["html5-4.12"] = HTMLFormatter(
    entity_substitution=EntitySubstitution.substitute_html,
    void_element_close_prefix="",
    empty_attributes_are_booleans=True,
)
HTMLFormatter.REGISTRY["minimal"] = HTMLFormatter(
    entity_substitution=EntitySubstitution.substitute_xml
)
HTMLFormatter.REGISTRY[None] = HTMLFormatter(entity_substitution=None)

XMLFormatter.REGISTRY["html"] = XMLFormatter(
    entity_substitution=EntitySubstitution.substitute_html
)
XMLFormatter.REGISTRY["minimal"] = XMLFormatter(
    entity_substitution=EntitySubstitution.substitute_xml
)
XMLFormatter.REGISTRY[None] = XMLFormatter(entity_substitution=None)

# Type aliases, defined at the bottom of the module but usable in the
# annotations above thanks to ``from __future__ import annotations``.

#: A function to call to replace special characters with XML or HTML
#: entities.
_EntitySubstitutionFunction: TypeAlias = Callable[[str], str]

#: Many of the output-centered methods take an argument that can either
#: be a Formatter object or the name of a Formatter to be looked up.
_FormatterOrName = Union[Formatter, str]
0
gui/lib/python3.12/site-packages/bs4/py.typed
Normal file
0
gui/lib/python3.12/site-packages/bs4/py.typed
Normal file
1305
gui/lib/python3.12/site-packages/bs4/tests/__init__.py
Normal file
1305
gui/lib/python3.12/site-packages/bs4/tests/__init__.py
Normal file
File diff suppressed because it is too large
Load Diff
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@ -0,0 +1 @@
|
||||
<20><> <20> <css
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user