diff --git a/README.md b/README.md index b11dacc..09f3aac 100644 --- a/README.md +++ b/README.md @@ -289,4 +289,219 @@ config = HandleLabel( ) response = client.label.handle_label(instance_id, config, instance_token) -``` \ No newline at end of file +``` + +## WebSocket + +O cliente Evolution API suporta conexão via WebSocket para receber eventos em tempo real. Aqui está um guia de como usar: + +### Configuração Básica + +```python +from evolutionapi.services.websocket import WebSocketManager +import logging + +# Configuração do logging +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' +) + +# Configuração do WebSocket +websocket = WebSocketManager( + base_url="http://localhost:8081", # URL da sua Evolution API + instance_id="teste", # ID da sua instância + api_token="seu-token-aqui" # Token de autenticação +) +``` + +### Registrando Handlers de Eventos + +Você pode registrar handlers para diferentes tipos de eventos: + +```python +def handle_message(data): + print(f"Nova mensagem recebida: {data}") + +def handle_qrcode(data): + print(f"QR Code atualizado: {data}") + +# Registrando handlers +websocket.on("messages.upsert", handle_message) +websocket.on("qrcode.updated", handle_qrcode) +``` + +### Eventos Disponíveis + +Os eventos disponíveis são: + +#### Eventos de Instância +- `application.startup`: Disparado quando a aplicação inicia +- `instance.create`: Disparado quando uma nova instância é criada +- `instance.delete`: Disparado quando uma instância é deletada +- `remove.instance`: Disparado quando uma instância é removida +- `logout.instance`: Disparado quando uma instância faz logout + +#### Eventos de Conexão e QR Code +- `qrcode.updated`: Disparado quando o QR Code é atualizado +- `connection.update`: Disparado quando o status da conexão muda +- `status.instance`: Disparado quando o status da instância muda +- `creds.update`: Disparado quando as credenciais são atualizadas + +#### Eventos de 
Mensagens +- `messages.set`: Disparado quando mensagens são definidas +- `messages.upsert`: Disparado quando novas mensagens são recebidas +- `messages.edited`: Disparado quando mensagens são editadas +- `messages.update`: Disparado quando mensagens são atualizadas +- `messages.delete`: Disparado quando mensagens são deletadas +- `send.message`: Disparado quando uma mensagem é enviada +- `messaging-history.set`: Disparado quando o histórico de mensagens é definido + +#### Eventos de Contatos +- `contacts.set`: Disparado quando contatos são definidos +- `contacts.upsert`: Disparado quando novos contatos são adicionados +- `contacts.update`: Disparado quando contatos são atualizados + +#### Eventos de Chats +- `chats.set`: Disparado quando chats são definidos +- `chats.update`: Disparado quando chats são atualizados +- `chats.upsert`: Disparado quando novos chats são adicionados +- `chats.delete`: Disparado quando chats são deletados + +#### Eventos de Grupos +- `groups.upsert`: Disparado quando grupos são criados/atualizados +- `groups.update`: Disparado quando grupos são atualizados +- `group-participants.update`: Disparado quando participantes de um grupo são atualizados + +#### Eventos de Presença +- `presence.update`: Disparado quando o status de presença é atualizado + +#### Eventos de Chamadas +- `call`: Disparado quando há uma chamada + +#### Eventos de Typebot +- `typebot.start`: Disparado quando um typebot inicia +- `typebot.change-status`: Disparado quando o status do typebot muda + +#### Eventos de Labels +- `labels.edit`: Disparado quando labels são editados +- `labels.association`: Disparado quando labels são associados/desassociados + +### Exemplo de Uso com Eventos Específicos + +```python +def handle_messages(data): + logger.info(f"Nova mensagem: {data}") + +def handle_contacts(data): + logger.info(f"Contatos atualizados: {data}") + +def handle_groups(data): + logger.info(f"Grupos atualizados: {data}") + +def handle_presence(data): + 
logger.info(f"Status de presença: {data}") + +# Registrando handlers para diferentes eventos +websocket.on("messages.upsert", handle_messages) +websocket.on("contacts.upsert", handle_contacts) +websocket.on("groups.upsert", handle_groups) +websocket.on("presence.update", handle_presence) +``` + +### Exemplo Completo + +```python +from evolutionapi.services.websocket import WebSocketManager +import logging +import time + +# Configuração do logging +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' +) +logger = logging.getLogger(__name__) + +def handle_message(data): + logger.info(f"Nova mensagem recebida: {data}") + +def handle_qrcode(data): + logger.info(f"QR Code atualizado: {data}") + +def handle_connection(data): + logger.info(f"Status da conexão: {data}") + +def main(): + # Inicializa o WebSocket + websocket = WebSocketManager( + base_url="http://localhost:8081", + instance_id="teste", + api_token="seu-token-aqui" + ) + + # Registra os handlers + websocket.on("messages.upsert", handle_message) + websocket.on("qrcode.updated", handle_qrcode) + websocket.on("connection.update", handle_connection) + + try: + # Conecta ao WebSocket + websocket.connect() + logger.info("Conectado ao WebSocket. 
Aguardando eventos...") + + # Mantém o programa rodando + while True: + time.sleep(1) + + except KeyboardInterrupt: + logger.info("Encerrando conexão...") + websocket.disconnect() + except Exception as e: + logger.error(f"Erro: {e}") + websocket.disconnect() + +if __name__ == "__main__": + main() + +### Recursos Adicionais + +#### Reconexão Automática + +O WebSocket Manager possui reconexão automática com backoff exponencial: + +```python +websocket = WebSocketManager( + base_url="http://localhost:8081", + instance_id="teste", + api_token="seu-token-aqui", + max_retries=5, # Número máximo de tentativas de reconexão + retry_delay=1.0 # Delay inicial entre tentativas em segundos +) +``` + +#### Logging + +O WebSocket Manager utiliza o sistema de logging do Python. Você pode ajustar o nível de log conforme necessário: + +```python +# Para ver mais detalhes +logging.getLogger("evolutionapi.services.websocket").setLevel(logging.DEBUG) +``` + +### Tratamento de Erros + +O WebSocket Manager possui tratamento de erros robusto: + +- Reconexão automática em caso de desconexão +- Logs detalhados de erros +- Tratamento de eventos inválidos +- Validação de dados recebidos + +### Dicas de Uso + +1. Sempre use try/except ao conectar ao WebSocket +2. Implemente handlers para todos os eventos que você precisa monitorar +3. Use logging para debug e monitoramento +4. Considere implementar um mecanismo de heartbeat se necessário +5. 
Mantenha o token de API seguro e não o exponha em logs \ No newline at end of file diff --git a/env/bin/wsdump b/env/bin/wsdump new file mode 100755 index 0000000..cffc8dc --- /dev/null +++ b/env/bin/wsdump @@ -0,0 +1,8 @@ +#!/home/davidson/Projects/evolution_client/python/env/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from websocket._wsdump import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/env/lib/python3.10/site-packages/bidict-0.23.1.dist-info/INSTALLER b/env/lib/python3.10/site-packages/bidict-0.23.1.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/env/lib/python3.10/site-packages/bidict-0.23.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/env/lib/python3.10/site-packages/bidict-0.23.1.dist-info/LICENSE b/env/lib/python3.10/site-packages/bidict-0.23.1.dist-info/LICENSE new file mode 100644 index 0000000..d1cc6f8 --- /dev/null +++ b/env/lib/python3.10/site-packages/bidict-0.23.1.dist-info/LICENSE @@ -0,0 +1,376 @@ +Mozilla Public License Version 2.0 +================================== + +Copyright 2009-2024 Joshua Bronson. All rights reserved. + + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. 
"Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. 
For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/env/lib/python3.10/site-packages/bidict-0.23.1.dist-info/METADATA b/env/lib/python3.10/site-packages/bidict-0.23.1.dist-info/METADATA new file mode 100644 index 0000000..5356d23 --- /dev/null +++ b/env/lib/python3.10/site-packages/bidict-0.23.1.dist-info/METADATA @@ -0,0 +1,260 @@ +Metadata-Version: 2.1 +Name: bidict +Version: 0.23.1 +Summary: The bidirectional mapping library for Python. +Author-email: Joshua Bronson +License: MPL 2.0 +Project-URL: Changelog, https://bidict.readthedocs.io/changelog.html +Project-URL: Documentation, https://bidict.readthedocs.io +Project-URL: Funding, https://bidict.readthedocs.io/#sponsoring +Project-URL: Repository, https://github.com/jab/bidict +Keywords: bidict,bimap,bidirectional,dict,dictionary,mapping,collections +Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0) +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Typing :: Typed +Requires-Python: >=3.8 +Description-Content-Type: text/x-rst +License-File: LICENSE + +.. role:: doc +.. (Forward declaration for the "doc" role that Sphinx defines for interop with renderers that + are often used to show this doc and that are unaware of Sphinx (GitHub.com, PyPI.org, etc.). + Use :doc: rather than :ref: here for better interop as well.) + + +bidict +====== + +*The bidirectional mapping library for Python.* + + +Status +------ + +.. image:: https://img.shields.io/pypi/v/bidict.svg + :target: https://pypi.org/project/bidict + :alt: Latest release + +.. 
image:: https://img.shields.io/readthedocs/bidict/main.svg + :target: https://bidict.readthedocs.io/en/main/ + :alt: Documentation + +.. image:: https://github.com/jab/bidict/actions/workflows/test.yml/badge.svg + :target: https://github.com/jab/bidict/actions/workflows/test.yml?query=branch%3Amain + :alt: GitHub Actions CI status + +.. image:: https://img.shields.io/pypi/l/bidict.svg + :target: https://raw.githubusercontent.com/jab/bidict/main/LICENSE + :alt: License + +.. image:: https://static.pepy.tech/badge/bidict + :target: https://pepy.tech/project/bidict + :alt: PyPI Downloads + +.. image:: https://img.shields.io/badge/GitHub-sponsor-ff69b4 + :target: https://github.com/sponsors/jab + :alt: Sponsor + + +Features +-------- + +- Mature: Depended on by + Google, Venmo, CERN, Baidu, Tencent, + and teams across the world since 2009 + +- Familiar, Pythonic APIs + that are carefully designed for + safety, simplicity, flexibility, and ergonomics + +- Lightweight, with no runtime dependencies + outside Python's standard library + +- Implemented in + concise, well-factored, fully type-hinted Python code + that is optimized for running efficiently + as well as for long-term maintenance and stability + (as well as `joy <#learning-from-bidict>`__) + +- Extensively `documented `__ + +- 100% test coverage + running continuously across all supported Python versions + (including property-based tests and benchmarks) + + +Installation +------------ + +``pip install bidict`` + + +Quick Start +----------- + +.. code:: python + + >>> from bidict import bidict + >>> element_by_symbol = bidict({'H': 'hydrogen'}) + >>> element_by_symbol['H'] + 'hydrogen' + >>> element_by_symbol.inverse['hydrogen'] + 'H' + + +For more usage documentation, +head to the :doc:`intro` [#fn-intro]_ +and proceed from there. + + +Enterprise Support +------------------ + +Enterprise-level support for bidict can be obtained via the +`Tidelift subscription `__ +or by `contacting me directly `__. 
+ +I have a US-based LLC set up for invoicing, +and I have 15+ years of professional experience +delivering software and support to companies successfully. + +You can also sponsor my work through several platforms, including GitHub Sponsors. +See the `Sponsoring <#sponsoring>`__ section below for details, +including rationale and examples of companies +supporting the open source projects they depend on. + + +Voluntary Community Support +--------------------------- + +Please search through already-asked questions and answers +in `GitHub Discussions `__ +and the `issue tracker `__ +in case your question has already been addressed. + +Otherwise, please feel free to +`start a new discussion `__ +or `create a new issue `__ on GitHub +for voluntary community support. + + +Notice of Usage +--------------- + +If you use bidict, +and especially if your usage or your organization is significant in some way, +please let me know in any of the following ways: + +- `star bidict on GitHub `__ +- post in `GitHub Discussions `__ +- `email me `__ + + +Changelog +--------- + +For bidict release notes, see the :doc:`changelog`. [#fn-changelog]_ + + +Release Notifications +--------------------- + +.. duplicated in CHANGELOG.rst: + (Would use `.. include::` but GitHub's renderer doesn't support it.) + +Watch `bidict releases on GitHub `__ +to be notified when new versions of bidict are published. +Click the "Watch" dropdown, choose "Custom", and then choose "Releases". + + +Learning from bidict +-------------------- + +One of the best things about bidict +is that it touches a surprising number of +interesting Python corners, +especially given its small size and scope. + +Check out :doc:`learning-from-bidict` [#fn-learning]_ +if you're interested in learning more. + + +Contributing +------------ + +I have been bidict's sole maintainer +and `active contributor `__ +since I started the project ~15 years ago. + +Your help would be most welcome! 
+See the :doc:`contributors-guide` [#fn-contributing]_ +for more information. + + +Sponsoring +---------- + +.. duplicated in CONTRIBUTING.rst + (Would use `.. include::` but GitHub's renderer doesn't support it.) + +.. image:: https://img.shields.io/badge/GitHub-sponsor-ff69b4 + :target: https://github.com/sponsors/jab + :alt: Sponsor through GitHub + +Bidict is the product of thousands of hours of my unpaid work +over the 15+ years that I've been the sole maintainer. + +If bidict has helped you or your company accomplish your work, +please sponsor my work through one of the following, +and/or ask your company to do the same: + +- `GitHub `__ +- `PayPal `__ +- `Tidelift `__ +- `thanks.dev `__ +- `Gumroad `__ +- `a support engagement with my LLC <#enterprise-support>`__ + +If you're not sure which to use, GitHub is an easy option, +especially if you already have a GitHub account. +Just choose a monthly or one-time amount, and GitHub handles everything else. +Your bidict sponsorship on GitHub will automatically go +on the same regular bill as any other GitHub charges you pay for. +PayPal is another easy option for one-time contributions. + +See the following for rationale and examples of companies +supporting the open source projects they depend on +in this manner: + +- ``__ +- ``__ +- ``__ + +.. - ``__ +.. - ``__ +.. - ``__ + + +Finding Documentation +--------------------- + +If you're viewing this on ``__, +note that multiple versions of the documentation are available, +and you can choose a different version using the popup menu at the bottom-right. +Please make sure you're viewing the version of the documentation +that corresponds to the version of bidict you'd like to use. + +If you're viewing this on GitHub, PyPI, or some other place +that can't render and link this documentation properly +and are seeing broken links, +try these alternate links instead: + +.. [#fn-intro] ``__ | ``__ + +.. [#fn-changelog] ``__ | ``__ + +.. [#fn-learning] ``__ | ``__ + +.. 
[#fn-contributing] ``__ | ``__ diff --git a/env/lib/python3.10/site-packages/bidict-0.23.1.dist-info/RECORD b/env/lib/python3.10/site-packages/bidict-0.23.1.dist-info/RECORD new file mode 100644 index 0000000..d826b27 --- /dev/null +++ b/env/lib/python3.10/site-packages/bidict-0.23.1.dist-info/RECORD @@ -0,0 +1,31 @@ +bidict-0.23.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +bidict-0.23.1.dist-info/LICENSE,sha256=8_U63OyqSNc6ZuI4-lupBstBh2eDtF0ooTRrMULuvZo,16784 +bidict-0.23.1.dist-info/METADATA,sha256=2ovIRm6Df8gdwAMekGqkeBSF5TWj2mv1jpmh4W4ks7o,8704 +bidict-0.23.1.dist-info/RECORD,, +bidict-0.23.1.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92 +bidict-0.23.1.dist-info/top_level.txt,sha256=WuQO02jp0ODioS7sJoaHg3JJ5_3h6Sxo9RITvNGPYmc,7 +bidict/__init__.py,sha256=pL87KsrDpBsl3AG09LQk1t1TSFt0hIJVYa2POMdErN8,4398 +bidict/__pycache__/__init__.cpython-310.pyc,, +bidict/__pycache__/_abc.cpython-310.pyc,, +bidict/__pycache__/_base.cpython-310.pyc,, +bidict/__pycache__/_bidict.cpython-310.pyc,, +bidict/__pycache__/_dup.cpython-310.pyc,, +bidict/__pycache__/_exc.cpython-310.pyc,, +bidict/__pycache__/_frozen.cpython-310.pyc,, +bidict/__pycache__/_iter.cpython-310.pyc,, +bidict/__pycache__/_orderedbase.cpython-310.pyc,, +bidict/__pycache__/_orderedbidict.cpython-310.pyc,, +bidict/__pycache__/_typing.cpython-310.pyc,, +bidict/__pycache__/metadata.cpython-310.pyc,, +bidict/_abc.py,sha256=SMCNdCsmqSWg0OGnMZtnnXY8edjXcyZup5tva4HBm_c,3172 +bidict/_base.py,sha256=YiauA0aj52fNB6cfZ4gBt6OV-CRQoZm7WVhuw1nT-Cg,24439 +bidict/_bidict.py,sha256=Sr-RoEzWOaxpnDRbDJ7ngaGRIsyGnqZgzvR-NyT4jl4,6923 +bidict/_dup.py,sha256=YAn5gWA6lwMBA5A6ebVF19UTZyambGS8WxmbK4TN1Ww,2079 +bidict/_exc.py,sha256=HnD_WgteI5PrXa3zBx9RUiGlgnZTO6CF4nIU9p3-njk,1066 +bidict/_frozen.py,sha256=p4TaRHKeyTs0KmlpwSnZiTlN_CR4J97kAgBpNdZHQMs,1771 +bidict/_iter.py,sha256=zVUx-hJ1M4YuJROoFWRjPKlcaFnyo1AAuRpOaKAFhOQ,1530 
+bidict/_orderedbase.py,sha256=M7v5rHa7vrym9Z3DxQBFQDxjnrr39Z8p26V0c1PggoE,8942 +bidict/_orderedbidict.py,sha256=pPnmC19mIISrj8_yjnb-4r_ti1B74tD5eTd08DETNuI,7080 +bidict/_typing.py,sha256=AylMZpBhEFTQegfziPSxfKkKLk7oUsH6o3awDIg2z_k,1289 +bidict/metadata.py,sha256=BMIKu6fBY_OKeV_q48EpumE7MdmFw8rFcdaUz8kcIYk,573 +bidict/py.typed,sha256=RJao5SVFYIp8IfbxhL_SpZkBQYe3XXzPlobSRdh4B_c,16 diff --git a/env/lib/python3.10/site-packages/bidict-0.23.1.dist-info/WHEEL b/env/lib/python3.10/site-packages/bidict-0.23.1.dist-info/WHEEL new file mode 100644 index 0000000..98c0d20 --- /dev/null +++ b/env/lib/python3.10/site-packages/bidict-0.23.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.42.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/env/lib/python3.10/site-packages/bidict-0.23.1.dist-info/top_level.txt b/env/lib/python3.10/site-packages/bidict-0.23.1.dist-info/top_level.txt new file mode 100644 index 0000000..6ff5b04 --- /dev/null +++ b/env/lib/python3.10/site-packages/bidict-0.23.1.dist-info/top_level.txt @@ -0,0 +1 @@ +bidict diff --git a/env/lib/python3.10/site-packages/bidict/__init__.py b/env/lib/python3.10/site-packages/bidict/__init__.py new file mode 100644 index 0000000..07e5ba5 --- /dev/null +++ b/env/lib/python3.10/site-packages/bidict/__init__.py @@ -0,0 +1,103 @@ +# Copyright 2009-2024 Joshua Bronson. All rights reserved. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + + +# ============================================================================ +# * Welcome to the bidict source code * +# ============================================================================ + +# Reading through the code? You'll find a "Code review nav" comment like the one +# below at the top and bottom of the key source files. 
Follow these cues to take +# a path through the code that's optimized for familiarizing yourself with it. +# +# If you're not reading this on https://github.com/jab/bidict already, go there +# to ensure you have the latest version of the code. While there, you can also +# star the project, watch it for updates, fork the code, and submit an issue or +# pull request with any proposed changes. More information can be found linked +# from README.rst, which is also shown on https://github.com/jab/bidict. + +# * Code review nav * +# ============================================================================ +# Current: __init__.py Next: _abc.py → +# ============================================================================ + + +"""The bidirectional mapping library for Python. + +---- + +bidict by example: + +.. code-block:: python + + >>> from bidict import bidict + >>> element_by_symbol = bidict({'H': 'hydrogen'}) + >>> element_by_symbol['H'] + 'hydrogen' + >>> element_by_symbol.inverse['hydrogen'] + 'H' + + +Please see https://github.com/jab/bidict for the most up-to-date code and +https://bidict.readthedocs.io for the most up-to-date documentation +if you are reading this elsewhere. + +---- + +.. :copyright: (c) 2009-2024 Joshua Bronson. +.. :license: MPLv2. See LICENSE for details. +""" + +# Use private aliases to not re-export these publicly (for Sphinx automodule with imported-members). 
+from __future__ import annotations as _annotations + +from contextlib import suppress as _suppress + +from ._abc import BidirectionalMapping as BidirectionalMapping +from ._abc import MutableBidirectionalMapping as MutableBidirectionalMapping +from ._base import BidictBase as BidictBase +from ._base import BidictKeysView as BidictKeysView +from ._base import GeneratedBidictInverse as GeneratedBidictInverse +from ._bidict import MutableBidict as MutableBidict +from ._bidict import bidict as bidict +from ._dup import DROP_NEW as DROP_NEW +from ._dup import DROP_OLD as DROP_OLD +from ._dup import ON_DUP_DEFAULT as ON_DUP_DEFAULT +from ._dup import ON_DUP_DROP_OLD as ON_DUP_DROP_OLD +from ._dup import ON_DUP_RAISE as ON_DUP_RAISE +from ._dup import RAISE as RAISE +from ._dup import OnDup as OnDup +from ._dup import OnDupAction as OnDupAction +from ._exc import BidictException as BidictException +from ._exc import DuplicationError as DuplicationError +from ._exc import KeyAndValueDuplicationError as KeyAndValueDuplicationError +from ._exc import KeyDuplicationError as KeyDuplicationError +from ._exc import ValueDuplicationError as ValueDuplicationError +from ._frozen import frozenbidict as frozenbidict +from ._iter import inverted as inverted +from ._orderedbase import OrderedBidictBase as OrderedBidictBase +from ._orderedbidict import OrderedBidict as OrderedBidict +from .metadata import __author__ as __author__ +from .metadata import __copyright__ as __copyright__ +from .metadata import __description__ as __description__ +from .metadata import __license__ as __license__ +from .metadata import __url__ as __url__ +from .metadata import __version__ as __version__ + + +# Set __module__ of re-exported classes to the 'bidict' top-level module, so that e.g. +# 'bidict.bidict' shows up as 'bidict.bidict` rather than 'bidict._bidict.bidict'. 
+for _obj in tuple(locals().values()): # pragma: no cover + if not getattr(_obj, '__module__', '').startswith('bidict.'): + continue + with _suppress(AttributeError): + _obj.__module__ = 'bidict' + + +# * Code review nav * +# ============================================================================ +# Current: __init__.py Next: _abc.py → +# ============================================================================ diff --git a/env/lib/python3.10/site-packages/bidict/_abc.py b/env/lib/python3.10/site-packages/bidict/_abc.py new file mode 100644 index 0000000..d4a30aa --- /dev/null +++ b/env/lib/python3.10/site-packages/bidict/_abc.py @@ -0,0 +1,79 @@ +# Copyright 2009-2024 Joshua Bronson. All rights reserved. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + + +# * Code review nav * +# (see comments in __init__.py) +# ============================================================================ +# ← Prev: __init__.py Current: _abc.py Next: _base.py → +# ============================================================================ + + +"""Provide the :class:`BidirectionalMapping` abstract base class.""" + +from __future__ import annotations + +import typing as t +from abc import abstractmethod + +from ._typing import KT +from ._typing import VT + + +class BidirectionalMapping(t.Mapping[KT, VT]): + """Abstract base class for bidirectional mapping types. + + Extends :class:`collections.abc.Mapping` primarily by adding the + (abstract) :attr:`inverse` property, + which implementers of :class:`BidirectionalMapping` + should override to return a reference to the inverse + :class:`BidirectionalMapping` instance. + """ + + __slots__ = () + + @property + @abstractmethod + def inverse(self) -> BidirectionalMapping[VT, KT]: + """The inverse of this bidirectional mapping instance. 
+ + *See also* :attr:`bidict.BidictBase.inverse`, :attr:`bidict.BidictBase.inv` + + :raises NotImplementedError: Meant to be overridden in subclasses. + """ + # The @abstractmethod decorator prevents subclasses from being instantiated unless they + # override this method. But an overriding implementation may merely return super().inverse, + # in which case this implementation is used. Raise NotImplementedError to indicate that + # subclasses must actually provide their own implementation. + raise NotImplementedError + + def __inverted__(self) -> t.Iterator[tuple[VT, KT]]: + """Get an iterator over the items in :attr:`inverse`. + + This is functionally equivalent to iterating over the items in the + forward mapping and inverting each one on the fly, but this provides a + more efficient implementation: Assuming the already-inverted items + are stored in :attr:`inverse`, just return an iterator over them directly. + + Providing this default implementation enables external functions, + particularly :func:`~bidict.inverted`, to use this optimized + implementation when available, instead of having to invert on the fly. + + *See also* :func:`bidict.inverted` + """ + return iter(self.inverse.items()) + + +class MutableBidirectionalMapping(BidirectionalMapping[KT, VT], t.MutableMapping[KT, VT]): + """Abstract base class for mutable bidirectional mapping types.""" + + __slots__ = () + + +# * Code review nav * +# ============================================================================ +# ← Prev: __init__.py Current: _abc.py Next: _base.py → +# ============================================================================ diff --git a/env/lib/python3.10/site-packages/bidict/_base.py b/env/lib/python3.10/site-packages/bidict/_base.py new file mode 100644 index 0000000..848a376 --- /dev/null +++ b/env/lib/python3.10/site-packages/bidict/_base.py @@ -0,0 +1,556 @@ +# Copyright 2009-2024 Joshua Bronson. All rights reserved. 
+# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + + +# * Code review nav * +# (see comments in __init__.py) +# ============================================================================ +# ← Prev: _abc.py Current: _base.py Next: _frozen.py → +# ============================================================================ + + +"""Provide :class:`BidictBase`.""" + +from __future__ import annotations + +import typing as t +import weakref +from itertools import starmap +from operator import eq +from types import MappingProxyType + +from ._abc import BidirectionalMapping +from ._dup import DROP_NEW +from ._dup import DROP_OLD +from ._dup import ON_DUP_DEFAULT +from ._dup import RAISE +from ._dup import OnDup +from ._exc import DuplicationError +from ._exc import KeyAndValueDuplicationError +from ._exc import KeyDuplicationError +from ._exc import ValueDuplicationError +from ._iter import inverted +from ._iter import iteritems +from ._typing import KT +from ._typing import MISSING +from ._typing import OKT +from ._typing import OVT +from ._typing import VT +from ._typing import Maplike +from ._typing import MapOrItems + + +OldKV = t.Tuple[OKT[KT], OVT[VT]] +DedupResult = t.Optional[OldKV[KT, VT]] +Unwrites = t.List[t.Tuple[t.Any, ...]] +BT = t.TypeVar('BT', bound='BidictBase[t.Any, t.Any]') + + +class BidictKeysView(t.KeysView[KT], t.ValuesView[KT]): + """Since the keys of a bidict are the values of its inverse (and vice versa), + the :class:`~collections.abc.ValuesView` result of calling *bi.values()* + is also a :class:`~collections.abc.KeysView` of *bi.inverse*. 
+ """ + + +class BidictBase(BidirectionalMapping[KT, VT]): + """Base class implementing :class:`BidirectionalMapping`.""" + + #: The default :class:`~bidict.OnDup` + #: that governs behavior when a provided item + #: duplicates the key or value of other item(s). + #: + #: *See also* + #: :ref:`basic-usage:Values Must Be Unique` (https://bidict.rtfd.io/basic-usage.html#values-must-be-unique), + #: :doc:`extending` (https://bidict.rtfd.io/extending.html) + on_dup = ON_DUP_DEFAULT + + _fwdm: t.MutableMapping[KT, VT] #: the backing forward mapping (*key* → *val*) + _invm: t.MutableMapping[VT, KT] #: the backing inverse mapping (*val* → *key*) + + # Use Any rather than KT/VT in the following to avoid "ClassVar cannot contain type variables" errors: + _fwdm_cls: t.ClassVar[type[t.MutableMapping[t.Any, t.Any]]] = dict #: class of the backing forward mapping + _invm_cls: t.ClassVar[type[t.MutableMapping[t.Any, t.Any]]] = dict #: class of the backing inverse mapping + + #: The class of the inverse bidict instance. + _inv_cls: t.ClassVar[type[BidictBase[t.Any, t.Any]]] + + def __init_subclass__(cls) -> None: + super().__init_subclass__() + cls._init_class() + + @classmethod + def _init_class(cls) -> None: + cls._ensure_inv_cls() + cls._set_reversed() + + __reversed__: t.ClassVar[t.Any] + + @classmethod + def _set_reversed(cls) -> None: + """Set __reversed__ for subclasses that do not set it explicitly + according to whether backing mappings are reversible. + """ + if cls is not BidictBase: + resolved = cls.__reversed__ + overridden = resolved is not BidictBase.__reversed__ + if overridden: # E.g. OrderedBidictBase, OrderedBidict + return + backing_reversible = all(issubclass(i, t.Reversible) for i in (cls._fwdm_cls, cls._invm_cls)) + cls.__reversed__ = _fwdm_reversed if backing_reversible else None + + @classmethod + def _ensure_inv_cls(cls) -> None: + """Ensure :attr:`_inv_cls` is set, computing it dynamically if necessary. 
+ + All subclasses provided in :mod:`bidict` are their own inverse classes, + i.e., their backing forward and inverse mappings are both the same type, + but users may define subclasses where this is not the case. + This method ensures that the inverse class is computed correctly regardless. + + See: :ref:`extending:Dynamic Inverse Class Generation` + (https://bidict.rtfd.io/extending.html#dynamic-inverse-class-generation) + """ + # This _ensure_inv_cls() method is (indirectly) corecursive with _make_inv_cls() below + # in the case that we need to dynamically generate the inverse class: + # 1. _ensure_inv_cls() calls cls._make_inv_cls() + # 2. cls._make_inv_cls() calls type(..., (cls, ...), ...) to dynamically generate inv_cls + # 3. Our __init_subclass__ hook (see above) is automatically called on inv_cls + # 4. inv_cls.__init_subclass__() calls inv_cls._ensure_inv_cls() + # 5. inv_cls._ensure_inv_cls() resolves to this implementation + # (inv_cls deliberately does not override this), so we're back where we started. + # But since the _make_inv_cls() call will have set inv_cls.__dict__._inv_cls, + # just check if it's already set before calling _make_inv_cls() to prevent infinite recursion. + if getattr(cls, '__dict__', {}).get('_inv_cls'): # Don't assume cls.__dict__ (e.g. mypyc native class) + return + cls._inv_cls = cls._make_inv_cls() + + @classmethod + def _make_inv_cls(cls: type[BT]) -> type[BT]: + diff = cls._inv_cls_dict_diff() + cls_is_own_inv = all(getattr(cls, k, MISSING) == v for (k, v) in diff.items()) + if cls_is_own_inv: + return cls + # Suppress auto-calculation of _inv_cls's _inv_cls since we know it already. + # Works with the guard in BidictBase._ensure_inv_cls() to prevent infinite recursion. 
+ diff['_inv_cls'] = cls + inv_cls = type(f'{cls.__name__}Inv', (cls, GeneratedBidictInverse), diff) + inv_cls.__module__ = cls.__module__ + return t.cast(t.Type[BT], inv_cls) + + @classmethod + def _inv_cls_dict_diff(cls) -> dict[str, t.Any]: + return { + '_fwdm_cls': cls._invm_cls, + '_invm_cls': cls._fwdm_cls, + } + + def __init__(self, arg: MapOrItems[KT, VT] = (), /, **kw: VT) -> None: + """Make a new bidirectional mapping. + The signature behaves like that of :class:`dict`. + ktems passed via positional arg are processed first, + followed by any items passed via keyword argument. + Any duplication encountered along the way + is handled as per :attr:`on_dup`. + """ + self._fwdm = self._fwdm_cls() + self._invm = self._invm_cls() + self._update(arg, kw, rollback=False) + + # If Python ever adds support for higher-kinded types, `inverse` could use them, e.g. + # def inverse(self: BT[KT, VT]) -> BT[VT, KT]: + # Ref: https://github.com/python/typing/issues/548#issuecomment-621571821 + @property + def inverse(self) -> BidictBase[VT, KT]: + """The inverse of this bidirectional mapping instance.""" + # When `bi.inverse` is called for the first time, this method + # computes the inverse instance, stores it for subsequent use, and then + # returns it. It also stores a reference on `bi.inverse` back to `bi`, + # but uses a weakref to avoid creating a reference cycle. Strong references + # to inverse instances are stored in ._inv, and weak references are stored + # in ._invweak. + + # First check if a strong reference is already stored. + inv: BidictBase[VT, KT] | None = getattr(self, '_inv', None) + if inv is not None: + return inv + # Next check if a weak reference is already stored. + invweak = getattr(self, '_invweak', None) + if invweak is not None: + inv = invweak() # Try to resolve a strong reference and return it. + if inv is not None: + return inv + # No luck. Compute the inverse reference and store it for subsequent use. 
+ inv = self._make_inverse() + self._inv: BidictBase[VT, KT] | None = inv + self._invweak: weakref.ReferenceType[BidictBase[VT, KT]] | None = None + # Also store a weak reference back to `instance` on its inverse instance, so that + # the second `.inverse` access in `bi.inverse.inverse` hits the cached weakref. + inv._inv = None + inv._invweak = weakref.ref(self) + # In e.g. `bidict().inverse.inverse`, this design ensures that a strong reference + # back to the original instance is retained before its refcount drops to zero, + # avoiding an unintended potential deallocation. + return inv + + def _make_inverse(self) -> BidictBase[VT, KT]: + inv: BidictBase[VT, KT] = self._inv_cls() + inv._fwdm = self._invm + inv._invm = self._fwdm + return inv + + @property + def inv(self) -> BidictBase[VT, KT]: + """Alias for :attr:`inverse`.""" + return self.inverse + + def __repr__(self) -> str: + """See :func:`repr`.""" + clsname = self.__class__.__name__ + items = dict(self.items()) if self else '' + return f'{clsname}({items})' + + def values(self) -> BidictKeysView[VT]: + """A set-like object providing a view on the contained values. + + Since the values of a bidict are equivalent to the keys of its inverse, + this method returns a set-like object for this bidict's values + rather than just a collections.abc.ValuesView. + This object supports set operations like union and difference, + and constant- rather than linear-time containment checks, + and is no more expensive to provide than the less capable + collections.abc.ValuesView would be. + + See :meth:`keys` for more information. + """ + return t.cast(BidictKeysView[VT], self.inverse.keys()) + + def keys(self) -> t.KeysView[KT]: + """A set-like object providing a view on the contained keys. 
+ + When *b._fwdm* is a :class:`dict`, *b.keys()* returns a + *dict_keys* object that behaves exactly the same as + *collections.abc.KeysView(b)*, except for + + - offering better performance + + - being reversible on Python 3.8+ + + - having a .mapping attribute in Python 3.10+ + that exposes a mappingproxy to *b._fwdm*. + """ + fwdm, fwdm_cls = self._fwdm, self._fwdm_cls + return fwdm.keys() if fwdm_cls is dict else BidictKeysView(self) + + def items(self) -> t.ItemsView[KT, VT]: + """A set-like object providing a view on the contained items. + + When *b._fwdm* is a :class:`dict`, *b.items()* returns a + *dict_items* object that behaves exactly the same as + *collections.abc.ItemsView(b)*, except for: + + - offering better performance + + - being reversible on Python 3.8+ + + - having a .mapping attribute in Python 3.10+ + that exposes a mappingproxy to *b._fwdm*. + """ + return self._fwdm.items() if self._fwdm_cls is dict else super().items() + + # The inherited collections.abc.Mapping.__contains__() method is implemented by doing a `try` + # `except KeyError` around `self[key]`. The following implementation is much faster, + # especially in the missing case. + def __contains__(self, key: t.Any) -> bool: + """True if the mapping contains the specified key, else False.""" + return key in self._fwdm + + # The inherited collections.abc.Mapping.__eq__() method is implemented in terms of an inefficient + # `dict(self.items()) == dict(other.items())` comparison, so override it with a + # more efficient implementation. + def __eq__(self, other: object) -> bool: + """*x.__eq__(other) ⟺ x == other* + + Equivalent to *dict(x.items()) == dict(other.items())* + but more efficient. + + Note that :meth:`bidict's __eq__() ` implementation + is inherited by subclasses, + in particular by the ordered bidict subclasses, + so even with ordered bidicts, + :ref:`== comparison is order-insensitive ` + (https://bidict.rtfd.io/other-bidict-types.html#eq-is-order-insensitive). 
+ + *See also* :meth:`equals_order_sensitive` + """ + if isinstance(other, t.Mapping): + return self._fwdm.items() == other.items() + # Ref: https://docs.python.org/3/library/constants.html#NotImplemented + return NotImplemented + + def equals_order_sensitive(self, other: object) -> bool: + """Order-sensitive equality check. + + *See also* :ref:`eq-order-insensitive` + (https://bidict.rtfd.io/other-bidict-types.html#eq-is-order-insensitive) + """ + if not isinstance(other, t.Mapping) or len(self) != len(other): + return False + return all(starmap(eq, zip(self.items(), other.items()))) + + def _dedup(self, key: KT, val: VT, on_dup: OnDup) -> DedupResult[KT, VT]: + """Check *key* and *val* for any duplication in self. + + Handle any duplication as per the passed in *on_dup*. + + If (key, val) is already present, return None + since writing (key, val) would be a no-op. + + If duplication is found and the corresponding :class:`~bidict.OnDupAction` is + :attr:`~bidict.DROP_NEW`, return None. + + If duplication is found and the corresponding :class:`~bidict.OnDupAction` is + :attr:`~bidict.RAISE`, raise the appropriate exception. + + If duplication is found and the corresponding :class:`~bidict.OnDupAction` is + :attr:`~bidict.DROP_OLD`, or if no duplication is found, + return *(oldkey, oldval)*. + """ + fwdm, invm = self._fwdm, self._invm + oldval: OVT[VT] = fwdm.get(key, MISSING) + oldkey: OKT[KT] = invm.get(val, MISSING) + isdupkey, isdupval = oldval is not MISSING, oldkey is not MISSING + if isdupkey and isdupval: + if key == oldkey: + assert val == oldval + # (key, val) duplicates an existing item -> no-op. + return None + # key and val each duplicate a different existing item. + if on_dup.val is RAISE: + raise KeyAndValueDuplicationError(key, val) + if on_dup.val is DROP_NEW: + return None + assert on_dup.val is DROP_OLD + # Fall through to the return statement on the last line. 
+ elif isdupkey: + if on_dup.key is RAISE: + raise KeyDuplicationError(key) + if on_dup.key is DROP_NEW: + return None + assert on_dup.key is DROP_OLD + # Fall through to the return statement on the last line. + elif isdupval: + if on_dup.val is RAISE: + raise ValueDuplicationError(val) + if on_dup.val is DROP_NEW: + return None + assert on_dup.val is DROP_OLD + # Fall through to the return statement on the last line. + # else neither isdupkey nor isdupval. + return oldkey, oldval + + def _write(self, newkey: KT, newval: VT, oldkey: OKT[KT], oldval: OVT[VT], unwrites: Unwrites | None) -> None: + """Insert (newkey, newval), extending *unwrites* with associated inverse operations if provided. + + *oldkey* and *oldval* are as returned by :meth:`_dedup`. + + If *unwrites* is not None, it is extended with the inverse operations necessary to undo the write. + This design allows :meth:`_update` to roll back a partially applied update that fails part-way through + when necessary. + + This design also allows subclasses that require additional operations to easily extend this implementation. + For example, :class:`bidict.OrderedBidictBase` calls this inherited implementation, and then extends *unwrites* + with additional operations needed to keep its internal linked list nodes consistent with its items' order + as changes are made. + """ + fwdm, invm = self._fwdm, self._invm + fwdm_set, invm_set = fwdm.__setitem__, invm.__setitem__ + fwdm_del, invm_del = fwdm.__delitem__, invm.__delitem__ + # Always perform the following writes regardless of duplication. 
+ fwdm_set(newkey, newval) + invm_set(newval, newkey) + if oldval is MISSING and oldkey is MISSING: # no key or value duplication + # {0: 1, 2: 3} | {4: 5} => {0: 1, 2: 3, 4: 5} + if unwrites is not None: + unwrites.extend(( + (fwdm_del, newkey), + (invm_del, newval), + )) + elif oldval is not MISSING and oldkey is not MISSING: # key and value duplication across two different items + # {0: 1, 2: 3} | {0: 3} => {0: 3} + fwdm_del(oldkey) + invm_del(oldval) + if unwrites is not None: + unwrites.extend(( + (fwdm_set, newkey, oldval), + (invm_set, oldval, newkey), + (fwdm_set, oldkey, newval), + (invm_set, newval, oldkey), + )) + elif oldval is not MISSING: # just key duplication + # {0: 1, 2: 3} | {2: 4} => {0: 1, 2: 4} + invm_del(oldval) + if unwrites is not None: + unwrites.extend(( + (fwdm_set, newkey, oldval), + (invm_set, oldval, newkey), + (invm_del, newval), + )) + else: + assert oldkey is not MISSING # just value duplication + # {0: 1, 2: 3} | {4: 3} => {0: 1, 4: 3} + fwdm_del(oldkey) + if unwrites is not None: + unwrites.extend(( + (fwdm_set, oldkey, newval), + (invm_set, newval, oldkey), + (fwdm_del, newkey), + )) + + def _update( + self, + arg: MapOrItems[KT, VT], + kw: t.Mapping[str, VT] = MappingProxyType({}), + *, + rollback: bool | None = None, + on_dup: OnDup | None = None, + ) -> None: + """Update with the items from *arg* and *kw*, maybe failing and rolling back as per *on_dup* and *rollback*.""" + # Note: We must process input in a single pass, since arg may be a generator. + if not isinstance(arg, (t.Iterable, Maplike)): + raise TypeError(f"'{arg.__class__.__name__}' object is not iterable") + if not arg and not kw: + return + if on_dup is None: + on_dup = self.on_dup + if rollback is None: + rollback = RAISE in on_dup + + # Fast path when we're empty and updating only from another bidict (i.e. no dup vals in new items). 
+ if not self and not kw and isinstance(arg, BidictBase): + self._init_from(arg) + return + + # Fast path when we're adding more items than we contain already and rollback is enabled: + # Update a copy of self with rollback disabled. Fail if that fails, otherwise become the copy. + if rollback and isinstance(arg, t.Sized) and len(arg) + len(kw) > len(self): + tmp = self.copy() + tmp._update(arg, kw, rollback=False, on_dup=on_dup) + self._init_from(tmp) + return + + # In all other cases, benchmarking has indicated that the update is best implemented as follows: + # For each new item, perform a dup check (raising if necessary), and apply the associated writes we need to + # perform on our backing _fwdm and _invm mappings. If rollback is enabled, also compute the associated unwrites + # as we go. If the update results in a DuplicationError and rollback is enabled, apply the accumulated unwrites + # before raising, to ensure that we fail clean. + write = self._write + unwrites: Unwrites | None = [] if rollback else None + for key, val in iteritems(arg, **kw): + try: + dedup_result = self._dedup(key, val, on_dup) + except DuplicationError: + if unwrites is not None: + for fn, *args in reversed(unwrites): + fn(*args) + raise + if dedup_result is not None: + write(key, val, *dedup_result, unwrites=unwrites) + + def __copy__(self: BT) -> BT: + """Used for the copy protocol. See the :mod:`copy` module.""" + return self.copy() + + def copy(self: BT) -> BT: + """Make a (shallow) copy of this bidict.""" + # Could just `return self.__class__(self)` here, but the below is faster. The former + # would copy this bidict's items into a new instance one at a time (checking for duplication + # for each item), whereas the below copies from the backing mappings all at once, and foregoes + # item-by-item duplication checking since the backing mappings have been checked already. 
+ return self._from_other(self.__class__, self) + + @staticmethod + def _from_other(bt: type[BT], other: MapOrItems[KT, VT], inv: bool = False) -> BT: + """Fast, private constructor based on :meth:`_init_from`. + + If *inv* is true, return the inverse of the instance instead of the instance itself. + (Useful for pickling with dynamically-generated inverse classes -- see :meth:`__reduce__`.) + """ + inst = bt() + inst._init_from(other) + return t.cast(BT, inst.inverse) if inv else inst + + def _init_from(self, other: MapOrItems[KT, VT]) -> None: + """Fast init from *other*, bypassing item-by-item duplication checking.""" + self._fwdm.clear() + self._invm.clear() + self._fwdm.update(other) + # If other is a bidict, use its existing backing inverse mapping, otherwise + # other could be a generator that's now exhausted, so invert self._fwdm on the fly. + inv = other.inverse if isinstance(other, BidictBase) else inverted(self._fwdm) + self._invm.update(inv) + + # other's type is Mapping rather than Maplike since bidict() | SupportsKeysAndGetItem({}) + # raises a TypeError, just like dict() | SupportsKeysAndGetItem({}) does. 
+ def __or__(self: BT, other: t.Mapping[KT, VT]) -> BT: + """Return self|other.""" + if not isinstance(other, t.Mapping): + return NotImplemented + new = self.copy() + new._update(other, rollback=False) + return new + + def __ror__(self: BT, other: t.Mapping[KT, VT]) -> BT: + """Return other|self.""" + if not isinstance(other, t.Mapping): + return NotImplemented + new = self.__class__(other) + new._update(self, rollback=False) + return new + + def __len__(self) -> int: + """The number of contained items.""" + return len(self._fwdm) + + def __iter__(self) -> t.Iterator[KT]: + """Iterator over the contained keys.""" + return iter(self._fwdm) + + def __getitem__(self, key: KT) -> VT: + """*x.__getitem__(key) ⟺ x[key]*""" + return self._fwdm[key] + + def __reduce__(self) -> tuple[t.Any, ...]: + """Return state information for pickling.""" + cls = self.__class__ + inst: t.Mapping[t.Any, t.Any] = self + # If this bidict's class is dynamically generated, pickle the inverse instead, whose (presumably not + # dynamically generated) class the caller is more likely to have a reference to somewhere in sys.modules + # that pickle can discover. + if should_invert := isinstance(self, GeneratedBidictInverse): + cls = self._inv_cls + inst = self.inverse + return self._from_other, (cls, dict(inst), should_invert) + + +# See BidictBase._set_reversed() above. 
+def _fwdm_reversed(self: BidictBase[KT, t.Any]) -> t.Iterator[KT]: + """Iterator over the contained keys in reverse order.""" + assert isinstance(self._fwdm, t.Reversible) + return reversed(self._fwdm) + + +BidictBase._init_class() + + +class GeneratedBidictInverse: + """Base class for dynamically-generated inverse bidict classes.""" + + +# * Code review nav * +# ============================================================================ +# ← Prev: _abc.py Current: _base.py Next: _frozen.py → +# ============================================================================ diff --git a/env/lib/python3.10/site-packages/bidict/_bidict.py b/env/lib/python3.10/site-packages/bidict/_bidict.py new file mode 100644 index 0000000..94dd3db --- /dev/null +++ b/env/lib/python3.10/site-packages/bidict/_bidict.py @@ -0,0 +1,194 @@ +# Copyright 2009-2024 Joshua Bronson. All rights reserved. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ + +# * Code review nav * +# (see comments in __init__.py) +# ============================================================================ +# ← Prev: _frozen.py Current: _bidict.py Next: _orderedbase.py → +# ============================================================================ + + +"""Provide :class:`MutableBidict` and :class:`bidict`.""" + +from __future__ import annotations + +import typing as t + +from ._abc import MutableBidirectionalMapping +from ._base import BidictBase +from ._dup import ON_DUP_DROP_OLD +from ._dup import ON_DUP_RAISE +from ._dup import OnDup +from ._typing import DT +from ._typing import KT +from ._typing import MISSING +from ._typing import ODT +from ._typing import VT +from ._typing import MapOrItems + + +class MutableBidict(BidictBase[KT, VT], MutableBidirectionalMapping[KT, VT]): + """Base class for mutable bidirectional mappings.""" + + if t.TYPE_CHECKING: + + @property + def inverse(self) -> MutableBidict[VT, KT]: ... + + @property + def inv(self) -> MutableBidict[VT, KT]: ... + + def _pop(self, key: KT) -> VT: + val = self._fwdm.pop(key) + del self._invm[val] + return val + + def __delitem__(self, key: KT) -> None: + """*x.__delitem__(y) ⟺ del x[y]*""" + self._pop(key) + + def __setitem__(self, key: KT, val: VT) -> None: + """Set the value for *key* to *val*. + + If *key* is already associated with *val*, this is a no-op. + + If *key* is already associated with a different value, + the old value will be replaced with *val*, + as with dict's :meth:`__setitem__`. + + If *val* is already associated with a different key, + an exception is raised + to protect against accidental removal of the key + that's currently associated with *val*. + + Use :meth:`put` instead if you want to specify different behavior in + the case that the provided key or value duplicates an existing one. + Or use :meth:`forceput` to unconditionally associate *key* with *val*, + replacing any existing items as necessary to preserve uniqueness. 
+ + :raises bidict.ValueDuplicationError: if *val* duplicates that of an + existing item. + + :raises bidict.KeyAndValueDuplicationError: if *key* duplicates the key of an + existing item and *val* duplicates the value of a different + existing item. + """ + self.put(key, val, on_dup=self.on_dup) + + def put(self, key: KT, val: VT, on_dup: OnDup = ON_DUP_RAISE) -> None: + """Associate *key* with *val*, honoring the :class:`OnDup` given in *on_dup*. + + For example, if *on_dup* is :attr:`~bidict.ON_DUP_RAISE`, + then *key* will be associated with *val* if and only if + *key* is not already associated with an existing value and + *val* is not already associated with an existing key, + otherwise an exception will be raised. + + If *key* is already associated with *val*, this is a no-op. + + :raises bidict.KeyDuplicationError: if attempting to insert an item + whose key only duplicates an existing item's, and *on_dup.key* is + :attr:`~bidict.RAISE`. + + :raises bidict.ValueDuplicationError: if attempting to insert an item + whose value only duplicates an existing item's, and *on_dup.val* is + :attr:`~bidict.RAISE`. + + :raises bidict.KeyAndValueDuplicationError: if attempting to insert an + item whose key duplicates one existing item's, and whose value + duplicates another existing item's, and *on_dup.val* is + :attr:`~bidict.RAISE`. + """ + self._update(((key, val),), on_dup=on_dup) + + def forceput(self, key: KT, val: VT) -> None: + """Associate *key* with *val* unconditionally. + + Replace any existing mappings containing key *key* or value *val* + as necessary to preserve uniqueness. + """ + self.put(key, val, on_dup=ON_DUP_DROP_OLD) + + def clear(self) -> None: + """Remove all items.""" + self._fwdm.clear() + self._invm.clear() + + @t.overload + def pop(self, key: KT, /) -> VT: ... + @t.overload + def pop(self, key: KT, default: DT = ..., /) -> VT | DT: ... 
+ + def pop(self, key: KT, default: ODT[DT] = MISSING, /) -> VT | DT: + """*x.pop(k[, d]) → v* + + Remove specified key and return the corresponding value. + + :raises KeyError: if *key* is not found and no *default* is provided. + """ + try: + return self._pop(key) + except KeyError: + if default is MISSING: + raise + return default + + def popitem(self) -> tuple[KT, VT]: + """*x.popitem() → (k, v)* + + Remove and return some item as a (key, value) pair. + + :raises KeyError: if *x* is empty. + """ + key, val = self._fwdm.popitem() + del self._invm[val] + return key, val + + def update(self, arg: MapOrItems[KT, VT] = (), /, **kw: VT) -> None: + """Like calling :meth:`putall` with *self.on_dup* passed for *on_dup*.""" + self._update(arg, kw=kw) + + def forceupdate(self, arg: MapOrItems[KT, VT] = (), /, **kw: VT) -> None: + """Like a bulk :meth:`forceput`.""" + self._update(arg, kw=kw, on_dup=ON_DUP_DROP_OLD) + + def putall(self, items: MapOrItems[KT, VT], on_dup: OnDup = ON_DUP_RAISE) -> None: + """Like a bulk :meth:`put`. + + If one of the given items causes an exception to be raised, + none of the items is inserted. + """ + self._update(items, on_dup=on_dup) + + # other's type is Mapping rather than Maplike since bidict() |= SupportsKeysAndGetItem({}) + # raises a TypeError, just like dict() |= SupportsKeysAndGetItem({}) does. + def __ior__(self, other: t.Mapping[KT, VT]) -> MutableBidict[KT, VT]: + """Return self|=other.""" + self.update(other) + return self + + +class bidict(MutableBidict[KT, VT]): + """The main bidirectional mapping type. + + See :ref:`intro:Introduction` and :ref:`basic-usage:Basic Usage` + to get started (also available at https://bidict.rtfd.io). + """ + + if t.TYPE_CHECKING: + + @property + def inverse(self) -> bidict[VT, KT]: ... + + @property + def inv(self) -> bidict[VT, KT]: ... 
+ + +# * Code review nav * +# ============================================================================ +# ← Prev: _frozen.py Current: _bidict.py Next: _orderedbase.py → +# ============================================================================ diff --git a/env/lib/python3.10/site-packages/bidict/_dup.py b/env/lib/python3.10/site-packages/bidict/_dup.py new file mode 100644 index 0000000..fd25b61 --- /dev/null +++ b/env/lib/python3.10/site-packages/bidict/_dup.py @@ -0,0 +1,61 @@ +# Copyright 2009-2024 Joshua Bronson. All rights reserved. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + + +"""Provide :class:`OnDup` and related functionality.""" + +from __future__ import annotations + +import typing as t +from enum import Enum + + +class OnDupAction(Enum): + """An action to take to prevent duplication from occurring.""" + + #: Raise a :class:`~bidict.DuplicationError`. + RAISE = 'RAISE' + #: Overwrite existing items with new items. + DROP_OLD = 'DROP_OLD' + #: Keep existing items and drop new items. + DROP_NEW = 'DROP_NEW' + + def __repr__(self) -> str: + return f'{self.__class__.__name__}.{self.name}' + + +RAISE: t.Final[OnDupAction] = OnDupAction.RAISE +DROP_OLD: t.Final[OnDupAction] = OnDupAction.DROP_OLD +DROP_NEW: t.Final[OnDupAction] = OnDupAction.DROP_NEW + + +class OnDup(t.NamedTuple): + r"""A combination of :class:`~bidict.OnDupAction`\s specifying how to handle various types of duplication. + + The :attr:`~OnDup.key` field specifies what action to take when a duplicate key is encountered. + + The :attr:`~OnDup.val` field specifies what action to take when a duplicate value is encountered. + + In the case of both key and value duplication across two different items, + only :attr:`~OnDup.val` is used. 
+ + *See also* :ref:`basic-usage:Values Must Be Unique` + (https://bidict.rtfd.io/basic-usage.html#values-must-be-unique) + """ + + key: OnDupAction = DROP_OLD + val: OnDupAction = RAISE + + +#: Default :class:`OnDup` used for the +#: :meth:`~bidict.bidict.__init__`, +#: :meth:`~bidict.bidict.__setitem__`, and +#: :meth:`~bidict.bidict.update` methods. +ON_DUP_DEFAULT: t.Final[OnDup] = OnDup(key=DROP_OLD, val=RAISE) +#: An :class:`OnDup` whose members are all :obj:`RAISE`. +ON_DUP_RAISE: t.Final[OnDup] = OnDup(key=RAISE, val=RAISE) +#: An :class:`OnDup` whose members are all :obj:`DROP_OLD`. +ON_DUP_DROP_OLD: t.Final[OnDup] = OnDup(key=DROP_OLD, val=DROP_OLD) diff --git a/env/lib/python3.10/site-packages/bidict/_exc.py b/env/lib/python3.10/site-packages/bidict/_exc.py new file mode 100644 index 0000000..e2a96f3 --- /dev/null +++ b/env/lib/python3.10/site-packages/bidict/_exc.py @@ -0,0 +1,36 @@ +# Copyright 2009-2024 Joshua Bronson. All rights reserved. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + + +"""Provide all bidict exceptions.""" + +from __future__ import annotations + + +class BidictException(Exception): + """Base class for bidict exceptions.""" + + +class DuplicationError(BidictException): + """Base class for exceptions raised when uniqueness is violated + as per the :attr:`~bidict.RAISE` :class:`~bidict.OnDupAction`. + """ + + +class KeyDuplicationError(DuplicationError): + """Raised when a given key is not unique.""" + + +class ValueDuplicationError(DuplicationError): + """Raised when a given value is not unique.""" + + +class KeyAndValueDuplicationError(KeyDuplicationError, ValueDuplicationError): + """Raised when a given item's key and value are not unique. + + That is, its key duplicates that of another item, + and its value duplicates that of a different other item. 
+ """ diff --git a/env/lib/python3.10/site-packages/bidict/_frozen.py b/env/lib/python3.10/site-packages/bidict/_frozen.py new file mode 100644 index 0000000..e2f789d --- /dev/null +++ b/env/lib/python3.10/site-packages/bidict/_frozen.py @@ -0,0 +1,50 @@ +# Copyright 2009-2024 Joshua Bronson. All rights reserved. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + + +# * Code review nav * +# (see comments in __init__.py) +# ============================================================================ +# ← Prev: _base.py Current: _frozen.py Next: _bidict.py → +# ============================================================================ + +"""Provide :class:`frozenbidict`, an immutable, hashable bidirectional mapping type.""" + +from __future__ import annotations + +import typing as t + +from ._base import BidictBase +from ._typing import KT +from ._typing import VT + + +class frozenbidict(BidictBase[KT, VT]): + """Immutable, hashable bidict type.""" + + _hash: int + + if t.TYPE_CHECKING: + + @property + def inverse(self) -> frozenbidict[VT, KT]: ... + + @property + def inv(self) -> frozenbidict[VT, KT]: ... + + def __hash__(self) -> int: + """The hash of this bidict as determined by its items.""" + if getattr(self, '_hash', None) is None: + # The following is like hash(frozenset(self.items())) + # but more memory efficient. 
See also: https://bugs.python.org/issue46684 + self._hash = t.ItemsView(self)._hash() + return self._hash + + +# * Code review nav * +# ============================================================================ +# ← Prev: _base.py Current: _frozen.py Next: _bidict.py → +# ============================================================================ diff --git a/env/lib/python3.10/site-packages/bidict/_iter.py b/env/lib/python3.10/site-packages/bidict/_iter.py new file mode 100644 index 0000000..53ad25d --- /dev/null +++ b/env/lib/python3.10/site-packages/bidict/_iter.py @@ -0,0 +1,51 @@ +# Copyright 2009-2024 Joshua Bronson. All rights reserved. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + + +"""Functions for iterating over items in a mapping.""" + +from __future__ import annotations + +import typing as t +from operator import itemgetter + +from ._typing import KT +from ._typing import VT +from ._typing import ItemsIter +from ._typing import Maplike +from ._typing import MapOrItems + + +def iteritems(arg: MapOrItems[KT, VT] = (), /, **kw: VT) -> ItemsIter[KT, VT]: + """Yield the items from *arg* and *kw* in the order given.""" + if isinstance(arg, t.Mapping): + yield from arg.items() + elif isinstance(arg, Maplike): + yield from ((k, arg[k]) for k in arg.keys()) + else: + yield from arg + yield from t.cast(ItemsIter[KT, VT], kw.items()) + + +swap: t.Final = itemgetter(1, 0) + + +def inverted(arg: MapOrItems[KT, VT]) -> ItemsIter[VT, KT]: + """Yield the inverse items of the provided object. + + If *arg* has a :func:`callable` ``__inverted__`` attribute, + return the result of calling it. + + Otherwise, return an iterator over the items in `arg`, + inverting each item on the fly. 
+ + *See also* :attr:`bidict.BidirectionalMapping.__inverted__` + """ + invattr = getattr(arg, '__inverted__', None) + if callable(invattr): + inv: ItemsIter[VT, KT] = invattr() + return inv + return map(swap, iteritems(arg)) diff --git a/env/lib/python3.10/site-packages/bidict/_orderedbase.py b/env/lib/python3.10/site-packages/bidict/_orderedbase.py new file mode 100644 index 0000000..92f2633 --- /dev/null +++ b/env/lib/python3.10/site-packages/bidict/_orderedbase.py @@ -0,0 +1,238 @@ +# Copyright 2009-2024 Joshua Bronson. All rights reserved. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + + +# * Code review nav * +# (see comments in __init__.py) +# ============================================================================ +# ← Prev: _bidict.py Current: _orderedbase.py Next: _orderedbidict.py → +# ============================================================================ + + +"""Provide :class:`OrderedBidictBase`.""" + +from __future__ import annotations + +import typing as t +from weakref import ref as weakref + +from ._base import BidictBase +from ._base import Unwrites +from ._bidict import bidict +from ._iter import iteritems +from ._typing import KT +from ._typing import MISSING +from ._typing import OKT +from ._typing import OVT +from ._typing import VT +from ._typing import MapOrItems + + +AT = t.TypeVar('AT') # attr type + + +class WeakAttr(t.Generic[AT]): + """Descriptor to automatically manage (de)referencing the given slot as a weakref. + + See https://docs.python.org/3/howto/descriptor.html#managed-attributes + for an intro to using descriptors like this for managed attributes. 
+ """ + + def __init__(self, *, slot: str) -> None: + self.slot = slot + + def __set__(self, instance: t.Any, value: AT) -> None: + setattr(instance, self.slot, weakref(value)) + + def __get__(self, instance: t.Any, __owner: t.Any = None) -> AT: + return t.cast(AT, getattr(instance, self.slot)()) + + +class Node: + """A node in a circular doubly-linked list + used to encode the order of items in an ordered bidict. + + A weak reference to the previous node is stored + to avoid creating strong reference cycles. + Referencing/dereferencing the weakref is handled automatically by :class:`WeakAttr`. + """ + + prv: WeakAttr[Node] = WeakAttr(slot='_prv_weak') + __slots__ = ('__weakref__', '_prv_weak', 'nxt') + + nxt: Node | WeakAttr[Node] # Allow subclasses to use a WeakAttr for nxt too (see SentinelNode) + + def __init__(self, prv: Node, nxt: Node) -> None: + self.prv = prv + self.nxt = nxt + + def unlink(self) -> None: + """Remove self from in between prv and nxt. + Self's references to prv and nxt are retained so it can be relinked (see below). + """ + self.prv.nxt = self.nxt + self.nxt.prv = self.prv + + def relink(self) -> None: + """Restore self between prv and nxt after unlinking (see above).""" + self.prv.nxt = self.nxt.prv = self + + +class SentinelNode(Node): + """Special node in a circular doubly-linked list + that links the first node with the last node. + When its next and previous references point back to itself + it represents an empty list. 
+ """ + + nxt: WeakAttr[Node] = WeakAttr(slot='_nxt_weak') + __slots__ = ('_nxt_weak',) + + def __init__(self) -> None: + super().__init__(self, self) + + def iternodes(self, *, reverse: bool = False) -> t.Iterator[Node]: + """Iterator yielding nodes in the requested order.""" + attr = 'prv' if reverse else 'nxt' + node = getattr(self, attr) + while node is not self: + yield node + node = getattr(node, attr) + + def new_last_node(self) -> Node: + """Create and return a new terminal node.""" + old_last = self.prv + new_last = Node(old_last, self) + old_last.nxt = self.prv = new_last + return new_last + + +class OrderedBidictBase(BidictBase[KT, VT]): + """Base class implementing an ordered :class:`BidirectionalMapping`.""" + + _node_by_korv: bidict[t.Any, Node] + _bykey: bool + + def __init__(self, arg: MapOrItems[KT, VT] = (), /, **kw: VT) -> None: + """Make a new ordered bidirectional mapping. + The signature behaves like that of :class:`dict`. + Items passed in are added in the order they are passed, + respecting the :attr:`~bidict.BidictBase.on_dup` + class attribute in the process. + + The order in which items are inserted is remembered, + similar to :class:`collections.OrderedDict`. + """ + self._sntl = SentinelNode() + self._node_by_korv = bidict() + self._bykey = True + super().__init__(arg, **kw) + + if t.TYPE_CHECKING: + + @property + def inverse(self) -> OrderedBidictBase[VT, KT]: ... + + @property + def inv(self) -> OrderedBidictBase[VT, KT]: ... 
+ + def _make_inverse(self) -> OrderedBidictBase[VT, KT]: + inv = t.cast(OrderedBidictBase[VT, KT], super()._make_inverse()) + inv._sntl = self._sntl + inv._node_by_korv = self._node_by_korv + inv._bykey = not self._bykey + return inv + + def _assoc_node(self, node: Node, key: KT, val: VT) -> None: + korv = key if self._bykey else val + self._node_by_korv.forceput(korv, node) + + def _dissoc_node(self, node: Node) -> None: + del self._node_by_korv.inverse[node] + node.unlink() + + def _init_from(self, other: MapOrItems[KT, VT]) -> None: + """See :meth:`BidictBase._init_from`.""" + super()._init_from(other) + bykey = self._bykey + korv_by_node = self._node_by_korv.inverse + korv_by_node.clear() + korv_by_node_set = korv_by_node.__setitem__ + self._sntl.nxt = self._sntl.prv = self._sntl + new_node = self._sntl.new_last_node + for k, v in iteritems(other): + korv_by_node_set(new_node(), k if bykey else v) + + def _write(self, newkey: KT, newval: VT, oldkey: OKT[KT], oldval: OVT[VT], unwrites: Unwrites | None) -> None: + """See :meth:`bidict.BidictBase._spec_write`.""" + super()._write(newkey, newval, oldkey, oldval, unwrites) + assoc, dissoc = self._assoc_node, self._dissoc_node + node_by_korv, bykey = self._node_by_korv, self._bykey + if oldval is MISSING and oldkey is MISSING: # no key or value duplication + # {0: 1, 2: 3} | {4: 5} => {0: 1, 2: 3, 4: 5} + newnode = self._sntl.new_last_node() + assoc(newnode, newkey, newval) + if unwrites is not None: + unwrites.append((dissoc, newnode)) + elif oldval is not MISSING and oldkey is not MISSING: # key and value duplication across two different items + # {0: 1, 2: 3} | {0: 3} => {0: 3} + # n1, n2 => n1 (collapse n1 and n2 into n1) + # oldkey: 2, oldval: 1, oldnode: n2, newkey: 0, newval: 3, newnode: n1 + if bykey: + oldnode = node_by_korv[oldkey] + newnode = node_by_korv[newkey] + else: + oldnode = node_by_korv[newval] + newnode = node_by_korv[oldval] + dissoc(oldnode) + assoc(newnode, newkey, newval) + if unwrites is 
not None: + unwrites.extend(( + (assoc, newnode, newkey, oldval), + (assoc, oldnode, oldkey, newval), + (oldnode.relink,), + )) + elif oldval is not MISSING: # just key duplication + # {0: 1, 2: 3} | {2: 4} => {0: 1, 2: 4} + # oldkey: MISSING, oldval: 3, newkey: 2, newval: 4 + node = node_by_korv[newkey if bykey else oldval] + assoc(node, newkey, newval) + if unwrites is not None: + unwrites.append((assoc, node, newkey, oldval)) + else: + assert oldkey is not MISSING # just value duplication + # {0: 1, 2: 3} | {4: 3} => {0: 1, 4: 3} + # oldkey: 2, oldval: MISSING, newkey: 4, newval: 3 + node = node_by_korv[oldkey if bykey else newval] + assoc(node, newkey, newval) + if unwrites is not None: + unwrites.append((assoc, node, oldkey, newval)) + + def __iter__(self) -> t.Iterator[KT]: + """Iterator over the contained keys in insertion order.""" + return self._iter(reverse=False) + + def __reversed__(self) -> t.Iterator[KT]: + """Iterator over the contained keys in reverse insertion order.""" + return self._iter(reverse=True) + + def _iter(self, *, reverse: bool = False) -> t.Iterator[KT]: + nodes = self._sntl.iternodes(reverse=reverse) + korv_by_node = self._node_by_korv.inverse + if self._bykey: + for node in nodes: + yield korv_by_node[node] + else: + key_by_val = self._invm + for node in nodes: + val = korv_by_node[node] + yield key_by_val[val] + + +# * Code review nav * +# ============================================================================ +# ← Prev: _bidict.py Current: _orderedbase.py Next: _orderedbidict.py → +# ============================================================================ diff --git a/env/lib/python3.10/site-packages/bidict/_orderedbidict.py b/env/lib/python3.10/site-packages/bidict/_orderedbidict.py new file mode 100644 index 0000000..2fb1757 --- /dev/null +++ b/env/lib/python3.10/site-packages/bidict/_orderedbidict.py @@ -0,0 +1,172 @@ +# Copyright 2009-2024 Joshua Bronson. All rights reserved. 
+# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + + +# * Code review nav * +# (see comments in __init__.py) +# ============================================================================ +# ← Prev: _orderedbase.py Current: _orderedbidict.py +# ============================================================================ + + +"""Provide :class:`OrderedBidict`.""" + +from __future__ import annotations + +import typing as t +from collections.abc import Set + +from ._base import BidictKeysView +from ._bidict import MutableBidict +from ._orderedbase import OrderedBidictBase +from ._typing import KT +from ._typing import VT + + +class OrderedBidict(OrderedBidictBase[KT, VT], MutableBidict[KT, VT]): + """Mutable bidict type that maintains items in insertion order.""" + + if t.TYPE_CHECKING: + + @property + def inverse(self) -> OrderedBidict[VT, KT]: ... + + @property + def inv(self) -> OrderedBidict[VT, KT]: ... + + def clear(self) -> None: + """Remove all items.""" + super().clear() + self._node_by_korv.clear() + self._sntl.nxt = self._sntl.prv = self._sntl + + def _pop(self, key: KT) -> VT: + val = super()._pop(key) + node = self._node_by_korv[key if self._bykey else val] + self._dissoc_node(node) + return val + + def popitem(self, last: bool = True) -> tuple[KT, VT]: + """*b.popitem() → (k, v)* + + If *last* is true, + remove and return the most recently added item as a (key, value) pair. + Otherwise, remove and return the least recently added item. + + :raises KeyError: if *b* is empty. 
+ """ + if not self: + raise KeyError('OrderedBidict is empty') + node = getattr(self._sntl, 'prv' if last else 'nxt') + korv = self._node_by_korv.inverse[node] + if self._bykey: + return korv, self._pop(korv) + return self.inverse._pop(korv), korv + + def move_to_end(self, key: KT, last: bool = True) -> None: + """Move the item with the given key to the end if *last* is true, else to the beginning. + + :raises KeyError: if *key* is missing + """ + korv = key if self._bykey else self._fwdm[key] + node = self._node_by_korv[korv] + node.prv.nxt = node.nxt + node.nxt.prv = node.prv + sntl = self._sntl + if last: + lastnode = sntl.prv + node.prv = lastnode + node.nxt = sntl + sntl.prv = lastnode.nxt = node + else: + firstnode = sntl.nxt + node.prv = sntl + node.nxt = firstnode + sntl.nxt = firstnode.prv = node + + # Override the keys() and items() implementations inherited from BidictBase, + # which may delegate to the backing _fwdm dict, since this is a mutable ordered bidict, + # and therefore the ordering of items can get out of sync with the backing mappings + # after mutation. (Need not override values() because it delegates to .inverse.keys().) + def keys(self) -> t.KeysView[KT]: + """A set-like object providing a view on the contained keys.""" + return _OrderedBidictKeysView(self) + + def items(self) -> t.ItemsView[KT, VT]: + """A set-like object providing a view on the contained items.""" + return _OrderedBidictItemsView(self) + + +# The following MappingView implementations use the __iter__ implementations +# inherited from their superclass counterparts in collections.abc, so they +# continue to yield items in the correct order even after an OrderedBidict +# is mutated. They also provide a __reversed__ implementation, which is not +# provided by the collections.abc superclasses. 
+class _OrderedBidictKeysView(BidictKeysView[KT]): + _mapping: OrderedBidict[KT, t.Any] + + def __reversed__(self) -> t.Iterator[KT]: + return reversed(self._mapping) + + +class _OrderedBidictItemsView(t.ItemsView[KT, VT]): + _mapping: OrderedBidict[KT, VT] + + def __reversed__(self) -> t.Iterator[tuple[KT, VT]]: + ob = self._mapping + for key in reversed(ob): + yield key, ob[key] + + +# For better performance, make _OrderedBidictKeysView and _OrderedBidictItemsView delegate +# to backing dicts for the methods they inherit from collections.abc.Set. (Cannot delegate +# for __iter__ and __reversed__ since they are order-sensitive.) See also: https://bugs.python.org/issue46713 +_OView = t.Union[t.Type[_OrderedBidictKeysView[KT]], t.Type[_OrderedBidictItemsView[KT, t.Any]]] +_setmethodnames: t.Iterable[str] = ( + '__lt__ __le__ __gt__ __ge__ __eq__ __ne__ __sub__ __rsub__ ' + '__or__ __ror__ __xor__ __rxor__ __and__ __rand__ isdisjoint' +).split() + + +def _override_set_methods_to_use_backing_dict(cls: _OView[KT], viewname: str) -> None: + def make_proxy_method(methodname: str) -> t.Any: + def method(self: _OrderedBidictKeysView[KT] | _OrderedBidictItemsView[KT, t.Any], *args: t.Any) -> t.Any: + fwdm = self._mapping._fwdm + if not isinstance(fwdm, dict): # dict view speedup not available, fall back to Set's implementation. + return getattr(Set, methodname)(self, *args) + fwdm_dict_view = getattr(fwdm, viewname)() + fwdm_dict_view_method = getattr(fwdm_dict_view, methodname) + if ( + len(args) != 1 + or not isinstance((arg := args[0]), self.__class__) + or not isinstance(arg._mapping._fwdm, dict) + ): + return fwdm_dict_view_method(*args) + # self and arg are both _OrderedBidictKeysViews or _OrderedBidictItemsViews whose bidicts are backed by + # a dict. Use arg's backing dict's corresponding view instead of arg. Otherwise, e.g. 
`ob1.keys() + # < ob2.keys()` would give "TypeError: '<' not supported between instances of '_OrderedBidictKeysView' and + # '_OrderedBidictKeysView'", because both `dict_keys(ob1).__lt__(ob2.keys()) is NotImplemented` and + # `dict_keys(ob2).__gt__(ob1.keys()) is NotImplemented`. + arg_dict = arg._mapping._fwdm + arg_dict_view = getattr(arg_dict, viewname)() + return fwdm_dict_view_method(arg_dict_view) + + method.__name__ = methodname + method.__qualname__ = f'{cls.__qualname__}.{methodname}' + return method + + for name in _setmethodnames: + setattr(cls, name, make_proxy_method(name)) + + +_override_set_methods_to_use_backing_dict(_OrderedBidictKeysView, 'keys') +_override_set_methods_to_use_backing_dict(_OrderedBidictItemsView, 'items') + + +# * Code review nav * +# ============================================================================ +# ← Prev: _orderedbase.py Current: _orderedbidict.py +# ============================================================================ diff --git a/env/lib/python3.10/site-packages/bidict/_typing.py b/env/lib/python3.10/site-packages/bidict/_typing.py new file mode 100644 index 0000000..ce95053 --- /dev/null +++ b/env/lib/python3.10/site-packages/bidict/_typing.py @@ -0,0 +1,49 @@ +# Copyright 2009-2024 Joshua Bronson. All rights reserved. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + + +"""Provide typing-related objects.""" + +from __future__ import annotations + +import typing as t +from enum import Enum + + +KT = t.TypeVar('KT') +VT = t.TypeVar('VT') +VT_co = t.TypeVar('VT_co', covariant=True) + + +Items = t.Iterable[t.Tuple[KT, VT]] + + +@t.runtime_checkable +class Maplike(t.Protocol[KT, VT_co]): + """Like typeshed's SupportsKeysAndGetItem, but usable at runtime.""" + + def keys(self) -> t.Iterable[KT]: ... + + def __getitem__(self, __key: KT) -> VT_co: ... 
+ + +MapOrItems = t.Union[Maplike[KT, VT], Items[KT, VT]] +MappOrItems = t.Union[t.Mapping[KT, VT], Items[KT, VT]] +ItemsIter = t.Iterator[t.Tuple[KT, VT]] + + +class MissingT(Enum): + """Sentinel used to represent none/missing when None itself can't be used.""" + + MISSING = 'MISSING' + + +MISSING: t.Final[t.Literal[MissingT.MISSING]] = MissingT.MISSING +OKT = t.Union[KT, MissingT] #: optional key type +OVT = t.Union[VT, MissingT] #: optional value type + +DT = t.TypeVar('DT') #: for default arguments +ODT = t.Union[DT, MissingT] #: optional default arg type diff --git a/env/lib/python3.10/site-packages/bidict/metadata.py b/env/lib/python3.10/site-packages/bidict/metadata.py new file mode 100644 index 0000000..30ad836 --- /dev/null +++ b/env/lib/python3.10/site-packages/bidict/metadata.py @@ -0,0 +1,14 @@ +# Copyright 2009-2024 Joshua Bronson. All rights reserved. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +"""Define bidict package metadata.""" + +__version__ = '0.23.1' +__author__ = {'name': 'Joshua Bronson', 'email': 'jabronson@gmail.com'} +__copyright__ = '© 2009-2024 Joshua Bronson' +__description__ = 'The bidirectional mapping library for Python.' +__license__ = 'MPL 2.0' +__url__ = 'https://bidict.readthedocs.io' diff --git a/env/lib/python3.10/site-packages/bidict/py.typed b/env/lib/python3.10/site-packages/bidict/py.typed new file mode 100644 index 0000000..342ea76 --- /dev/null +++ b/env/lib/python3.10/site-packages/bidict/py.typed @@ -0,0 +1 @@ +PEP-561 marker. 
diff --git a/env/lib/python3.10/site-packages/easy-install.pth b/env/lib/python3.10/site-packages/easy-install.pth new file mode 100644 index 0000000..8b108b4 --- /dev/null +++ b/env/lib/python3.10/site-packages/easy-install.pth @@ -0,0 +1 @@ +/home/davidson/Projects/evolution_client/python diff --git a/env/lib/python3.10/site-packages/engineio/__init__.py b/env/lib/python3.10/site-packages/engineio/__init__.py new file mode 100644 index 0000000..4919efd --- /dev/null +++ b/env/lib/python3.10/site-packages/engineio/__init__.py @@ -0,0 +1,13 @@ +from .client import Client +from .middleware import WSGIApp, Middleware +from .server import Server +from .async_server import AsyncServer +from .async_client import AsyncClient +from .async_drivers.asgi import ASGIApp +try: + from .async_drivers.tornado import get_tornado_handler +except ImportError: # pragma: no cover + get_tornado_handler = None + +__all__ = ['Server', 'WSGIApp', 'Middleware', 'Client', + 'AsyncServer', 'ASGIApp', 'get_tornado_handler', 'AsyncClient'] diff --git a/env/lib/python3.10/site-packages/engineio/async_client.py b/env/lib/python3.10/site-packages/engineio/async_client.py new file mode 100644 index 0000000..7db3879 --- /dev/null +++ b/env/lib/python3.10/site-packages/engineio/async_client.py @@ -0,0 +1,680 @@ +import asyncio +import signal +import ssl +import threading + +try: + import aiohttp +except ImportError: # pragma: no cover + aiohttp = None + +from . import base_client +from . import exceptions +from . import packet +from . import payload + +async_signal_handler_set = False + +# this set is used to keep references to background tasks to prevent them from +# being garbage collected mid-execution. Solution taken from +# https://docs.python.org/3/library/asyncio-task.html#asyncio.create_task +task_reference_holder = set() + + +def async_signal_handler(): + """SIGINT handler. + + Disconnect all active async clients. 
+ """ + async def _handler(): # pragma: no cover + for c in base_client.connected_clients[:]: + if c.is_asyncio_based(): + await c.disconnect() + + # cancel all running tasks + tasks = [task for task in asyncio.all_tasks() if task is not + asyncio.current_task()] + for task in tasks: + task.cancel() + await asyncio.gather(*tasks, return_exceptions=True) + asyncio.get_running_loop().stop() + + asyncio.ensure_future(_handler()) + + +class AsyncClient(base_client.BaseClient): + """An Engine.IO client for asyncio. + + This class implements a fully compliant Engine.IO web client with support + for websocket and long-polling transports, compatible with the asyncio + framework on Python 3.5 or newer. + + :param logger: To enable logging set to ``True`` or pass a logger object to + use. To disable logging set to ``False``. The default is + ``False``. Note that fatal errors are logged even when + ``logger`` is ``False``. + :param json: An alternative json module to use for encoding and decoding + packets. Custom json modules must have ``dumps`` and ``loads`` + functions that are compatible with the standard library + versions. + :param request_timeout: A timeout in seconds for requests. The default is + 5 seconds. + :param http_session: an initialized ``aiohttp.ClientSession`` object to be + used when sending requests to the server. Use it if + you need to add special client options such as proxy + servers, SSL certificates, custom CA bundle, etc. + :param ssl_verify: ``True`` to verify SSL certificates, or ``False`` to + skip SSL certificate verification, allowing + connections to servers with self signed certificates. + The default is ``True``. + :param handle_sigint: Set to ``True`` to automatically handle disconnection + when the process is interrupted, or to ``False`` to + leave interrupt handling to the calling application. + Interrupt handling can only be enabled when the + client instance is created in the main thread. 
+ :param websocket_extra_options: Dictionary containing additional keyword + arguments passed to + ``aiohttp.ws_connect()``. + :param timestamp_requests: If ``True`` a timestamp is added to the query + string of Socket.IO requests as a cache-busting + measure. Set to ``False`` to disable. + """ + def is_asyncio_based(self): + return True + + async def connect(self, url, headers=None, transports=None, + engineio_path='engine.io'): + """Connect to an Engine.IO server. + + :param url: The URL of the Engine.IO server. It can include custom + query string parameters if required by the server. + :param headers: A dictionary with custom headers to send with the + connection request. + :param transports: The list of allowed transports. Valid transports + are ``'polling'`` and ``'websocket'``. If not + given, the polling transport is connected first, + then an upgrade to websocket is attempted. + :param engineio_path: The endpoint where the Engine.IO server is + installed. The default value is appropriate for + most cases. + + Note: this method is a coroutine. 
+ + Example usage:: + + eio = engineio.Client() + await eio.connect('http://localhost:5000') + """ + global async_signal_handler_set + if self.handle_sigint and not async_signal_handler_set and \ + threading.current_thread() == threading.main_thread(): + try: + asyncio.get_running_loop().add_signal_handler( + signal.SIGINT, async_signal_handler) + except NotImplementedError: # pragma: no cover + self.logger.warning('Signal handler is unsupported') + async_signal_handler_set = True + + if self.state != 'disconnected': + raise ValueError('Client is not in a disconnected state') + valid_transports = ['polling', 'websocket'] + if transports is not None: + if isinstance(transports, str): + transports = [transports] + transports = [transport for transport in transports + if transport in valid_transports] + if not transports: + raise ValueError('No valid transports provided') + self.transports = transports or valid_transports + self.queue = self.create_queue() + return await getattr(self, '_connect_' + self.transports[0])( + url, headers or {}, engineio_path) + + async def wait(self): + """Wait until the connection with the server ends. + + Client applications can use this function to block the main thread + during the life of the connection. + + Note: this method is a coroutine. + """ + if self.read_loop_task: + await self.read_loop_task + + async def send(self, data): + """Send a message to the server. + + :param data: The data to send to the server. Data can be of type + ``str``, ``bytes``, ``list`` or ``dict``. If a ``list`` + or ``dict``, the data will be serialized as JSON. + + Note: this method is a coroutine. + """ + await self._send_packet(packet.Packet(packet.MESSAGE, data=data)) + + async def disconnect(self, abort=False, reason=None): + """Disconnect from the server. + + :param abort: If set to ``True``, do not wait for background tasks + associated with the connection to end. + + Note: this method is a coroutine. 
+ """ + if self.state == 'connected': + await self._send_packet(packet.Packet(packet.CLOSE)) + await self.queue.put(None) + self.state = 'disconnecting' + await self._trigger_event('disconnect', + reason or self.reason.CLIENT_DISCONNECT, + run_async=False) + if self.current_transport == 'websocket': + await self.ws.close() + if not abort: + await self.read_loop_task + self.state = 'disconnected' + try: + base_client.connected_clients.remove(self) + except ValueError: # pragma: no cover + pass + await self._reset() + + def start_background_task(self, target, *args, **kwargs): + """Start a background task. + + This is a utility function that applications can use to start a + background task. + + :param target: the target function to execute. + :param args: arguments to pass to the function. + :param kwargs: keyword arguments to pass to the function. + + The return value is a ``asyncio.Task`` object. + """ + return asyncio.ensure_future(target(*args, **kwargs)) + + async def sleep(self, seconds=0): + """Sleep for the requested amount of time. + + Note: this method is a coroutine. 
+ """ + return await asyncio.sleep(seconds) + + def create_queue(self): + """Create a queue object.""" + q = asyncio.Queue() + q.Empty = asyncio.QueueEmpty + return q + + def create_event(self): + """Create an event object.""" + return asyncio.Event() + + async def _reset(self): + super()._reset() + if not self.external_http: # pragma: no cover + if self.http and not self.http.closed: + await self.http.close() + + def __del__(self): # pragma: no cover + # try to close the aiohttp session if it is still open + if self.http and not self.http.closed: + try: + loop = asyncio.get_event_loop() + if loop.is_running(): + loop.ensure_future(self.http.close()) + else: + loop.run_until_complete(self.http.close()) + except: + pass + + async def _connect_polling(self, url, headers, engineio_path): + """Establish a long-polling connection to the Engine.IO server.""" + if aiohttp is None: # pragma: no cover + self.logger.error('aiohttp not installed -- cannot make HTTP ' + 'requests!') + return + self.base_url = self._get_engineio_url(url, engineio_path, 'polling') + self.logger.info('Attempting polling connection to ' + self.base_url) + r = await self._send_request( + 'GET', self.base_url + self._get_url_timestamp(), headers=headers, + timeout=self.request_timeout) + if r is None or isinstance(r, str): + await self._reset() + raise exceptions.ConnectionError( + r or 'Connection refused by the server') + if r.status < 200 or r.status >= 300: + await self._reset() + try: + arg = await r.json() + except aiohttp.ClientError: + arg = None + raise exceptions.ConnectionError( + 'Unexpected status code {} in server response'.format( + r.status), arg) + try: + p = payload.Payload(encoded_payload=(await r.read()).decode( + 'utf-8')) + except ValueError: + raise exceptions.ConnectionError( + 'Unexpected response from server') from None + open_packet = p.packets[0] + if open_packet.packet_type != packet.OPEN: + raise exceptions.ConnectionError( + 'OPEN packet not returned by server') + 
self.logger.info( + 'Polling connection accepted with ' + str(open_packet.data)) + self.sid = open_packet.data['sid'] + self.upgrades = open_packet.data['upgrades'] + self.ping_interval = int(open_packet.data['pingInterval']) / 1000.0 + self.ping_timeout = int(open_packet.data['pingTimeout']) / 1000.0 + self.current_transport = 'polling' + self.base_url += '&sid=' + self.sid + + self.state = 'connected' + base_client.connected_clients.append(self) + await self._trigger_event('connect', run_async=False) + + for pkt in p.packets[1:]: + await self._receive_packet(pkt) + + if 'websocket' in self.upgrades and 'websocket' in self.transports: + # attempt to upgrade to websocket + if await self._connect_websocket(url, headers, engineio_path): + # upgrade to websocket succeeded, we're done here + return + + self.write_loop_task = self.start_background_task(self._write_loop) + self.read_loop_task = self.start_background_task( + self._read_loop_polling) + + async def _connect_websocket(self, url, headers, engineio_path): + """Establish or upgrade to a WebSocket connection with the server.""" + if aiohttp is None: # pragma: no cover + self.logger.error('aiohttp package not installed') + return False + websocket_url = self._get_engineio_url(url, engineio_path, + 'websocket') + if self.sid: + self.logger.info( + 'Attempting WebSocket upgrade to ' + websocket_url) + upgrade = True + websocket_url += '&sid=' + self.sid + else: + upgrade = False + self.base_url = websocket_url + self.logger.info( + 'Attempting WebSocket connection to ' + websocket_url) + + if self.http is None or self.http.closed: # pragma: no cover + self.http = aiohttp.ClientSession() + + # extract any new cookies passed in a header so that they can also be + # sent the the WebSocket route + cookies = {} + for header, value in headers.items(): + if header.lower() == 'cookie': + cookies = dict( + [cookie.split('=', 1) for cookie in value.split('; ')]) + del headers[header] + break + 
self.http.cookie_jar.update_cookies(cookies) + + extra_options = {'timeout': self.request_timeout} + if not self.ssl_verify: + ssl_context = ssl.create_default_context() + ssl_context.check_hostname = False + ssl_context.verify_mode = ssl.CERT_NONE + extra_options['ssl'] = ssl_context + + # combine internally generated options with the ones supplied by the + # caller. The caller's options take precedence. + headers.update(self.websocket_extra_options.pop('headers', {})) + extra_options['headers'] = headers + extra_options.update(self.websocket_extra_options) + + try: + ws = await self.http.ws_connect( + websocket_url + self._get_url_timestamp(), **extra_options) + except (aiohttp.client_exceptions.WSServerHandshakeError, + aiohttp.client_exceptions.ServerConnectionError, + aiohttp.client_exceptions.ClientConnectionError): + if upgrade: + self.logger.warning( + 'WebSocket upgrade failed: connection error') + return False + else: + raise exceptions.ConnectionError('Connection error') + if upgrade: + p = packet.Packet(packet.PING, data='probe').encode() + try: + await ws.send_str(p) + except Exception as e: # pragma: no cover + self.logger.warning( + 'WebSocket upgrade failed: unexpected send exception: %s', + str(e)) + return False + try: + p = (await ws.receive()).data + except Exception as e: # pragma: no cover + self.logger.warning( + 'WebSocket upgrade failed: unexpected recv exception: %s', + str(e)) + return False + pkt = packet.Packet(encoded_packet=p) + if pkt.packet_type != packet.PONG or pkt.data != 'probe': + self.logger.warning( + 'WebSocket upgrade failed: no PONG packet') + return False + p = packet.Packet(packet.UPGRADE).encode() + try: + await ws.send_str(p) + except Exception as e: # pragma: no cover + self.logger.warning( + 'WebSocket upgrade failed: unexpected send exception: %s', + str(e)) + return False + self.current_transport = 'websocket' + self.logger.info('WebSocket upgrade was successful') + else: + try: + p = (await ws.receive()).data + 
except Exception as e: # pragma: no cover + raise exceptions.ConnectionError( + 'Unexpected recv exception: ' + str(e)) + open_packet = packet.Packet(encoded_packet=p) + if open_packet.packet_type != packet.OPEN: + raise exceptions.ConnectionError('no OPEN packet') + self.logger.info( + 'WebSocket connection accepted with ' + str(open_packet.data)) + self.sid = open_packet.data['sid'] + self.upgrades = open_packet.data['upgrades'] + self.ping_interval = int(open_packet.data['pingInterval']) / 1000.0 + self.ping_timeout = int(open_packet.data['pingTimeout']) / 1000.0 + self.current_transport = 'websocket' + + self.state = 'connected' + base_client.connected_clients.append(self) + await self._trigger_event('connect', run_async=False) + + self.ws = ws + self.write_loop_task = self.start_background_task(self._write_loop) + self.read_loop_task = self.start_background_task( + self._read_loop_websocket) + return True + + async def _receive_packet(self, pkt): + """Handle incoming packets from the server.""" + packet_name = packet.packet_names[pkt.packet_type] \ + if pkt.packet_type < len(packet.packet_names) else 'UNKNOWN' + self.logger.info( + 'Received packet %s data %s', packet_name, + pkt.data if not isinstance(pkt.data, bytes) else '') + if pkt.packet_type == packet.MESSAGE: + await self._trigger_event('message', pkt.data, run_async=True) + elif pkt.packet_type == packet.PING: + await self._send_packet(packet.Packet(packet.PONG, pkt.data)) + elif pkt.packet_type == packet.CLOSE: + await self.disconnect(abort=True, + reason=self.reason.SERVER_DISCONNECT) + elif pkt.packet_type == packet.NOOP: + pass + else: + self.logger.error('Received unexpected packet of type %s', + pkt.packet_type) + + async def _send_packet(self, pkt): + """Queue a packet to be sent to the server.""" + if self.state != 'connected': + return + await self.queue.put(pkt) + self.logger.info( + 'Sending packet %s data %s', + packet.packet_names[pkt.packet_type], + pkt.data if not isinstance(pkt.data, 
bytes) else '') + + async def _send_request( + self, method, url, headers=None, body=None, + timeout=None): # pragma: no cover + if self.http is None or self.http.closed: + self.http = aiohttp.ClientSession() + http_method = getattr(self.http, method.lower()) + + try: + if not self.ssl_verify: + return await http_method( + url, headers=headers, data=body, + timeout=aiohttp.ClientTimeout(total=timeout), ssl=False) + else: + return await http_method( + url, headers=headers, data=body, + timeout=aiohttp.ClientTimeout(total=timeout)) + + except (aiohttp.ClientError, asyncio.TimeoutError) as exc: + self.logger.info('HTTP %s request to %s failed with error %s.', + method, url, exc) + return str(exc) + + async def _trigger_event(self, event, *args, **kwargs): + """Invoke an event handler.""" + run_async = kwargs.pop('run_async', False) + ret = None + if event in self.handlers: + if asyncio.iscoroutinefunction(self.handlers[event]) is True: + if run_async: + task = self.start_background_task(self.handlers[event], + *args) + task_reference_holder.add(task) + task.add_done_callback(task_reference_holder.discard) + return task + else: + try: + try: + ret = await self.handlers[event](*args) + except TypeError: + if event == 'disconnect' and \ + len(args) == 1: # pragma: no branch + # legacy disconnect events do not have a reason + # argument + return await self.handlers[event]() + else: # pragma: no cover + raise + except asyncio.CancelledError: # pragma: no cover + pass + except: + self.logger.exception(event + ' async handler error') + if event == 'connect': + # if connect handler raised error we reject the + # connection + return False + else: + if run_async: + async def async_handler(): + return self.handlers[event](*args) + + task = self.start_background_task(async_handler) + task_reference_holder.add(task) + task.add_done_callback(task_reference_holder.discard) + return task + else: + try: + try: + ret = self.handlers[event](*args) + except TypeError: + if event == 
'disconnect' and \ + len(args) == 1: # pragma: no branch + # legacy disconnect events do not have a reason + # argument + ret = self.handlers[event]() + else: # pragma: no cover + raise + except: + self.logger.exception(event + ' handler error') + if event == 'connect': + # if connect handler raised error we reject the + # connection + return False + return ret + + async def _read_loop_polling(self): + """Read packets by polling the Engine.IO server.""" + while self.state == 'connected' and self.write_loop_task: + self.logger.info( + 'Sending polling GET request to ' + self.base_url) + r = await self._send_request( + 'GET', self.base_url + self._get_url_timestamp(), + timeout=max(self.ping_interval, self.ping_timeout) + 5) + if r is None or isinstance(r, str): + self.logger.warning( + r or 'Connection refused by the server, aborting') + await self.queue.put(None) + break + if r.status < 200 or r.status >= 300: + self.logger.warning('Unexpected status code %s in server ' + 'response, aborting', r.status) + await self.queue.put(None) + break + try: + p = payload.Payload(encoded_payload=(await r.read()).decode( + 'utf-8')) + except ValueError: + self.logger.warning( + 'Unexpected packet from server, aborting') + await self.queue.put(None) + break + for pkt in p.packets: + await self._receive_packet(pkt) + + if self.write_loop_task: # pragma: no branch + self.logger.info('Waiting for write loop task to end') + await self.write_loop_task + if self.state == 'connected': + await self._trigger_event( + 'disconnect', self.reason.TRANSPORT_ERROR, run_async=False) + try: + base_client.connected_clients.remove(self) + except ValueError: # pragma: no cover + pass + await self._reset() + self.logger.info('Exiting read loop task') + + async def _read_loop_websocket(self): + """Read packets from the Engine.IO WebSocket connection.""" + while self.state == 'connected': + p = None + try: + p = await asyncio.wait_for( + self.ws.receive(), + timeout=self.ping_interval + 
self.ping_timeout) + if not isinstance(p.data, (str, bytes)): # pragma: no cover + self.logger.warning( + 'Server sent %s packet data %s, aborting', + 'close' if p.type in [aiohttp.WSMsgType.CLOSE, + aiohttp.WSMsgType.CLOSING] + else str(p.type), str(p.data)) + await self.queue.put(None) + break # the connection is broken + p = p.data + except asyncio.TimeoutError: + self.logger.warning( + 'Server has stopped communicating, aborting') + await self.queue.put(None) + break + except aiohttp.client_exceptions.ServerDisconnectedError: + self.logger.info( + 'Read loop: WebSocket connection was closed, aborting') + await self.queue.put(None) + break + except Exception as e: + self.logger.info( + 'Unexpected error receiving packet: "%s", aborting', + str(e)) + await self.queue.put(None) + break + try: + pkt = packet.Packet(encoded_packet=p) + except Exception as e: # pragma: no cover + self.logger.info( + 'Unexpected error decoding packet: "%s", aborting', str(e)) + await self.queue.put(None) + break + await self._receive_packet(pkt) + + if self.write_loop_task: # pragma: no branch + self.logger.info('Waiting for write loop task to end') + await self.write_loop_task + if self.state == 'connected': + await self._trigger_event( + 'disconnect', self.reason.TRANSPORT_ERROR, run_async=False) + try: + base_client.connected_clients.remove(self) + except ValueError: # pragma: no cover + pass + await self._reset() + self.logger.info('Exiting read loop task') + + async def _write_loop(self): + """This background task sends packages to the server as they are + pushed to the send queue. 
+ """ + while self.state == 'connected': + # to simplify the timeout handling, use the maximum of the + # ping interval and ping timeout as timeout, with an extra 5 + # seconds grace period + timeout = max(self.ping_interval, self.ping_timeout) + 5 + packets = None + try: + packets = [await asyncio.wait_for(self.queue.get(), timeout)] + except (self.queue.Empty, asyncio.TimeoutError): + self.logger.error('packet queue is empty, aborting') + break + except asyncio.CancelledError: # pragma: no cover + break + if packets == [None]: + self.queue.task_done() + packets = [] + else: + while True: + try: + packets.append(self.queue.get_nowait()) + except self.queue.Empty: + break + if packets[-1] is None: + packets = packets[:-1] + self.queue.task_done() + break + if not packets: + # empty packet list returned -> connection closed + break + if self.current_transport == 'polling': + p = payload.Payload(packets=packets) + r = await self._send_request( + 'POST', self.base_url, body=p.encode(), + headers={'Content-Type': 'text/plain'}, + timeout=self.request_timeout) + for pkt in packets: + self.queue.task_done() + if r is None or isinstance(r, str): + self.logger.warning( + r or 'Connection refused by the server, aborting') + break + if r.status < 200 or r.status >= 300: + self.logger.warning('Unexpected status code %s in server ' + 'response, aborting', r.status) + self.write_loop_task = None + break + else: + # websocket + try: + for pkt in packets: + if pkt.binary: + await self.ws.send_bytes(pkt.encode()) + else: + await self.ws.send_str(pkt.encode()) + self.queue.task_done() + except (aiohttp.client_exceptions.ServerDisconnectedError, + BrokenPipeError, OSError): + self.logger.info( + 'Write loop: WebSocket connection was closed, ' + 'aborting') + break + self.logger.info('Exiting write loop task') diff --git a/env/lib/python3.10/site-packages/engineio/async_drivers/__init__.py b/env/lib/python3.10/site-packages/engineio/async_drivers/__init__.py new file mode 100644 
index 0000000..e69de29 diff --git a/env/lib/python3.10/site-packages/engineio/async_drivers/_websocket_wsgi.py b/env/lib/python3.10/site-packages/engineio/async_drivers/_websocket_wsgi.py new file mode 100644 index 0000000..aca30dc --- /dev/null +++ b/env/lib/python3.10/site-packages/engineio/async_drivers/_websocket_wsgi.py @@ -0,0 +1,34 @@ +import simple_websocket + + +class SimpleWebSocketWSGI: # pragma: no cover + """ + This wrapper class provides a threading WebSocket interface that is + compatible with eventlet's implementation. + """ + def __init__(self, handler, server, **kwargs): + self.app = handler + self.server_args = kwargs + + def __call__(self, environ, start_response): + self.ws = simple_websocket.Server(environ, **self.server_args) + ret = self.app(self) + if self.ws.mode == 'gunicorn': + raise StopIteration() + return ret + + def close(self): + if self.ws.connected: + self.ws.close() + + def send(self, message): + try: + return self.ws.send(message) + except simple_websocket.ConnectionClosed: + raise OSError() + + def wait(self): + try: + return self.ws.receive() + except simple_websocket.ConnectionClosed: + return None diff --git a/env/lib/python3.10/site-packages/engineio/async_drivers/aiohttp.py b/env/lib/python3.10/site-packages/engineio/async_drivers/aiohttp.py new file mode 100644 index 0000000..7c3440f --- /dev/null +++ b/env/lib/python3.10/site-packages/engineio/async_drivers/aiohttp.py @@ -0,0 +1,127 @@ +import asyncio +import sys +from urllib.parse import urlsplit + +from aiohttp.web import Response, WebSocketResponse + + +def create_route(app, engineio_server, engineio_endpoint): + """This function sets up the engine.io endpoint as a route for the + application. + + Note that both GET and POST requests must be hooked up on the engine.io + endpoint. 
+ """ + app.router.add_get(engineio_endpoint, engineio_server.handle_request) + app.router.add_post(engineio_endpoint, engineio_server.handle_request) + app.router.add_route('OPTIONS', engineio_endpoint, + engineio_server.handle_request) + + +def translate_request(request): + """This function takes the arguments passed to the request handler and + uses them to generate a WSGI compatible environ dictionary. + """ + message = request._message + payload = request._payload + + uri_parts = urlsplit(message.path) + environ = { + 'wsgi.input': payload, + 'wsgi.errors': sys.stderr, + 'wsgi.version': (1, 0), + 'wsgi.async': True, + 'wsgi.multithread': False, + 'wsgi.multiprocess': False, + 'wsgi.run_once': False, + 'SERVER_SOFTWARE': 'aiohttp', + 'REQUEST_METHOD': message.method, + 'QUERY_STRING': uri_parts.query or '', + 'RAW_URI': message.path, + 'SERVER_PROTOCOL': 'HTTP/%s.%s' % message.version, + 'REMOTE_ADDR': '127.0.0.1', + 'REMOTE_PORT': '0', + 'SERVER_NAME': 'aiohttp', + 'SERVER_PORT': '0', + 'aiohttp.request': request + } + + for hdr_name, hdr_value in message.headers.items(): + hdr_name = hdr_name.upper() + if hdr_name == 'CONTENT-TYPE': + environ['CONTENT_TYPE'] = hdr_value + continue + elif hdr_name == 'CONTENT-LENGTH': + environ['CONTENT_LENGTH'] = hdr_value + continue + + key = 'HTTP_%s' % hdr_name.replace('-', '_') + if key in environ: + hdr_value = f'{environ[key]},{hdr_value}' + + environ[key] = hdr_value + + environ['wsgi.url_scheme'] = environ.get('HTTP_X_FORWARDED_PROTO', 'http') + + path_info = uri_parts.path + + environ['PATH_INFO'] = path_info + environ['SCRIPT_NAME'] = '' + + return environ + + +def make_response(status, headers, payload, environ): + """This function generates an appropriate response object for this async + mode. 
+ """ + return Response(body=payload, status=int(status.split()[0]), + headers=headers) + + +class WebSocket: # pragma: no cover + """ + This wrapper class provides a aiohttp WebSocket interface that is + somewhat compatible with eventlet's implementation. + """ + def __init__(self, handler, server): + self.handler = handler + self._sock = None + + async def __call__(self, environ): + request = environ['aiohttp.request'] + self._sock = WebSocketResponse(max_msg_size=0) + await self._sock.prepare(request) + + self.environ = environ + await self.handler(self) + return self._sock + + async def close(self): + await self._sock.close() + + async def send(self, message): + if isinstance(message, bytes): + f = self._sock.send_bytes + else: + f = self._sock.send_str + if asyncio.iscoroutinefunction(f): + await f(message) + else: + f(message) + + async def wait(self): + msg = await self._sock.receive() + if not isinstance(msg.data, bytes) and \ + not isinstance(msg.data, str): + raise OSError() + return msg.data + + +_async = { + 'asyncio': True, + 'create_route': create_route, + 'translate_request': translate_request, + 'make_response': make_response, + 'websocket': WebSocket, +} diff --git a/env/lib/python3.10/site-packages/engineio/async_drivers/asgi.py b/env/lib/python3.10/site-packages/engineio/async_drivers/asgi.py new file mode 100644 index 0000000..866033b --- /dev/null +++ b/env/lib/python3.10/site-packages/engineio/async_drivers/asgi.py @@ -0,0 +1,291 @@ +import os +import sys +import asyncio + +from engineio.static_files import get_static_file + + +class ASGIApp: + """ASGI application middleware for Engine.IO. + + This middleware dispatches traffic to an Engine.IO application. It can + also serve a list of static files to the client, or forward unrelated + HTTP traffic to another ASGI application. + + :param engineio_server: The Engine.IO server. Must be an instance of the + ``engineio.AsyncServer`` class. 
+ :param static_files: A dictionary with static file mapping rules. See the + documentation for details on this argument. + :param other_asgi_app: A separate ASGI app that receives all other traffic. + :param engineio_path: The endpoint where the Engine.IO application should + be installed. The default value is appropriate for + most cases. With a value of ``None``, all incoming + traffic is directed to the Engine.IO server, with the + assumption that routing, if necessary, is handled by + a different layer. When this option is set to + ``None``, ``static_files`` and ``other_asgi_app`` are + ignored. + :param on_startup: function to be called on application startup; can be + coroutine + :param on_shutdown: function to be called on application shutdown; can be + coroutine + + Example usage:: + + import engineio + import uvicorn + + eio = engineio.AsyncServer() + app = engineio.ASGIApp(eio, static_files={ + '/': {'content_type': 'text/html', 'filename': 'index.html'}, + '/index.html': {'content_type': 'text/html', + 'filename': 'index.html'}, + }) + uvicorn.run(app, '127.0.0.1', 5000) + """ + def __init__(self, engineio_server, other_asgi_app=None, + static_files=None, engineio_path='engine.io', + on_startup=None, on_shutdown=None): + self.engineio_server = engineio_server + self.other_asgi_app = other_asgi_app + self.engineio_path = engineio_path + if self.engineio_path is not None: + if not self.engineio_path.startswith('/'): + self.engineio_path = '/' + self.engineio_path + if not self.engineio_path.endswith('/'): + self.engineio_path += '/' + self.static_files = static_files or {} + self.on_startup = on_startup + self.on_shutdown = on_shutdown + + async def __call__(self, scope, receive, send): + if scope['type'] == 'lifespan': + await self.lifespan(scope, receive, send) + elif scope['type'] in ['http', 'websocket'] and ( + self.engineio_path is None + or self._ensure_trailing_slash(scope['path']).startswith( + self.engineio_path)): + await 
self.engineio_server.handle_request(scope, receive, send) + else: + static_file = get_static_file(scope['path'], self.static_files) \ + if scope['type'] == 'http' and self.static_files else None + if static_file and os.path.exists(static_file['filename']): + await self.serve_static_file(static_file, receive, send) + elif self.other_asgi_app is not None: + await self.other_asgi_app(scope, receive, send) + else: + await self.not_found(receive, send) + + async def serve_static_file(self, static_file, receive, + send): # pragma: no cover + event = await receive() + if event['type'] == 'http.request': + with open(static_file['filename'], 'rb') as f: + payload = f.read() + await send({'type': 'http.response.start', + 'status': 200, + 'headers': [(b'Content-Type', static_file[ + 'content_type'].encode('utf-8'))]}) + await send({'type': 'http.response.body', + 'body': payload}) + + async def lifespan(self, scope, receive, send): + if self.other_asgi_app is not None and self.on_startup is None and \ + self.on_shutdown is None: + # let the other ASGI app handle lifespan events + await self.other_asgi_app(scope, receive, send) + return + + while True: + event = await receive() + if event['type'] == 'lifespan.startup': + if self.on_startup: + try: + await self.on_startup() \ + if asyncio.iscoroutinefunction(self.on_startup) \ + else self.on_startup() + except: + await send({'type': 'lifespan.startup.failed'}) + return + await send({'type': 'lifespan.startup.complete'}) + elif event['type'] == 'lifespan.shutdown': + if self.on_shutdown: + try: + await self.on_shutdown() \ + if asyncio.iscoroutinefunction(self.on_shutdown) \ + else self.on_shutdown() + except: + await send({'type': 'lifespan.shutdown.failed'}) + return + await send({'type': 'lifespan.shutdown.complete'}) + return + + async def not_found(self, receive, send): + """Return a 404 Not Found error to the client.""" + await send({'type': 'http.response.start', + 'status': 404, + 'headers': [(b'Content-Type', 
b'text/plain')]}) + await send({'type': 'http.response.body', + 'body': b'Not Found'}) + + def _ensure_trailing_slash(self, path): + if not path.endswith('/'): + path += '/' + return path + + +async def translate_request(scope, receive, send): + class AwaitablePayload: # pragma: no cover + def __init__(self, payload): + self.payload = payload or b'' + + async def read(self, length=None): + if length is None: + r = self.payload + self.payload = b'' + else: + r = self.payload[:length] + self.payload = self.payload[length:] + return r + + event = await receive() + payload = b'' + if event['type'] == 'http.request': + payload += event.get('body') or b'' + while event.get('more_body'): + event = await receive() + if event['type'] == 'http.request': + payload += event.get('body') or b'' + elif event['type'] == 'websocket.connect': + pass + else: + return {} + + raw_uri = scope['path'] + query_string = '' + if 'query_string' in scope and scope['query_string']: + try: + query_string = scope['query_string'].decode('utf-8') + except UnicodeDecodeError: + pass + else: + raw_uri += '?' 
+ query_string + environ = { + 'wsgi.input': AwaitablePayload(payload), + 'wsgi.errors': sys.stderr, + 'wsgi.version': (1, 0), + 'wsgi.async': True, + 'wsgi.multithread': False, + 'wsgi.multiprocess': False, + 'wsgi.run_once': False, + 'SERVER_SOFTWARE': 'asgi', + 'REQUEST_METHOD': scope.get('method', 'GET'), + 'PATH_INFO': scope['path'], + 'QUERY_STRING': query_string, + 'RAW_URI': raw_uri, + 'SCRIPT_NAME': '', + 'SERVER_PROTOCOL': 'HTTP/1.1', + 'REMOTE_ADDR': '127.0.0.1', + 'REMOTE_PORT': '0', + 'SERVER_NAME': 'asgi', + 'SERVER_PORT': '0', + 'asgi.receive': receive, + 'asgi.send': send, + 'asgi.scope': scope, + } + + for hdr_name, hdr_value in scope['headers']: + try: + hdr_name = hdr_name.upper().decode('utf-8') + hdr_value = hdr_value.decode('utf-8') + except UnicodeDecodeError: + # skip header if it cannot be decoded + continue + if hdr_name == 'CONTENT-TYPE': + environ['CONTENT_TYPE'] = hdr_value + continue + elif hdr_name == 'CONTENT-LENGTH': + environ['CONTENT_LENGTH'] = hdr_value + continue + + key = 'HTTP_%s' % hdr_name.replace('-', '_') + if key in environ: + hdr_value = f'{environ[key]},{hdr_value}' + + environ[key] = hdr_value + + environ['wsgi.url_scheme'] = environ.get('HTTP_X_FORWARDED_PROTO', 'http') + return environ + + +async def make_response(status, headers, payload, environ): + headers = [(h[0].encode('utf-8'), h[1].encode('utf-8')) for h in headers] + if environ['asgi.scope']['type'] == 'websocket': + if status.startswith('200 '): + await environ['asgi.send']({'type': 'websocket.accept', + 'headers': headers}) + else: + if payload: + reason = payload.decode('utf-8') \ + if isinstance(payload, bytes) else str(payload) + await environ['asgi.send']({'type': 'websocket.close', + 'reason': reason}) + else: + await environ['asgi.send']({'type': 'websocket.close'}) + return + + await environ['asgi.send']({'type': 'http.response.start', + 'status': int(status.split(' ')[0]), + 'headers': headers}) + await environ['asgi.send']({'type': 
'http.response.body', + 'body': payload}) + + +class WebSocket: # pragma: no cover + """ + This wrapper class provides an asgi WebSocket interface that is + somewhat compatible with eventlet's implementation. + """ + def __init__(self, handler, server): + self.handler = handler + self.asgi_receive = None + self.asgi_send = None + + async def __call__(self, environ): + self.asgi_receive = environ['asgi.receive'] + self.asgi_send = environ['asgi.send'] + await self.asgi_send({'type': 'websocket.accept'}) + await self.handler(self) + return '' # send nothing as response + + async def close(self): + try: + await self.asgi_send({'type': 'websocket.close'}) + except Exception: + # if the socket is already close we don't care + pass + + async def send(self, message): + msg_bytes = None + msg_text = None + if isinstance(message, bytes): + msg_bytes = message + else: + msg_text = message + await self.asgi_send({'type': 'websocket.send', + 'bytes': msg_bytes, + 'text': msg_text}) + + async def wait(self): + event = await self.asgi_receive() + if event['type'] != 'websocket.receive': + raise OSError() + return event.get('bytes') or event.get('text') + + +_async = { + 'asyncio': True, + 'translate_request': translate_request, + 'make_response': make_response, + 'websocket': WebSocket, +} diff --git a/env/lib/python3.10/site-packages/engineio/async_drivers/eventlet.py b/env/lib/python3.10/site-packages/engineio/async_drivers/eventlet.py new file mode 100644 index 0000000..6361c4d --- /dev/null +++ b/env/lib/python3.10/site-packages/engineio/async_drivers/eventlet.py @@ -0,0 +1,52 @@ +from eventlet.green.threading import Event +from eventlet import queue, sleep, spawn +from eventlet.websocket import WebSocketWSGI as _WebSocketWSGI + + +class EventletThread: # pragma: no cover + """Thread class that uses eventlet green threads. + + Eventlet's own Thread class has a strange bug that causes _DummyThread + objects to be created and leaked, since they are never garbage collected. 
+ """ + def __init__(self, target, args=None, kwargs=None): + self.target = target + self.args = args or () + self.kwargs = kwargs or {} + self.g = None + + def start(self): + self.g = spawn(self.target, *self.args, **self.kwargs) + + def join(self): + if self.g: + return self.g.wait() + + +class WebSocketWSGI(_WebSocketWSGI): # pragma: no cover + def __init__(self, handler, server): + try: + super().__init__( + handler, max_frame_length=int(server.max_http_buffer_size)) + except TypeError: # pragma: no cover + # older versions of eventlet do not support a max frame size + super().__init__(handler) + self._sock = None + + def __call__(self, environ, start_response): + if 'eventlet.input' not in environ: + raise RuntimeError('You need to use the eventlet server. ' + 'See the Deployment section of the ' + 'documentation for more information.') + self._sock = environ['eventlet.input'].get_socket() + return super().__call__(environ, start_response) + + +_async = { + 'thread': EventletThread, + 'queue': queue.Queue, + 'queue_empty': queue.Empty, + 'event': Event, + 'websocket': WebSocketWSGI, + 'sleep': sleep, +} diff --git a/env/lib/python3.10/site-packages/engineio/async_drivers/gevent.py b/env/lib/python3.10/site-packages/engineio/async_drivers/gevent.py new file mode 100644 index 0000000..db284a5 --- /dev/null +++ b/env/lib/python3.10/site-packages/engineio/async_drivers/gevent.py @@ -0,0 +1,83 @@ +import gevent +from gevent import queue +from gevent.event import Event +try: + # use gevent-websocket if installed + import geventwebsocket # noqa + SimpleWebSocketWSGI = None +except ImportError: # pragma: no cover + # fallback to simple_websocket when gevent-websocket is not installed + from engineio.async_drivers._websocket_wsgi import SimpleWebSocketWSGI + + +class Thread(gevent.Greenlet): # pragma: no cover + """ + This wrapper class provides gevent Greenlet interface that is compatible + with the standard library's Thread class. 
+ """ + def __init__(self, target, args=[], kwargs={}): + super().__init__(target, *args, **kwargs) + + def _run(self): + return self.run() + + +if SimpleWebSocketWSGI is not None: + class WebSocketWSGI(SimpleWebSocketWSGI): # pragma: no cover + """ + This wrapper class provides a gevent WebSocket interface that is + compatible with eventlet's implementation, using the simple-websocket + package. + """ + def __init__(self, handler, server): + # to avoid the requirement that the standard library is + # monkey-patched, here we pass the gevent versions of the + # concurrency and networking classes required by simple-websocket + import gevent.event + import gevent.selectors + super().__init__(handler, server, + thread_class=Thread, + event_class=gevent.event.Event, + selector_class=gevent.selectors.DefaultSelector) +else: + class WebSocketWSGI: # pragma: no cover + """ + This wrapper class provides a gevent WebSocket interface that is + compatible with eventlet's implementation, using the gevent-websocket + package. + """ + def __init__(self, handler, server): + self.app = handler + + def __call__(self, environ, start_response): + if 'wsgi.websocket' not in environ: + raise RuntimeError('The gevent-websocket server is not ' + 'configured appropriately. 
' + 'See the Deployment section of the ' + 'documentation for more information.') + self._sock = environ['wsgi.websocket'] + self.environ = environ + self.version = self._sock.version + self.path = self._sock.path + self.origin = self._sock.origin + self.protocol = self._sock.protocol + return self.app(self) + + def close(self): + return self._sock.close() + + def send(self, message): + return self._sock.send(message) + + def wait(self): + return self._sock.receive() + + +_async = { + 'thread': Thread, + 'queue': queue.JoinableQueue, + 'queue_empty': queue.Empty, + 'event': Event, + 'websocket': WebSocketWSGI, + 'sleep': gevent.sleep, +} diff --git a/env/lib/python3.10/site-packages/engineio/async_drivers/gevent_uwsgi.py b/env/lib/python3.10/site-packages/engineio/async_drivers/gevent_uwsgi.py new file mode 100644 index 0000000..b5ccefc --- /dev/null +++ b/env/lib/python3.10/site-packages/engineio/async_drivers/gevent_uwsgi.py @@ -0,0 +1,168 @@ +import gevent +from gevent import queue +from gevent.event import Event +from gevent import selectors +import uwsgi +_websocket_available = hasattr(uwsgi, 'websocket_handshake') + + +class Thread(gevent.Greenlet): # pragma: no cover + """ + This wrapper class provides gevent Greenlet interface that is compatible + with the standard library's Thread class. + """ + def __init__(self, target, args=[], kwargs={}): + super().__init__(target, *args, **kwargs) + + def _run(self): + return self.run() + + +class uWSGIWebSocket: # pragma: no cover + """ + This wrapper class provides a uWSGI WebSocket interface that is + compatible with eventlet's implementation. 
+ """ + def __init__(self, handler, server): + self.app = handler + self._sock = None + self.received_messages = [] + + def __call__(self, environ, start_response): + self._sock = uwsgi.connection_fd() + self.environ = environ + + uwsgi.websocket_handshake() + + self._req_ctx = None + if hasattr(uwsgi, 'request_context'): + # uWSGI >= 2.1.x with support for api access across-greenlets + self._req_ctx = uwsgi.request_context() + else: + # use event and queue for sending messages + self._event = Event() + self._send_queue = queue.Queue() + + # spawn a select greenlet + def select_greenlet_runner(fd, event): + """Sets event when data becomes available to read on fd.""" + sel = selectors.DefaultSelector() + sel.register(fd, selectors.EVENT_READ) + try: + while True: + sel.select() + event.set() + except gevent.GreenletExit: + sel.unregister(fd) + self._select_greenlet = gevent.spawn( + select_greenlet_runner, + self._sock, + self._event) + + self.app(self) + uwsgi.disconnect() + return '' # send nothing as response + + def close(self): + """Disconnects uWSGI from the client.""" + if self._req_ctx is None: + # better kill it here in case wait() is not called again + self._select_greenlet.kill() + self._event.set() + + def _send(self, msg): + """Transmits message either in binary or UTF-8 text mode, + depending on its type.""" + if isinstance(msg, bytes): + method = uwsgi.websocket_send_binary + else: + method = uwsgi.websocket_send + if self._req_ctx is not None: + method(msg, request_context=self._req_ctx) + else: + method(msg) + + def _decode_received(self, msg): + """Returns either bytes or str, depending on message type.""" + if not isinstance(msg, bytes): + # already decoded - do nothing + return msg + # only decode from utf-8 if message is not binary data + type = ord(msg[0:1]) + if type >= 48: # no binary + return msg.decode('utf-8') + # binary message, don't try to decode + return msg + + def send(self, msg): + """Queues a message for sending. 
Real transmission is done in + wait method. + Sends directly if uWSGI version is new enough.""" + if self._req_ctx is not None: + self._send(msg) + else: + self._send_queue.put(msg) + self._event.set() + + def wait(self): + """Waits and returns received messages. + If running in compatibility mode for older uWSGI versions, + it also sends messages that have been queued by send(). + A return value of None means that connection was closed. + This must be called repeatedly. For uWSGI < 2.1.x it must + be called from the main greenlet.""" + while True: + if self._req_ctx is not None: + try: + msg = uwsgi.websocket_recv(request_context=self._req_ctx) + except OSError: # connection closed + self.close() + return None + return self._decode_received(msg) + else: + if self.received_messages: + return self.received_messages.pop(0) + + # we wake up at least every 3 seconds to let uWSGI + # do its ping/ponging + event_set = self._event.wait(timeout=3) + if event_set: + self._event.clear() + # maybe there is something to send + msgs = [] + while True: + try: + msgs.append(self._send_queue.get(block=False)) + except gevent.queue.Empty: + break + for msg in msgs: + try: + self._send(msg) + except OSError: + self.close() + return None + # maybe there is something to receive, if not, at least + # ensure uWSGI does its ping/ponging + while True: + try: + msg = uwsgi.websocket_recv_nb() + except OSError: # connection closed + self.close() + return None + if msg: # message available + self.received_messages.append( + self._decode_received(msg)) + else: + break + if self.received_messages: + return self.received_messages.pop(0) + + +_async = { + 'thread': Thread, + 'queue': queue.JoinableQueue, + 'queue_empty': queue.Empty, + 'event': Event, + 'websocket': uWSGIWebSocket if _websocket_available else None, + 'sleep': gevent.sleep, +} diff --git a/env/lib/python3.10/site-packages/engineio/async_drivers/sanic.py b/env/lib/python3.10/site-packages/engineio/async_drivers/sanic.py new file 
mode 100644 index 0000000..4d6a5b8 --- /dev/null +++ b/env/lib/python3.10/site-packages/engineio/async_drivers/sanic.py @@ -0,0 +1,148 @@ +import sys +from urllib.parse import urlsplit + +try: # pragma: no cover + from sanic.response import HTTPResponse + try: + from sanic.server.protocols.websocket_protocol import WebSocketProtocol + except ImportError: + from sanic.websocket import WebSocketProtocol +except ImportError: + HTTPResponse = None + WebSocketProtocol = None + + +def create_route(app, engineio_server, engineio_endpoint): # pragma: no cover + """This function sets up the engine.io endpoint as a route for the + application. + + Note that both GET and POST requests must be hooked up on the engine.io + endpoint. + """ + app.add_route(engineio_server.handle_request, engineio_endpoint, + methods=['GET', 'POST', 'OPTIONS']) + try: + app.enable_websocket() + except AttributeError: + # ignore, this version does not support websocket + pass + + +def translate_request(request): # pragma: no cover + """This function takes the arguments passed to the request handler and + uses them to generate a WSGI compatible environ dictionary. 
+ """ + class AwaitablePayload: + def __init__(self, payload): + self.payload = payload or b'' + + async def read(self, length=None): + if length is None: + r = self.payload + self.payload = b'' + else: + r = self.payload[:length] + self.payload = self.payload[length:] + return r + + uri_parts = urlsplit(request.url) + environ = { + 'wsgi.input': AwaitablePayload(request.body), + 'wsgi.errors': sys.stderr, + 'wsgi.version': (1, 0), + 'wsgi.async': True, + 'wsgi.multithread': False, + 'wsgi.multiprocess': False, + 'wsgi.run_once': False, + 'SERVER_SOFTWARE': 'sanic', + 'REQUEST_METHOD': request.method, + 'QUERY_STRING': uri_parts.query or '', + 'RAW_URI': request.url, + 'SERVER_PROTOCOL': 'HTTP/' + request.version, + 'REMOTE_ADDR': '127.0.0.1', + 'REMOTE_PORT': '0', + 'SERVER_NAME': 'sanic', + 'SERVER_PORT': '0', + 'sanic.request': request + } + + for hdr_name, hdr_value in request.headers.items(): + hdr_name = hdr_name.upper() + if hdr_name == 'CONTENT-TYPE': + environ['CONTENT_TYPE'] = hdr_value + continue + elif hdr_name == 'CONTENT-LENGTH': + environ['CONTENT_LENGTH'] = hdr_value + continue + + key = 'HTTP_%s' % hdr_name.replace('-', '_') + if key in environ: + hdr_value = f'{environ[key]},{hdr_value}' + + environ[key] = hdr_value + + environ['wsgi.url_scheme'] = environ.get('HTTP_X_FORWARDED_PROTO', 'http') + + path_info = uri_parts.path + + environ['PATH_INFO'] = path_info + environ['SCRIPT_NAME'] = '' + + return environ + + +def make_response(status, headers, payload, environ): # pragma: no cover + """This function generates an appropriate response object for this async + mode. 
+ """ + headers_dict = {} + content_type = None + for h in headers: + if h[0].lower() == 'content-type': + content_type = h[1] + else: + headers_dict[h[0]] = h[1] + return HTTPResponse(body=payload, content_type=content_type, + status=int(status.split()[0]), headers=headers_dict) + + +class WebSocket: # pragma: no cover + """ + This wrapper class provides a sanic WebSocket interface that is + somewhat compatible with eventlet's implementation. + """ + def __init__(self, handler, server): + self.handler = handler + self.server = server + self._sock = None + + async def __call__(self, environ): + request = environ['sanic.request'] + protocol = request.transport.get_protocol() + self._sock = await protocol.websocket_handshake(request) + + self.environ = environ + await self.handler(self) + return self.server._ok() + + async def close(self): + await self._sock.close() + + async def send(self, message): + await self._sock.send(message) + + async def wait(self): + data = await self._sock.recv() + if not isinstance(data, bytes) and \ + not isinstance(data, str): + raise OSError() + return data + + +_async = { + 'asyncio': True, + 'create_route': create_route, + 'translate_request': translate_request, + 'make_response': make_response, + 'websocket': WebSocket if WebSocketProtocol else None, +} diff --git a/env/lib/python3.10/site-packages/engineio/async_drivers/threading.py b/env/lib/python3.10/site-packages/engineio/async_drivers/threading.py new file mode 100644 index 0000000..1615579 --- /dev/null +++ b/env/lib/python3.10/site-packages/engineio/async_drivers/threading.py @@ -0,0 +1,19 @@ +import queue +import threading +import time +from engineio.async_drivers._websocket_wsgi import SimpleWebSocketWSGI + + +class DaemonThread(threading.Thread): # pragma: no cover + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs, daemon=True) + + +_async = { + 'thread': DaemonThread, + 'queue': queue.Queue, + 'queue_empty': queue.Empty, + 'event': 
threading.Event, + 'websocket': SimpleWebSocketWSGI, + 'sleep': time.sleep, +} diff --git a/env/lib/python3.10/site-packages/engineio/async_drivers/tornado.py b/env/lib/python3.10/site-packages/engineio/async_drivers/tornado.py new file mode 100644 index 0000000..abb1e2b --- /dev/null +++ b/env/lib/python3.10/site-packages/engineio/async_drivers/tornado.py @@ -0,0 +1,182 @@ +import asyncio +import sys +from urllib.parse import urlsplit +from .. import exceptions + +import tornado.web +import tornado.websocket + + +def get_tornado_handler(engineio_server): + class Handler(tornado.websocket.WebSocketHandler): # pragma: no cover + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if isinstance(engineio_server.cors_allowed_origins, str): + if engineio_server.cors_allowed_origins == '*': + self.allowed_origins = None + else: + self.allowed_origins = [ + engineio_server.cors_allowed_origins] + else: + self.allowed_origins = engineio_server.cors_allowed_origins + self.receive_queue = asyncio.Queue() + + async def get(self, *args, **kwargs): + if self.request.headers.get('Upgrade', '').lower() == 'websocket': + ret = super().get(*args, **kwargs) + if asyncio.iscoroutine(ret): + await ret + else: + await engineio_server.handle_request(self) + + async def open(self, *args, **kwargs): + # this is the handler for the websocket request + asyncio.ensure_future(engineio_server.handle_request(self)) + + async def post(self, *args, **kwargs): + await engineio_server.handle_request(self) + + async def options(self, *args, **kwargs): + await engineio_server.handle_request(self) + + async def on_message(self, message): + await self.receive_queue.put(message) + + async def get_next_message(self): + return await self.receive_queue.get() + + def on_close(self): + self.receive_queue.put_nowait(None) + + def check_origin(self, origin): + if self.allowed_origins is None or origin in self.allowed_origins: + return True + return super().check_origin(origin) + + def 
get_compression_options(self): + # enable compression + return {} + + return Handler + + +def translate_request(handler): + """This function takes the arguments passed to the request handler and + uses them to generate a WSGI compatible environ dictionary. + """ + class AwaitablePayload: + def __init__(self, payload): + self.payload = payload or b'' + + async def read(self, length=None): + if length is None: + r = self.payload + self.payload = b'' + else: + r = self.payload[:length] + self.payload = self.payload[length:] + return r + + payload = handler.request.body + + uri_parts = urlsplit(handler.request.path) + full_uri = handler.request.path + if handler.request.query: # pragma: no cover + full_uri += '?' + handler.request.query + environ = { + 'wsgi.input': AwaitablePayload(payload), + 'wsgi.errors': sys.stderr, + 'wsgi.version': (1, 0), + 'wsgi.async': True, + 'wsgi.multithread': False, + 'wsgi.multiprocess': False, + 'wsgi.run_once': False, + 'SERVER_SOFTWARE': 'aiohttp', + 'REQUEST_METHOD': handler.request.method, + 'QUERY_STRING': handler.request.query or '', + 'RAW_URI': full_uri, + 'SERVER_PROTOCOL': 'HTTP/%s' % handler.request.version, + 'REMOTE_ADDR': '127.0.0.1', + 'REMOTE_PORT': '0', + 'SERVER_NAME': 'aiohttp', + 'SERVER_PORT': '0', + 'tornado.handler': handler + } + + for hdr_name, hdr_value in handler.request.headers.items(): + hdr_name = hdr_name.upper() + if hdr_name == 'CONTENT-TYPE': + environ['CONTENT_TYPE'] = hdr_value + continue + elif hdr_name == 'CONTENT-LENGTH': + environ['CONTENT_LENGTH'] = hdr_value + continue + + key = 'HTTP_%s' % hdr_name.replace('-', '_') + environ[key] = hdr_value + + environ['wsgi.url_scheme'] = environ.get('HTTP_X_FORWARDED_PROTO', 'http') + + path_info = uri_parts.path + + environ['PATH_INFO'] = path_info + environ['SCRIPT_NAME'] = '' + + return environ + + +def make_response(status, headers, payload, environ): + """This function generates an appropriate response object for this async + mode. 
+ """ + tornado_handler = environ['tornado.handler'] + try: + tornado_handler.set_status(int(status.split()[0])) + except RuntimeError: # pragma: no cover + # for websocket connections Tornado does not accept a response, since + # it already emitted the 101 status code + return + for header, value in headers: + tornado_handler.set_header(header, value) + tornado_handler.write(payload) + tornado_handler.finish() + + +class WebSocket: # pragma: no cover + """ + This wrapper class provides a tornado WebSocket interface that is + somewhat compatible with eventlet's implementation. + """ + def __init__(self, handler, server): + self.handler = handler + self.tornado_handler = None + + async def __call__(self, environ): + self.tornado_handler = environ['tornado.handler'] + self.environ = environ + await self.handler(self) + + async def close(self): + self.tornado_handler.close() + + async def send(self, message): + try: + self.tornado_handler.write_message( + message, binary=isinstance(message, bytes)) + except tornado.websocket.WebSocketClosedError: + raise exceptions.EngineIOError() + + async def wait(self): + msg = await self.tornado_handler.get_next_message() + if not isinstance(msg, bytes) and \ + not isinstance(msg, str): + raise OSError() + return msg + + +_async = { + 'asyncio': True, + 'translate_request': translate_request, + 'make_response': make_response, + 'websocket': WebSocket, +} diff --git a/env/lib/python3.10/site-packages/engineio/async_server.py b/env/lib/python3.10/site-packages/engineio/async_server.py new file mode 100644 index 0000000..4bf28f2 --- /dev/null +++ b/env/lib/python3.10/site-packages/engineio/async_server.py @@ -0,0 +1,611 @@ +import asyncio +import urllib + +from . import base_server +from . import exceptions +from . import packet +from . import async_socket + +# this set is used to keep references to background tasks to prevent them from +# being garbage collected mid-execution. 
Solution taken from +# https://docs.python.org/3/library/asyncio-task.html#asyncio.create_task +task_reference_holder = set() + + +class AsyncServer(base_server.BaseServer): + """An Engine.IO server for asyncio. + + This class implements a fully compliant Engine.IO web server with support + for websocket and long-polling transports, compatible with the asyncio + framework on Python 3.5 or newer. + + :param async_mode: The asynchronous model to use. See the Deployment + section in the documentation for a description of the + available options. Valid async modes are "aiohttp", + "sanic", "tornado" and "asgi". If this argument is not + given, "aiohttp" is tried first, followed by "sanic", + "tornado", and finally "asgi". The first async mode that + has all its dependencies installed is the one that is + chosen. + :param ping_interval: The interval in seconds at which the server pings + the client. The default is 25 seconds. For advanced + control, a two element tuple can be given, where + the first number is the ping interval and the second + is a grace period added by the server. + :param ping_timeout: The time in seconds that the client waits for the + server to respond before disconnecting. The default + is 20 seconds. + :param max_http_buffer_size: The maximum size that is accepted for incoming + messages. The default is 1,000,000 bytes. In + spite of its name, the value set in this + argument is enforced for HTTP long-polling and + WebSocket connections. + :param allow_upgrades: Whether to allow transport upgrades or not. + :param http_compression: Whether to compress packages when using the + polling transport. + :param compression_threshold: Only compress messages when their byte size + is greater than this value. + :param cookie: If set to a string, it is the name of the HTTP cookie the + server sends back tot he client containing the client + session id. 
If set to a dictionary, the ``'name'`` key + contains the cookie name and other keys define cookie + attributes, where the value of each attribute can be a + string, a callable with no arguments, or a boolean. If set + to ``None`` (the default), a cookie is not sent to the + client. + :param cors_allowed_origins: Origin or list of origins that are allowed to + connect to this server. Only the same origin + is allowed by default. Set this argument to + ``'*'`` to allow all origins, or to ``[]`` to + disable CORS handling. + :param cors_credentials: Whether credentials (cookies, authentication) are + allowed in requests to this server. + :param logger: To enable logging set to ``True`` or pass a logger object to + use. To disable logging set to ``False``. Note that fatal + errors are logged even when ``logger`` is ``False``. + :param json: An alternative json module to use for encoding and decoding + packets. Custom json modules must have ``dumps`` and ``loads`` + functions that are compatible with the standard library + versions. + :param async_handlers: If set to ``True``, run message event handlers in + non-blocking threads. To run handlers synchronously, + set to ``False``. The default is ``True``. + :param monitor_clients: If set to ``True``, a background task will ensure + inactive clients are closed. Set to ``False`` to + disable the monitoring task (not recommended). The + default is ``True``. + :param transports: The list of allowed transports. Valid transports + are ``'polling'`` and ``'websocket'``. Defaults to + ``['polling', 'websocket']``. + :param kwargs: Reserved for future extensions, any additional parameters + given as keyword arguments will be silently ignored. 
+ """ + def is_asyncio_based(self): + return True + + def async_modes(self): + return ['aiohttp', 'sanic', 'tornado', 'asgi'] + + def attach(self, app, engineio_path='engine.io'): + """Attach the Engine.IO server to an application.""" + engineio_path = engineio_path.strip('/') + self._async['create_route'](app, self, f'/{engineio_path}/') + + async def send(self, sid, data): + """Send a message to a client. + + :param sid: The session id of the recipient client. + :param data: The data to send to the client. Data can be of type + ``str``, ``bytes``, ``list`` or ``dict``. If a ``list`` + or ``dict``, the data will be serialized as JSON. + + Note: this method is a coroutine. + """ + await self.send_packet(sid, packet.Packet(packet.MESSAGE, data=data)) + + async def send_packet(self, sid, pkt): + """Send a raw packet to a client. + + :param sid: The session id of the recipient client. + :param pkt: The packet to send to the client. + + Note: this method is a coroutine. + """ + try: + socket = self._get_socket(sid) + except KeyError: + # the socket is not available + self.logger.warning('Cannot send to sid %s', sid) + return + await socket.send(pkt) + + async def get_session(self, sid): + """Return the user session for a client. + + :param sid: The session id of the client. + + The return value is a dictionary. Modifications made to this + dictionary are not guaranteed to be preserved. If you want to modify + the user session, use the ``session`` context manager instead. + """ + socket = self._get_socket(sid) + return socket.session + + async def save_session(self, sid, session): + """Store the user session for a client. + + :param sid: The session id of the client. + :param session: The session dictionary. + """ + socket = self._get_socket(sid) + socket.session = session + + def session(self, sid): + """Return the user session for a client with context manager syntax. + + :param sid: The session id of the client. 
+ + This is a context manager that returns the user session dictionary for + the client. Any changes that are made to this dictionary inside the + context manager block are saved back to the session. Example usage:: + + @eio.on('connect') + def on_connect(sid, environ): + username = authenticate_user(environ) + if not username: + return False + with eio.session(sid) as session: + session['username'] = username + + @eio.on('message') + def on_message(sid, msg): + async with eio.session(sid) as session: + print('received message from ', session['username']) + """ + class _session_context_manager: + def __init__(self, server, sid): + self.server = server + self.sid = sid + self.session = None + + async def __aenter__(self): + self.session = await self.server.get_session(sid) + return self.session + + async def __aexit__(self, *args): + await self.server.save_session(sid, self.session) + + return _session_context_manager(self, sid) + + async def disconnect(self, sid=None): + """Disconnect a client. + + :param sid: The session id of the client to close. If this parameter + is not given, then all clients are closed. + + Note: this method is a coroutine. + """ + if sid is not None: + try: + socket = self._get_socket(sid) + except KeyError: # pragma: no cover + # the socket was already closed or gone + pass + else: + await socket.close(reason=self.reason.SERVER_DISCONNECT) + if sid in self.sockets: # pragma: no cover + del self.sockets[sid] + else: + await asyncio.wait([ + asyncio.create_task(client.close( + reason=self.reason.SERVER_DISCONNECT)) + for client in self.sockets.values() + ]) + self.sockets = {} + + async def handle_request(self, *args, **kwargs): + """Handle an HTTP request from the client. + + This is the entry point of the Engine.IO application. This function + returns the HTTP response to deliver to the client. + + Note: this method is a coroutine. 
+ """ + translate_request = self._async['translate_request'] + if asyncio.iscoroutinefunction(translate_request): + environ = await translate_request(*args, **kwargs) + else: + environ = translate_request(*args, **kwargs) + + if self.cors_allowed_origins != []: + # Validate the origin header if present + # This is important for WebSocket more than for HTTP, since + # browsers only apply CORS controls to HTTP. + origin = environ.get('HTTP_ORIGIN') + if origin: + allowed_origins = self._cors_allowed_origins(environ) + if allowed_origins is not None and origin not in \ + allowed_origins: + self._log_error_once( + origin + ' is not an accepted origin.', 'bad-origin') + return await self._make_response( + self._bad_request( + origin + ' is not an accepted origin.'), + environ) + + method = environ['REQUEST_METHOD'] + query = urllib.parse.parse_qs(environ.get('QUERY_STRING', '')) + + sid = query['sid'][0] if 'sid' in query else None + jsonp = False + jsonp_index = None + + # make sure the client uses an allowed transport + transport = query.get('transport', ['polling'])[0] + if transport not in self.transports: + self._log_error_once('Invalid transport', 'bad-transport') + return await self._make_response( + self._bad_request('Invalid transport'), environ) + + # make sure the client speaks a compatible Engine.IO version + sid = query['sid'][0] if 'sid' in query else None + if sid is None and query.get('EIO') != ['4']: + self._log_error_once( + 'The client is using an unsupported version of the Socket.IO ' + 'or Engine.IO protocols', 'bad-version' + ) + return await self._make_response(self._bad_request( + 'The client is using an unsupported version of the Socket.IO ' + 'or Engine.IO protocols' + ), environ) + + if 'j' in query: + jsonp = True + try: + jsonp_index = int(query['j'][0]) + except (ValueError, KeyError, IndexError): + # Invalid JSONP index number + pass + + if jsonp and jsonp_index is None: + self._log_error_once('Invalid JSONP index number', + 
'bad-jsonp-index') + r = self._bad_request('Invalid JSONP index number') + elif method == 'GET': + upgrade_header = environ.get('HTTP_UPGRADE').lower() \ + if 'HTTP_UPGRADE' in environ else None + if sid is None: + # transport must be one of 'polling' or 'websocket'. + # if 'websocket', the HTTP_UPGRADE header must match. + if transport == 'polling' \ + or transport == upgrade_header == 'websocket': + r = await self._handle_connect(environ, transport, + jsonp_index) + else: + self._log_error_once('Invalid websocket upgrade', + 'bad-upgrade') + r = self._bad_request('Invalid websocket upgrade') + else: + if sid not in self.sockets: + self._log_error_once(f'Invalid session {sid}', 'bad-sid') + r = self._bad_request(f'Invalid session {sid}') + else: + try: + socket = self._get_socket(sid) + except KeyError as e: # pragma: no cover + self._log_error_once(f'{e} {sid}', 'bad-sid') + r = self._bad_request(f'{e} {sid}') + else: + if self.transport(sid) != transport and \ + transport != upgrade_header: + self._log_error_once( + f'Invalid transport for session {sid}', + 'bad-transport') + r = self._bad_request('Invalid transport') + else: + try: + packets = await socket.handle_get_request( + environ) + if isinstance(packets, list): + r = self._ok(packets, + jsonp_index=jsonp_index) + else: + r = packets + except exceptions.EngineIOError: + if sid in self.sockets: # pragma: no cover + await self.disconnect(sid) + r = self._bad_request() + if sid in self.sockets and \ + self.sockets[sid].closed: + del self.sockets[sid] + elif method == 'POST': + if sid is None or sid not in self.sockets: + self._log_error_once(f'Invalid session {sid}', 'bad-sid') + r = self._bad_request(f'Invalid session {sid}') + else: + socket = self._get_socket(sid) + try: + await socket.handle_post_request(environ) + r = self._ok(jsonp_index=jsonp_index) + except exceptions.EngineIOError: + if sid in self.sockets: # pragma: no cover + await self.disconnect(sid) + r = self._bad_request() + except: # pragma: 
no cover + # for any other unexpected errors, we log the error + # and keep going + self.logger.exception('post request handler error') + r = self._ok(jsonp_index=jsonp_index) + elif method == 'OPTIONS': + r = self._ok() + else: + self.logger.warning('Method %s not supported', method) + r = self._method_not_found() + if not isinstance(r, dict): + return r + if self.http_compression and \ + len(r['response']) >= self.compression_threshold: + encodings = [e.split(';')[0].strip() for e in + environ.get('HTTP_ACCEPT_ENCODING', '').split(',')] + for encoding in encodings: + if encoding in self.compression_methods: + r['response'] = \ + getattr(self, '_' + encoding)(r['response']) + r['headers'] += [('Content-Encoding', encoding)] + break + return await self._make_response(r, environ) + + async def shutdown(self): + """Stop Socket.IO background tasks. + + This method stops background activity initiated by the Socket.IO + server. It must be called before shutting down the web server. + """ + self.logger.info('Socket.IO is shutting down') + if self.service_task_event: # pragma: no cover + self.service_task_event.set() + await self.service_task_handle + self.service_task_handle = None + + def start_background_task(self, target, *args, **kwargs): + """Start a background task using the appropriate async model. + + This is a utility function that applications can use to start a + background task using the method that is compatible with the + selected async mode. + + :param target: the target function to execute. + :param args: arguments to pass to the function. + :param kwargs: keyword arguments to pass to the function. + + The return value is a ``asyncio.Task`` object. + """ + return asyncio.ensure_future(target(*args, **kwargs)) + + async def sleep(self, seconds=0): + """Sleep for the requested amount of time using the appropriate async + model. 
+ + This is a utility function that applications can use to put a task to + sleep without having to worry about using the correct call for the + selected async mode. + + Note: this method is a coroutine. + """ + return await asyncio.sleep(seconds) + + def create_queue(self, *args, **kwargs): + """Create a queue object using the appropriate async model. + + This is a utility function that applications can use to create a queue + without having to worry about using the correct call for the selected + async mode. For asyncio based async modes, this returns an instance of + ``asyncio.Queue``. + """ + return asyncio.Queue(*args, **kwargs) + + def get_queue_empty_exception(self): + """Return the queue empty exception for the appropriate async model. + + This is a utility function that applications can use to work with a + queue without having to worry about using the correct call for the + selected async mode. For asyncio based async modes, this returns an + instance of ``asyncio.QueueEmpty``. + """ + return asyncio.QueueEmpty + + def create_event(self, *args, **kwargs): + """Create an event object using the appropriate async model. + + This is a utility function that applications can use to create an + event without having to worry about using the correct call for the + selected async mode. For asyncio based async modes, this returns + an instance of ``asyncio.Event``. 
+ """ + return asyncio.Event(*args, **kwargs) + + async def _make_response(self, response_dict, environ): + cors_headers = self._cors_headers(environ) + make_response = self._async['make_response'] + if asyncio.iscoroutinefunction(make_response): + response = await make_response( + response_dict['status'], + response_dict['headers'] + cors_headers, + response_dict['response'], environ) + else: + response = make_response( + response_dict['status'], + response_dict['headers'] + cors_headers, + response_dict['response'], environ) + return response + + async def _handle_connect(self, environ, transport, jsonp_index=None): + """Handle a client connection request.""" + if self.start_service_task: + # start the service task to monitor connected clients + self.start_service_task = False + self.service_task_handle = self.start_background_task( + self._service_task) + + sid = self.generate_id() + s = async_socket.AsyncSocket(self, sid) + self.sockets[sid] = s + + pkt = packet.Packet(packet.OPEN, { + 'sid': sid, + 'upgrades': self._upgrades(sid, transport), + 'pingTimeout': int(self.ping_timeout * 1000), + 'pingInterval': int( + self.ping_interval + self.ping_interval_grace_period) * 1000, + 'maxPayload': self.max_http_buffer_size, + }) + await s.send(pkt) + s.schedule_ping() + + ret = await self._trigger_event('connect', sid, environ, + run_async=False) + if ret is not None and ret is not True: + del self.sockets[sid] + self.logger.warning('Application rejected connection') + return self._unauthorized(ret or None) + + if transport == 'websocket': + ret = await s.handle_get_request(environ) + if s.closed and sid in self.sockets: + # websocket connection ended, so we are done + del self.sockets[sid] + return ret + else: + s.connected = True + headers = None + if self.cookie: + if isinstance(self.cookie, dict): + headers = [( + 'Set-Cookie', + self._generate_sid_cookie(sid, self.cookie) + )] + else: + headers = [( + 'Set-Cookie', + self._generate_sid_cookie(sid, { + 'name': 
self.cookie, 'path': '/', 'SameSite': 'Lax' + }) + )] + try: + return self._ok(await s.poll(), headers=headers, + jsonp_index=jsonp_index) + except exceptions.QueueEmpty: + return self._bad_request() + + async def _trigger_event(self, event, *args, **kwargs): + """Invoke an event handler.""" + run_async = kwargs.pop('run_async', False) + ret = None + if event in self.handlers: + if asyncio.iscoroutinefunction(self.handlers[event]): + async def run_async_handler(): + try: + try: + return await self.handlers[event](*args) + except TypeError: + if event == 'disconnect' and \ + len(args) == 2: # pragma: no branch + # legacy disconnect events do not have a reason + # argument + return await self.handlers[event](args[0]) + else: # pragma: no cover + raise + except asyncio.CancelledError: # pragma: no cover + pass + except: + self.logger.exception(event + ' async handler error') + if event == 'connect': + # if connect handler raised error we reject the + # connection + return False + + if run_async: + ret = self.start_background_task(run_async_handler) + task_reference_holder.add(ret) + ret.add_done_callback(task_reference_holder.discard) + else: + ret = await run_async_handler() + else: + async def run_sync_handler(): + try: + try: + return self.handlers[event](*args) + except TypeError: + if event == 'disconnect' and \ + len(args) == 2: # pragma: no branch + # legacy disconnect events do not have a reason + # argument + return self.handlers[event](args[0]) + else: # pragma: no cover + raise + except: + self.logger.exception(event + ' handler error') + if event == 'connect': + # if connect handler raised error we reject the + # connection + return False + + if run_async: + ret = self.start_background_task(run_sync_handler) + task_reference_holder.add(ret) + ret.add_done_callback(task_reference_holder.discard) + else: + ret = await run_sync_handler() + return ret + + async def _service_task(self): # pragma: no cover + """Monitor connected clients and clean up those that 
time out.""" + loop = asyncio.get_running_loop() + self.service_task_event = self.create_event() + while not self.service_task_event.is_set(): + if len(self.sockets) == 0: + # nothing to do + try: + await asyncio.wait_for(self.service_task_event.wait(), + timeout=self.ping_timeout) + break + except asyncio.TimeoutError: + continue + + # go through the entire client list in a ping interval cycle + sleep_interval = self.ping_timeout / len(self.sockets) + + try: + # iterate over the current clients + for s in self.sockets.copy().values(): + if s.closed: + try: + del self.sockets[s.sid] + except KeyError: + # the socket could have also been removed by + # the _get_socket() method from another thread + pass + elif not s.closing: + await s.check_ping_timeout() + try: + await asyncio.wait_for(self.service_task_event.wait(), + timeout=sleep_interval) + raise KeyboardInterrupt() + except asyncio.TimeoutError: + continue + except ( + SystemExit, + KeyboardInterrupt, + asyncio.CancelledError, + GeneratorExit, + ): + self.logger.info('service task canceled') + break + except: + if loop.is_closed(): + self.logger.info('event loop is closed, exiting service ' + 'task') + break + + # an unexpected exception has occurred, log it and continue + self.logger.exception('service task exception') diff --git a/env/lib/python3.10/site-packages/engineio/async_socket.py b/env/lib/python3.10/site-packages/engineio/async_socket.py new file mode 100644 index 0000000..cfdbe1a --- /dev/null +++ b/env/lib/python3.10/site-packages/engineio/async_socket.py @@ -0,0 +1,261 @@ +import asyncio +import sys +import time + +from . import base_socket +from . import exceptions +from . import packet +from . 
import payload + + +class AsyncSocket(base_socket.BaseSocket): + async def poll(self): + """Wait for packets to send to the client.""" + try: + packets = [await asyncio.wait_for( + self.queue.get(), + self.server.ping_interval + self.server.ping_timeout)] + self.queue.task_done() + except (asyncio.TimeoutError, asyncio.CancelledError): + raise exceptions.QueueEmpty() + if packets == [None]: + return [] + while True: + try: + pkt = self.queue.get_nowait() + self.queue.task_done() + if pkt is None: + self.queue.put_nowait(None) + break + packets.append(pkt) + except asyncio.QueueEmpty: + break + return packets + + async def receive(self, pkt): + """Receive packet from the client.""" + self.server.logger.info('%s: Received packet %s data %s', + self.sid, packet.packet_names[pkt.packet_type], + pkt.data if not isinstance(pkt.data, bytes) + else '') + if pkt.packet_type == packet.PONG: + self.schedule_ping() + elif pkt.packet_type == packet.MESSAGE: + await self.server._trigger_event( + 'message', self.sid, pkt.data, + run_async=self.server.async_handlers) + elif pkt.packet_type == packet.UPGRADE: + await self.send(packet.Packet(packet.NOOP)) + elif pkt.packet_type == packet.CLOSE: + await self.close(wait=False, abort=True, + reason=self.server.reason.CLIENT_DISCONNECT) + else: + raise exceptions.UnknownPacketError() + + async def check_ping_timeout(self): + """Make sure the client is still sending pings.""" + if self.closed: + raise exceptions.SocketIsClosedError() + if self.last_ping and \ + time.time() - self.last_ping > self.server.ping_timeout: + self.server.logger.info('%s: Client is gone, closing socket', + self.sid) + # Passing abort=False here will cause close() to write a + # CLOSE packet. 
This has the effect of updating half-open sockets + # to their correct state of disconnected + await self.close(wait=False, abort=False, + reason=self.server.reason.PING_TIMEOUT) + return False + return True + + async def send(self, pkt): + """Send a packet to the client.""" + if not await self.check_ping_timeout(): + return + else: + await self.queue.put(pkt) + self.server.logger.info('%s: Sending packet %s data %s', + self.sid, packet.packet_names[pkt.packet_type], + pkt.data if not isinstance(pkt.data, bytes) + else '') + + async def handle_get_request(self, environ): + """Handle a long-polling GET request from the client.""" + connections = [ + s.strip() + for s in environ.get('HTTP_CONNECTION', '').lower().split(',')] + transport = environ.get('HTTP_UPGRADE', '').lower() + if 'upgrade' in connections and transport in self.upgrade_protocols: + self.server.logger.info('%s: Received request to upgrade to %s', + self.sid, transport) + return await getattr(self, '_upgrade_' + transport)(environ) + if self.upgrading or self.upgraded: + # we are upgrading to WebSocket, do not return any more packets + # through the polling endpoint + return [packet.Packet(packet.NOOP)] + try: + packets = await self.poll() + except exceptions.QueueEmpty: + exc = sys.exc_info() + await self.close(wait=False, + reason=self.server.reason.TRANSPORT_ERROR) + raise exc[1].with_traceback(exc[2]) + return packets + + async def handle_post_request(self, environ): + """Handle a long-polling POST request from the client.""" + length = int(environ.get('CONTENT_LENGTH', '0')) + if length > self.server.max_http_buffer_size: + raise exceptions.ContentTooLongError() + else: + body = (await environ['wsgi.input'].read(length)).decode('utf-8') + p = payload.Payload(encoded_payload=body) + for pkt in p.packets: + await self.receive(pkt) + + async def close(self, wait=True, abort=False, reason=None): + """Close the socket connection.""" + if not self.closed and not self.closing: + self.closing = True + 
await self.server._trigger_event( + 'disconnect', self.sid, + reason or self.server.reason.SERVER_DISCONNECT, + run_async=False) + if not abort: + await self.send(packet.Packet(packet.CLOSE)) + self.closed = True + if wait: + await self.queue.join() + + def schedule_ping(self): + self.server.start_background_task(self._send_ping) + + async def _send_ping(self): + self.last_ping = None + await asyncio.sleep(self.server.ping_interval) + if not self.closing and not self.closed: + self.last_ping = time.time() + await self.send(packet.Packet(packet.PING)) + + async def _upgrade_websocket(self, environ): + """Upgrade the connection from polling to websocket.""" + if self.upgraded: + raise OSError('Socket has been upgraded already') + if self.server._async['websocket'] is None: + # the selected async mode does not support websocket + return self.server._bad_request() + ws = self.server._async['websocket']( + self._websocket_handler, self.server) + return await ws(environ) + + async def _websocket_handler(self, ws): + """Engine.IO handler for websocket transport.""" + async def websocket_wait(): + data = await ws.wait() + if data and len(data) > self.server.max_http_buffer_size: + raise ValueError('packet is too large') + return data + + if self.connected: + # the socket was already connected, so this is an upgrade + self.upgrading = True # hold packet sends during the upgrade + + try: + pkt = await websocket_wait() + except OSError: # pragma: no cover + return + decoded_pkt = packet.Packet(encoded_packet=pkt) + if decoded_pkt.packet_type != packet.PING or \ + decoded_pkt.data != 'probe': + self.server.logger.info( + '%s: Failed websocket upgrade, no PING packet', self.sid) + self.upgrading = False + return + await ws.send(packet.Packet(packet.PONG, data='probe').encode()) + await self.queue.put(packet.Packet(packet.NOOP)) # end poll + + try: + pkt = await websocket_wait() + except OSError: # pragma: no cover + self.upgrading = False + return + decoded_pkt = 
packet.Packet(encoded_packet=pkt) + if decoded_pkt.packet_type != packet.UPGRADE: + self.upgraded = False + self.server.logger.info( + ('%s: Failed websocket upgrade, expected UPGRADE packet, ' + 'received %s instead.'), + self.sid, pkt) + self.upgrading = False + return + self.upgraded = True + self.upgrading = False + else: + self.connected = True + self.upgraded = True + + # start separate writer thread + async def writer(): + while True: + packets = None + try: + packets = await self.poll() + except exceptions.QueueEmpty: + break + if not packets: + # empty packet list returned -> connection closed + break + try: + for pkt in packets: + await ws.send(pkt.encode()) + except: + break + await ws.close() + + writer_task = asyncio.ensure_future(writer()) + + self.server.logger.info( + '%s: Upgrade to websocket successful', self.sid) + + while True: + p = None + wait_task = asyncio.ensure_future(websocket_wait()) + try: + p = await asyncio.wait_for( + wait_task, + self.server.ping_interval + self.server.ping_timeout) + except asyncio.CancelledError: # pragma: no cover + # there is a bug (https://bugs.python.org/issue30508) in + # asyncio that causes a "Task exception never retrieved" error + # to appear when wait_task raises an exception before it gets + # cancelled. Calling wait_task.exception() prevents the error + # from being issued in Python 3.6, but causes other errors in + # other versions, so we run it with all errors suppressed and + # hope for the best. 
+ try: + wait_task.exception() + except: + pass + break + except: + break + if p is None: + # connection closed by client + break + pkt = packet.Packet(encoded_packet=p) + try: + await self.receive(pkt) + except exceptions.UnknownPacketError: # pragma: no cover + pass + except exceptions.SocketIsClosedError: # pragma: no cover + self.server.logger.info('Receive error -- socket is closed') + break + except: # pragma: no cover + # if we get an unexpected exception we log the error and exit + # the connection properly + self.server.logger.exception('Unknown receive error') + + await self.queue.put(None) # unlock the writer task so it can exit + await asyncio.wait_for(writer_task, timeout=None) + await self.close(wait=False, abort=True, + reason=self.server.reason.TRANSPORT_CLOSE) diff --git a/env/lib/python3.10/site-packages/engineio/base_client.py b/env/lib/python3.10/site-packages/engineio/base_client.py new file mode 100644 index 0000000..912baf7 --- /dev/null +++ b/env/lib/python3.10/site-packages/engineio/base_client.py @@ -0,0 +1,158 @@ +import logging +import signal +import threading +import time +import urllib +from . import packet + +default_logger = logging.getLogger('engineio.client') +connected_clients = [] + + +def signal_handler(sig, frame): + """SIGINT handler. + + Disconnect all active clients and then invoke the original signal handler. + """ + for client in connected_clients[:]: + if not client.is_asyncio_based(): + client.disconnect() + if callable(original_signal_handler): + return original_signal_handler(sig, frame) + else: # pragma: no cover + # Handle case where no original SIGINT handler was present. + return signal.default_int_handler(sig, frame) + + +original_signal_handler = None + + +class BaseClient: + event_names = ['connect', 'disconnect', 'message'] + + class reason: + """Disconnection reasons.""" + #: Client-initiated disconnection. + CLIENT_DISCONNECT = 'client disconnect' + #: Server-initiated disconnection. 
+ SERVER_DISCONNECT = 'server disconnect' + #: Transport error. + TRANSPORT_ERROR = 'transport error' + + def __init__(self, logger=False, json=None, request_timeout=5, + http_session=None, ssl_verify=True, handle_sigint=True, + websocket_extra_options=None, timestamp_requests=True): + global original_signal_handler + if handle_sigint and original_signal_handler is None and \ + threading.current_thread() == threading.main_thread(): + original_signal_handler = signal.signal(signal.SIGINT, + signal_handler) + self.handlers = {} + self.base_url = None + self.transports = None + self.current_transport = None + self.sid = None + self.upgrades = None + self.ping_interval = None + self.ping_timeout = None + self.http = http_session + self.external_http = http_session is not None + self.handle_sigint = handle_sigint + self.ws = None + self.read_loop_task = None + self.write_loop_task = None + self.queue = None + self.state = 'disconnected' + self.ssl_verify = ssl_verify + self.websocket_extra_options = websocket_extra_options or {} + self.timestamp_requests = timestamp_requests + + if json is not None: + packet.Packet.json = json + if not isinstance(logger, bool): + self.logger = logger + else: + self.logger = default_logger + if self.logger.level == logging.NOTSET: + if logger: + self.logger.setLevel(logging.INFO) + else: + self.logger.setLevel(logging.ERROR) + self.logger.addHandler(logging.StreamHandler()) + + self.request_timeout = request_timeout + + def is_asyncio_based(self): + return False + + def on(self, event, handler=None): + """Register an event handler. + + :param event: The event name. Can be ``'connect'``, ``'message'`` or + ``'disconnect'``. + :param handler: The function that should be invoked to handle the + event. When this parameter is not given, the method + acts as a decorator for the handler function. 
+ + Example usage:: + + # as a decorator: + @eio.on('connect') + def connect_handler(): + print('Connection request') + + # as a method: + def message_handler(msg): + print('Received message: ', msg) + eio.send('response') + eio.on('message', message_handler) + """ + if event not in self.event_names: + raise ValueError('Invalid event') + + def set_handler(handler): + self.handlers[event] = handler + return handler + + if handler is None: + return set_handler + set_handler(handler) + + def transport(self): + """Return the name of the transport currently in use. + + The possible values returned by this function are ``'polling'`` and + ``'websocket'``. + """ + return self.current_transport + + def _reset(self): + self.state = 'disconnected' + self.sid = None + + def _get_engineio_url(self, url, engineio_path, transport): + """Generate the Engine.IO connection URL.""" + engineio_path = engineio_path.strip('/') + parsed_url = urllib.parse.urlparse(url) + + if transport == 'polling': + scheme = 'http' + elif transport == 'websocket': + scheme = 'ws' + else: # pragma: no cover + raise ValueError('invalid transport') + if parsed_url.scheme in ['https', 'wss']: + scheme += 's' + + return ('{scheme}://{netloc}/{path}/?{query}' + '{sep}transport={transport}&EIO=4').format( + scheme=scheme, netloc=parsed_url.netloc, + path=engineio_path, query=parsed_url.query, + sep='&' if parsed_url.query else '', + transport=transport) + + def _get_url_timestamp(self): + """Generate the Engine.IO query string timestamp.""" + if not self.timestamp_requests: + return '' + return '&t=' + str(time.time()) diff --git a/env/lib/python3.10/site-packages/engineio/base_server.py b/env/lib/python3.10/site-packages/engineio/base_server.py new file mode 100644 index 0000000..bfb4165 --- /dev/null +++ b/env/lib/python3.10/site-packages/engineio/base_server.py @@ -0,0 +1,351 @@ +import base64 +import gzip +import importlib +import io +import logging +import secrets +import zlib + +from . 
import packet +from . import payload + +default_logger = logging.getLogger('engineio.server') + + +class BaseServer: + compression_methods = ['gzip', 'deflate'] + event_names = ['connect', 'disconnect', 'message'] + valid_transports = ['polling', 'websocket'] + _default_monitor_clients = True + sequence_number = 0 + + class reason: + """Disconnection reasons.""" + #: Server-initiated disconnection. + SERVER_DISCONNECT = 'server disconnect' + #: Client-initiated disconnection. + CLIENT_DISCONNECT = 'client disconnect' + #: Ping timeout. + PING_TIMEOUT = 'ping timeout' + #: Transport close. + TRANSPORT_CLOSE = 'transport close' + #: Transport error. + TRANSPORT_ERROR = 'transport error' + + def __init__(self, async_mode=None, ping_interval=25, ping_timeout=20, + max_http_buffer_size=1000000, allow_upgrades=True, + http_compression=True, compression_threshold=1024, + cookie=None, cors_allowed_origins=None, + cors_credentials=True, logger=False, json=None, + async_handlers=True, monitor_clients=None, transports=None, + **kwargs): + self.ping_timeout = ping_timeout + if isinstance(ping_interval, tuple): + self.ping_interval = ping_interval[0] + self.ping_interval_grace_period = ping_interval[1] + else: + self.ping_interval = ping_interval + self.ping_interval_grace_period = 0 + self.max_http_buffer_size = max_http_buffer_size + self.allow_upgrades = allow_upgrades + self.http_compression = http_compression + self.compression_threshold = compression_threshold + self.cookie = cookie + self.cors_allowed_origins = cors_allowed_origins + self.cors_credentials = cors_credentials + self.async_handlers = async_handlers + self.sockets = {} + self.handlers = {} + self.log_message_keys = set() + self.start_service_task = monitor_clients \ + if monitor_clients is not None else self._default_monitor_clients + self.service_task_handle = None + self.service_task_event = None + if json is not None: + packet.Packet.json = json + if not isinstance(logger, bool): + self.logger = logger + 
else: + self.logger = default_logger + if self.logger.level == logging.NOTSET: + if logger: + self.logger.setLevel(logging.INFO) + else: + self.logger.setLevel(logging.ERROR) + self.logger.addHandler(logging.StreamHandler()) + modes = self.async_modes() + if async_mode is not None: + modes = [async_mode] if async_mode in modes else [] + self._async = None + self.async_mode = None + for mode in modes: + try: + self._async = importlib.import_module( + 'engineio.async_drivers.' + mode)._async + asyncio_based = self._async['asyncio'] \ + if 'asyncio' in self._async else False + if asyncio_based != self.is_asyncio_based(): + continue # pragma: no cover + self.async_mode = mode + break + except ImportError: + pass + if self.async_mode is None: + raise ValueError('Invalid async_mode specified') + if self.is_asyncio_based() and \ + ('asyncio' not in self._async or not + self._async['asyncio']): # pragma: no cover + raise ValueError('The selected async_mode is not asyncio ' + 'compatible') + if not self.is_asyncio_based() and 'asyncio' in self._async and \ + self._async['asyncio']: # pragma: no cover + raise ValueError('The selected async_mode requires asyncio and ' + 'must use the AsyncServer class') + if transports is not None: + if isinstance(transports, str): + transports = [transports] + transports = [transport for transport in transports + if transport in self.valid_transports] + if not transports: + raise ValueError('No valid transports provided') + self.transports = transports or self.valid_transports + self.logger.info('Server initialized for %s.', self.async_mode) + + def is_asyncio_based(self): + return False + + def async_modes(self): + return ['eventlet', 'gevent_uwsgi', 'gevent', 'threading'] + + def on(self, event, handler=None): + """Register an event handler. + + :param event: The event name. Can be ``'connect'``, ``'message'`` or + ``'disconnect'``. + :param handler: The function that should be invoked to handle the + event. 
When this parameter is not given, the method + acts as a decorator for the handler function. + + Example usage:: + + # as a decorator: + @eio.on('connect') + def connect_handler(sid, environ): + print('Connection request') + if environ['REMOTE_ADDR'] in blacklisted: + return False # reject + + # as a method: + def message_handler(sid, msg): + print('Received message: ', msg) + eio.send(sid, 'response') + eio.on('message', message_handler) + + The handler function receives the ``sid`` (session ID) for the + client as first argument. The ``'connect'`` event handler receives the + WSGI environment as a second argument, and can return ``False`` to + reject the connection. The ``'message'`` handler receives the message + payload as a second argument. The ``'disconnect'`` handler does not + take a second argument. + """ + if event not in self.event_names: + raise ValueError('Invalid event') + + def set_handler(handler): + self.handlers[event] = handler + return handler + + if handler is None: + return set_handler + set_handler(handler) + + def transport(self, sid): + """Return the name of the transport used by the client. + + The two possible values returned by this function are ``'polling'`` + and ``'websocket'``. + + :param sid: The session of the client. + """ + return 'websocket' if self._get_socket(sid).upgraded else 'polling' + + def create_queue(self, *args, **kwargs): + """Create a queue object using the appropriate async model. + + This is a utility function that applications can use to create a queue + without having to worry about using the correct call for the selected + async mode. + """ + return self._async['queue'](*args, **kwargs) + + def get_queue_empty_exception(self): + """Return the queue empty exception for the appropriate async model. + + This is a utility function that applications can use to work with a + queue without having to worry about using the correct call for the + selected async mode. 
+ """ + return self._async['queue_empty'] + + def create_event(self, *args, **kwargs): + """Create an event object using the appropriate async model. + + This is a utility function that applications can use to create an + event without having to worry about using the correct call for the + selected async mode. + """ + return self._async['event'](*args, **kwargs) + + def generate_id(self): + """Generate a unique session id.""" + id = base64.b64encode( + secrets.token_bytes(12) + self.sequence_number.to_bytes(3, 'big')) + self.sequence_number = (self.sequence_number + 1) & 0xffffff + return id.decode('utf-8').replace('/', '_').replace('+', '-') + + def _generate_sid_cookie(self, sid, attributes): + """Generate the sid cookie.""" + cookie = attributes.get('name', 'io') + '=' + sid + for attribute, value in attributes.items(): + if attribute == 'name': + continue + if callable(value): + value = value() + if value is True: + cookie += '; ' + attribute + else: + cookie += '; ' + attribute + '=' + value + return cookie + + def _upgrades(self, sid, transport): + """Return the list of possible upgrades for a client connection.""" + if not self.allow_upgrades or self._get_socket(sid).upgraded or \ + transport == 'websocket': + return [] + if self._async['websocket'] is None: # pragma: no cover + self._log_error_once( + 'The WebSocket transport is not available, you must install a ' + 'WebSocket server that is compatible with your async mode to ' + 'enable it. 
See the documentation for details.', + 'no-websocket') + return [] + return ['websocket'] + + def _get_socket(self, sid): + """Return the socket object for a given session.""" + try: + s = self.sockets[sid] + except KeyError: + raise KeyError('Session not found') + if s.closed: + del self.sockets[sid] + raise KeyError('Session is disconnected') + return s + + def _ok(self, packets=None, headers=None, jsonp_index=None): + """Generate a successful HTTP response.""" + if packets is not None: + if headers is None: + headers = [] + headers += [('Content-Type', 'text/plain; charset=UTF-8')] + return {'status': '200 OK', + 'headers': headers, + 'response': payload.Payload(packets=packets).encode( + jsonp_index=jsonp_index).encode('utf-8')} + else: + return {'status': '200 OK', + 'headers': [('Content-Type', 'text/plain')], + 'response': b'OK'} + + def _bad_request(self, message=None): + """Generate a bad request HTTP error response.""" + if message is None: + message = 'Bad Request' + message = packet.Packet.json.dumps(message) + return {'status': '400 BAD REQUEST', + 'headers': [('Content-Type', 'text/plain')], + 'response': message.encode('utf-8')} + + def _method_not_found(self): + """Generate a method not found HTTP error response.""" + return {'status': '405 METHOD NOT FOUND', + 'headers': [('Content-Type', 'text/plain')], + 'response': b'Method Not Found'} + + def _unauthorized(self, message=None): + """Generate a unauthorized HTTP error response.""" + if message is None: + message = 'Unauthorized' + message = packet.Packet.json.dumps(message) + return {'status': '401 UNAUTHORIZED', + 'headers': [('Content-Type', 'application/json')], + 'response': message.encode('utf-8')} + + def _cors_allowed_origins(self, environ): + default_origins = [] + if 'wsgi.url_scheme' in environ and 'HTTP_HOST' in environ: + default_origins.append('{scheme}://{host}'.format( + scheme=environ['wsgi.url_scheme'], host=environ['HTTP_HOST'])) + if 'HTTP_X_FORWARDED_PROTO' in environ or \ + 
'HTTP_X_FORWARDED_HOST' in environ: + scheme = environ.get( + 'HTTP_X_FORWARDED_PROTO', + environ['wsgi.url_scheme']).split(',')[0].strip() + default_origins.append('{scheme}://{host}'.format( + scheme=scheme, host=environ.get( + 'HTTP_X_FORWARDED_HOST', environ['HTTP_HOST']).split( + ',')[0].strip())) + if self.cors_allowed_origins is None: + allowed_origins = default_origins + elif self.cors_allowed_origins == '*': + allowed_origins = None + elif isinstance(self.cors_allowed_origins, str): + allowed_origins = [self.cors_allowed_origins] + elif callable(self.cors_allowed_origins): + origin = environ.get('HTTP_ORIGIN') + allowed_origins = [origin] \ + if self.cors_allowed_origins(origin) else [] + else: + allowed_origins = self.cors_allowed_origins + return allowed_origins + + def _cors_headers(self, environ): + """Return the cross-origin-resource-sharing headers.""" + if self.cors_allowed_origins == []: + # special case, CORS handling is completely disabled + return [] + headers = [] + allowed_origins = self._cors_allowed_origins(environ) + if 'HTTP_ORIGIN' in environ and \ + (allowed_origins is None or environ['HTTP_ORIGIN'] in + allowed_origins): + headers = [('Access-Control-Allow-Origin', environ['HTTP_ORIGIN'])] + if environ['REQUEST_METHOD'] == 'OPTIONS': + headers += [('Access-Control-Allow-Methods', 'OPTIONS, GET, POST')] + if 'HTTP_ACCESS_CONTROL_REQUEST_HEADERS' in environ: + headers += [('Access-Control-Allow-Headers', + environ['HTTP_ACCESS_CONTROL_REQUEST_HEADERS'])] + if self.cors_credentials: + headers += [('Access-Control-Allow-Credentials', 'true')] + return headers + + def _gzip(self, response): + """Apply gzip compression to a response.""" + bytesio = io.BytesIO() + with gzip.GzipFile(fileobj=bytesio, mode='w') as gz: + gz.write(response) + return bytesio.getvalue() + + def _deflate(self, response): + """Apply deflate compression to a response.""" + return zlib.compress(response) + + def _log_error_once(self, message, message_key): + """Log 
message with logging.ERROR level the first time, then log + with given level.""" + if message_key not in self.log_message_keys: + self.logger.error(message + ' (further occurrences of this error ' + 'will be logged with level INFO)') + self.log_message_keys.add(message_key) + else: + self.logger.info(message) diff --git a/env/lib/python3.10/site-packages/engineio/base_socket.py b/env/lib/python3.10/site-packages/engineio/base_socket.py new file mode 100644 index 0000000..6b5d7dc --- /dev/null +++ b/env/lib/python3.10/site-packages/engineio/base_socket.py @@ -0,0 +1,14 @@ +class BaseSocket: + upgrade_protocols = ['websocket'] + + def __init__(self, server, sid): + self.server = server + self.sid = sid + self.queue = self.server.create_queue() + self.last_ping = None + self.connected = False + self.upgrading = False + self.upgraded = False + self.closing = False + self.closed = False + self.session = {} diff --git a/env/lib/python3.10/site-packages/engineio/client.py b/env/lib/python3.10/site-packages/engineio/client.py new file mode 100644 index 0000000..987c399 --- /dev/null +++ b/env/lib/python3.10/site-packages/engineio/client.py @@ -0,0 +1,620 @@ +from base64 import b64encode +from engineio.json import JSONDecodeError +import logging +import queue +import ssl +import threading +import time +import urllib + +try: + import requests +except ImportError: # pragma: no cover + requests = None +try: + import websocket +except ImportError: # pragma: no cover + websocket = None +from . import base_client +from . import exceptions +from . import packet +from . import payload + +default_logger = logging.getLogger('engineio.client') + + +class Client(base_client.BaseClient): + """An Engine.IO client. + + This class implements a fully compliant Engine.IO web client with support + for websocket and long-polling transports. + + :param logger: To enable logging set to ``True`` or pass a logger object to + use. To disable logging set to ``False``. The default is + ``False``. 
Note that fatal errors are logged even when + ``logger`` is ``False``. + :param json: An alternative json module to use for encoding and decoding + packets. Custom json modules must have ``dumps`` and ``loads`` + functions that are compatible with the standard library + versions. + :param request_timeout: A timeout in seconds for requests. The default is + 5 seconds. + :param http_session: an initialized ``requests.Session`` object to be used + when sending requests to the server. Use it if you + need to add special client options such as proxy + servers, SSL certificates, custom CA bundle, etc. + :param ssl_verify: ``True`` to verify SSL certificates, or ``False`` to + skip SSL certificate verification, allowing + connections to servers with self signed certificates. + The default is ``True``. + :param handle_sigint: Set to ``True`` to automatically handle disconnection + when the process is interrupted, or to ``False`` to + leave interrupt handling to the calling application. + Interrupt handling can only be enabled when the + client instance is created in the main thread. + :param websocket_extra_options: Dictionary containing additional keyword + arguments passed to + ``websocket.create_connection()``. + :param timestamp_requests: If ``True`` a timestamp is added to the query + string of Socket.IO requests as a cache-busting + measure. Set to ``False`` to disable. + """ + def connect(self, url, headers=None, transports=None, + engineio_path='engine.io'): + """Connect to an Engine.IO server. + + :param url: The URL of the Engine.IO server. It can include custom + query string parameters if required by the server. + :param headers: A dictionary with custom headers to send with the + connection request. + :param transports: The list of allowed transports. Valid transports + are ``'polling'`` and ``'websocket'``. If not + given, the polling transport is connected first, + then an upgrade to websocket is attempted. 
+ :param engineio_path: The endpoint where the Engine.IO server is + installed. The default value is appropriate for + most cases. + + Example usage:: + + eio = engineio.Client() + eio.connect('http://localhost:5000') + """ + if self.state != 'disconnected': + raise ValueError('Client is not in a disconnected state') + valid_transports = ['polling', 'websocket'] + if transports is not None: + if isinstance(transports, str): + transports = [transports] + transports = [transport for transport in transports + if transport in valid_transports] + if not transports: + raise ValueError('No valid transports provided') + self.transports = transports or valid_transports + self.queue = self.create_queue() + return getattr(self, '_connect_' + self.transports[0])( + url, headers or {}, engineio_path) + + def wait(self): + """Wait until the connection with the server ends. + + Client applications can use this function to block the main thread + during the life of the connection. + """ + if self.read_loop_task: + self.read_loop_task.join() + + def send(self, data): + """Send a message to the server. + + :param data: The data to send to the server. Data can be of type + ``str``, ``bytes``, ``list`` or ``dict``. If a ``list`` + or ``dict``, the data will be serialized as JSON. + """ + self._send_packet(packet.Packet(packet.MESSAGE, data=data)) + + def disconnect(self, abort=False, reason=None): + """Disconnect from the server. + + :param abort: If set to ``True``, do not wait for background tasks + associated with the connection to end. 
+ """ + if self.state == 'connected': + self._send_packet(packet.Packet(packet.CLOSE)) + self.queue.put(None) + self.state = 'disconnecting' + self._trigger_event('disconnect', + reason or self.reason.CLIENT_DISCONNECT, + run_async=False) + if self.current_transport == 'websocket': + self.ws.close() + if not abort: + self.read_loop_task.join() + self.state = 'disconnected' + try: + base_client.connected_clients.remove(self) + except ValueError: # pragma: no cover + pass + self._reset() + + def start_background_task(self, target, *args, **kwargs): + """Start a background task. + + This is a utility function that applications can use to start a + background task. + + :param target: the target function to execute. + :param args: arguments to pass to the function. + :param kwargs: keyword arguments to pass to the function. + + This function returns an object that represents the background task, + on which the ``join()`` method can be invoked to wait for the task to + complete. + """ + th = threading.Thread(target=target, args=args, kwargs=kwargs, + daemon=True) + th.start() + return th + + def sleep(self, seconds=0): + """Sleep for the requested amount of time.""" + return time.sleep(seconds) + + def create_queue(self, *args, **kwargs): + """Create a queue object.""" + q = queue.Queue(*args, **kwargs) + q.Empty = queue.Empty + return q + + def create_event(self, *args, **kwargs): + """Create an event object.""" + return threading.Event(*args, **kwargs) + + def _connect_polling(self, url, headers, engineio_path): + """Establish a long-polling connection to the Engine.IO server.""" + if requests is None: # pragma: no cover + # not installed + self.logger.error('requests package is not installed -- cannot ' + 'send HTTP requests!') + return + self.base_url = self._get_engineio_url(url, engineio_path, 'polling') + self.logger.info('Attempting polling connection to ' + self.base_url) + r = self._send_request( + 'GET', self.base_url + self._get_url_timestamp(), 
headers=headers, + timeout=self.request_timeout) + if r is None or isinstance(r, str): + self._reset() + raise exceptions.ConnectionError( + r or 'Connection refused by the server') + if r.status_code < 200 or r.status_code >= 300: + self._reset() + try: + arg = r.json() + except JSONDecodeError: + arg = None + raise exceptions.ConnectionError( + 'Unexpected status code {} in server response'.format( + r.status_code), arg) + try: + p = payload.Payload(encoded_payload=r.content.decode('utf-8')) + except ValueError: + raise exceptions.ConnectionError( + 'Unexpected response from server') from None + open_packet = p.packets[0] + if open_packet.packet_type != packet.OPEN: + raise exceptions.ConnectionError( + 'OPEN packet not returned by server') + self.logger.info( + 'Polling connection accepted with ' + str(open_packet.data)) + self.sid = open_packet.data['sid'] + self.upgrades = open_packet.data['upgrades'] + self.ping_interval = int(open_packet.data['pingInterval']) / 1000.0 + self.ping_timeout = int(open_packet.data['pingTimeout']) / 1000.0 + self.current_transport = 'polling' + self.base_url += '&sid=' + self.sid + + self.state = 'connected' + base_client.connected_clients.append(self) + self._trigger_event('connect', run_async=False) + + for pkt in p.packets[1:]: + self._receive_packet(pkt) + + if 'websocket' in self.upgrades and 'websocket' in self.transports: + # attempt to upgrade to websocket + if self._connect_websocket(url, headers, engineio_path): + # upgrade to websocket succeeded, we're done here + return + + # start background tasks associated with this client + self.write_loop_task = self.start_background_task(self._write_loop) + self.read_loop_task = self.start_background_task( + self._read_loop_polling) + + def _connect_websocket(self, url, headers, engineio_path): + """Establish or upgrade to a WebSocket connection with the server.""" + if websocket is None: # pragma: no cover + # not installed + self.logger.error('websocket-client package not 
installed, only ' + 'polling transport is available') + return False + websocket_url = self._get_engineio_url(url, engineio_path, 'websocket') + if self.sid: + self.logger.info( + 'Attempting WebSocket upgrade to ' + websocket_url) + upgrade = True + websocket_url += '&sid=' + self.sid + else: + upgrade = False + self.base_url = websocket_url + self.logger.info( + 'Attempting WebSocket connection to ' + websocket_url) + + # get cookies and other settings from the long-polling connection + # so that they are preserved when connecting to the WebSocket route + cookies = None + extra_options = {} + if self.http: + # cookies + cookies = '; '.join([f"{cookie.name}={cookie.value}" + for cookie in self.http.cookies]) + for header, value in headers.items(): + if header.lower() == 'cookie': + if cookies: + cookies += '; ' + cookies += value + del headers[header] + break + + # auth + if 'Authorization' not in headers and self.http.auth is not None: + if not isinstance(self.http.auth, tuple): # pragma: no cover + raise ValueError('Only basic authentication is supported') + basic_auth = '{}:{}'.format( + self.http.auth[0], self.http.auth[1]).encode('utf-8') + basic_auth = b64encode(basic_auth).decode('utf-8') + headers['Authorization'] = 'Basic ' + basic_auth + + # cert + # this can be given as ('certfile', 'keyfile') or just 'certfile' + if isinstance(self.http.cert, tuple): + extra_options['sslopt'] = { + 'certfile': self.http.cert[0], + 'keyfile': self.http.cert[1]} + elif self.http.cert: + extra_options['sslopt'] = {'certfile': self.http.cert} + + # proxies + if self.http.proxies: + proxy_url = None + if websocket_url.startswith('ws://'): + proxy_url = self.http.proxies.get( + 'ws', self.http.proxies.get('http')) + else: # wss:// + proxy_url = self.http.proxies.get( + 'wss', self.http.proxies.get('https')) + if proxy_url: + parsed_url = urllib.parse.urlparse( + proxy_url if '://' in proxy_url + else 'scheme://' + proxy_url) + extra_options['http_proxy_host'] = 
parsed_url.hostname + extra_options['http_proxy_port'] = parsed_url.port + extra_options['http_proxy_auth'] = ( + (parsed_url.username, parsed_url.password) + if parsed_url.username or parsed_url.password + else None) + + # verify + if isinstance(self.http.verify, str): + if 'sslopt' in extra_options: + extra_options['sslopt']['ca_certs'] = self.http.verify + else: + extra_options['sslopt'] = {'ca_certs': self.http.verify} + elif not self.http.verify: + self.ssl_verify = False + + if not self.ssl_verify: + if 'sslopt' in extra_options: + extra_options['sslopt'].update({"cert_reqs": ssl.CERT_NONE}) + else: + extra_options['sslopt'] = {"cert_reqs": ssl.CERT_NONE} + + # combine internally generated options with the ones supplied by the + # caller. The caller's options take precedence. + headers.update(self.websocket_extra_options.pop('header', {})) + extra_options['header'] = headers + extra_options['cookie'] = cookies + extra_options['enable_multithread'] = True + extra_options['timeout'] = self.request_timeout + extra_options.update(self.websocket_extra_options) + try: + ws = websocket.create_connection( + websocket_url + self._get_url_timestamp(), **extra_options) + except (ConnectionError, OSError, websocket.WebSocketException): + if upgrade: + self.logger.warning( + 'WebSocket upgrade failed: connection error') + return False + else: + raise exceptions.ConnectionError('Connection error') + if upgrade: + p = packet.Packet(packet.PING, data='probe').encode() + try: + ws.send(p) + except Exception as e: # pragma: no cover + self.logger.warning( + 'WebSocket upgrade failed: unexpected send exception: %s', + str(e)) + return False + try: + p = ws.recv() + except Exception as e: # pragma: no cover + self.logger.warning( + 'WebSocket upgrade failed: unexpected recv exception: %s', + str(e)) + return False + pkt = packet.Packet(encoded_packet=p) + if pkt.packet_type != packet.PONG or pkt.data != 'probe': + self.logger.warning( + 'WebSocket upgrade failed: no PONG 
packet') + return False + p = packet.Packet(packet.UPGRADE).encode() + try: + ws.send(p) + except Exception as e: # pragma: no cover + self.logger.warning( + 'WebSocket upgrade failed: unexpected send exception: %s', + str(e)) + return False + self.current_transport = 'websocket' + self.logger.info('WebSocket upgrade was successful') + else: + try: + p = ws.recv() + except Exception as e: # pragma: no cover + raise exceptions.ConnectionError( + 'Unexpected recv exception: ' + str(e)) + open_packet = packet.Packet(encoded_packet=p) + if open_packet.packet_type != packet.OPEN: + raise exceptions.ConnectionError('no OPEN packet') + self.logger.info( + 'WebSocket connection accepted with ' + str(open_packet.data)) + self.sid = open_packet.data['sid'] + self.upgrades = open_packet.data['upgrades'] + self.ping_interval = int(open_packet.data['pingInterval']) / 1000.0 + self.ping_timeout = int(open_packet.data['pingTimeout']) / 1000.0 + self.current_transport = 'websocket' + + self.state = 'connected' + base_client.connected_clients.append(self) + self._trigger_event('connect', run_async=False) + self.ws = ws + self.ws.settimeout(self.ping_interval + self.ping_timeout) + + # start background tasks associated with this client + self.write_loop_task = self.start_background_task(self._write_loop) + self.read_loop_task = self.start_background_task( + self._read_loop_websocket) + return True + + def _receive_packet(self, pkt): + """Handle incoming packets from the server.""" + packet_name = packet.packet_names[pkt.packet_type] \ + if pkt.packet_type < len(packet.packet_names) else 'UNKNOWN' + self.logger.info( + 'Received packet %s data %s', packet_name, + pkt.data if not isinstance(pkt.data, bytes) else '') + if pkt.packet_type == packet.MESSAGE: + self._trigger_event('message', pkt.data, run_async=True) + elif pkt.packet_type == packet.PING: + self._send_packet(packet.Packet(packet.PONG, pkt.data)) + elif pkt.packet_type == packet.CLOSE: + self.disconnect(abort=True, 
reason=self.reason.SERVER_DISCONNECT) + elif pkt.packet_type == packet.NOOP: + pass + else: + self.logger.error('Received unexpected packet of type %s', + pkt.packet_type) + + def _send_packet(self, pkt): + """Queue a packet to be sent to the server.""" + if self.state != 'connected': + return + self.queue.put(pkt) + self.logger.info( + 'Sending packet %s data %s', + packet.packet_names[pkt.packet_type], + pkt.data if not isinstance(pkt.data, bytes) else '') + + def _send_request( + self, method, url, headers=None, body=None, + timeout=None): # pragma: no cover + if self.http is None: + self.http = requests.Session() + if not self.ssl_verify: + self.http.verify = False + try: + return self.http.request(method, url, headers=headers, data=body, + timeout=timeout) + except requests.exceptions.RequestException as exc: + self.logger.info('HTTP %s request to %s failed with error %s.', + method, url, exc) + return str(exc) + + def _trigger_event(self, event, *args, **kwargs): + """Invoke an event handler.""" + run_async = kwargs.pop('run_async', False) + if event in self.handlers: + if run_async: + return self.start_background_task(self.handlers[event], *args) + else: + try: + try: + return self.handlers[event](*args) + except TypeError: + if event == 'disconnect' and \ + len(args) == 1: # pragma: no branch + # legacy disconnect events do not have a reason + # argument + return self.handlers[event]() + else: # pragma: no cover + raise + except: + self.logger.exception(event + ' handler error') + + def _read_loop_polling(self): + """Read packets by polling the Engine.IO server.""" + while self.state == 'connected' and self.write_loop_task: + self.logger.info( + 'Sending polling GET request to ' + self.base_url) + r = self._send_request( + 'GET', self.base_url + self._get_url_timestamp(), + timeout=max(self.ping_interval, self.ping_timeout) + 5) + if r is None or isinstance(r, str): + self.logger.warning( + r or 'Connection refused by the server, aborting') + 
self.queue.put(None) + break + if r.status_code < 200 or r.status_code >= 300: + self.logger.warning('Unexpected status code %s in server ' + 'response, aborting', r.status_code) + self.queue.put(None) + break + try: + p = payload.Payload(encoded_payload=r.content.decode('utf-8')) + except ValueError: + self.logger.warning( + 'Unexpected packet from server, aborting') + self.queue.put(None) + break + for pkt in p.packets: + self._receive_packet(pkt) + + if self.write_loop_task: # pragma: no branch + self.logger.info('Waiting for write loop task to end') + self.write_loop_task.join() + if self.state == 'connected': + self._trigger_event('disconnect', self.reason.TRANSPORT_ERROR, + run_async=False) + try: + base_client.connected_clients.remove(self) + except ValueError: # pragma: no cover + pass + self._reset() + self.logger.info('Exiting read loop task') + + def _read_loop_websocket(self): + """Read packets from the Engine.IO WebSocket connection.""" + while self.state == 'connected': + p = None + try: + p = self.ws.recv() + if len(p) == 0 and not self.ws.connected: # pragma: no cover + # websocket client can return an empty string after close + raise websocket.WebSocketConnectionClosedException() + except websocket.WebSocketTimeoutException: + self.logger.warning( + 'Server has stopped communicating, aborting') + self.queue.put(None) + break + except websocket.WebSocketConnectionClosedException: + self.logger.warning( + 'WebSocket connection was closed, aborting') + self.queue.put(None) + break + except Exception as e: # pragma: no cover + if type(e) is OSError and e.errno == 9: + self.logger.info( + 'WebSocket connection is closing, aborting') + else: + self.logger.info( + 'Unexpected error receiving packet: "%s", aborting', + str(e)) + self.queue.put(None) + break + try: + pkt = packet.Packet(encoded_packet=p) + except Exception as e: # pragma: no cover + self.logger.info( + 'Unexpected error decoding packet: "%s", aborting', str(e)) + self.queue.put(None) + 
break + self._receive_packet(pkt) + + if self.write_loop_task: # pragma: no branch + self.logger.info('Waiting for write loop task to end') + self.write_loop_task.join() + if self.state == 'connected': + self._trigger_event('disconnect', self.reason.TRANSPORT_ERROR, + run_async=False) + try: + base_client.connected_clients.remove(self) + except ValueError: # pragma: no cover + pass + self._reset() + self.logger.info('Exiting read loop task') + + def _write_loop(self): + """This background task sends packages to the server as they are + pushed to the send queue. + """ + while self.state == 'connected': + # to simplify the timeout handling, use the maximum of the + # ping interval and ping timeout as timeout, with an extra 5 + # seconds grace period + timeout = max(self.ping_interval, self.ping_timeout) + 5 + packets = None + try: + packets = [self.queue.get(timeout=timeout)] + except self.queue.Empty: + self.logger.error('packet queue is empty, aborting') + break + if packets == [None]: + self.queue.task_done() + packets = [] + else: + while True: + try: + packets.append(self.queue.get(block=False)) + except self.queue.Empty: + break + if packets[-1] is None: + packets = packets[:-1] + self.queue.task_done() + break + if not packets: + # empty packet list returned -> connection closed + break + if self.current_transport == 'polling': + p = payload.Payload(packets=packets) + r = self._send_request( + 'POST', self.base_url, body=p.encode(), + headers={'Content-Type': 'text/plain'}, + timeout=self.request_timeout) + for pkt in packets: + self.queue.task_done() + if r is None or isinstance(r, str): + self.logger.warning( + r or 'Connection refused by the server, aborting') + break + if r.status_code < 200 or r.status_code >= 300: + self.logger.warning('Unexpected status code %s in server ' + 'response, aborting', r.status_code) + self.write_loop_task = None + break + else: + # websocket + try: + for pkt in packets: + encoded_packet = pkt.encode() + if pkt.binary: + 
self.ws.send_binary(encoded_packet) + else: + self.ws.send(encoded_packet) + self.queue.task_done() + except (websocket.WebSocketConnectionClosedException, + BrokenPipeError, OSError): + self.logger.warning( + 'WebSocket connection was closed, aborting') + break + self.logger.info('Exiting write loop task') diff --git a/env/lib/python3.10/site-packages/engineio/exceptions.py b/env/lib/python3.10/site-packages/engineio/exceptions.py new file mode 100644 index 0000000..fb0b3e0 --- /dev/null +++ b/env/lib/python3.10/site-packages/engineio/exceptions.py @@ -0,0 +1,22 @@ +class EngineIOError(Exception): + pass + + +class ContentTooLongError(EngineIOError): + pass + + +class UnknownPacketError(EngineIOError): + pass + + +class QueueEmpty(EngineIOError): + pass + + +class SocketIsClosedError(EngineIOError): + pass + + +class ConnectionError(EngineIOError): + pass diff --git a/env/lib/python3.10/site-packages/engineio/json.py b/env/lib/python3.10/site-packages/engineio/json.py new file mode 100644 index 0000000..b612556 --- /dev/null +++ b/env/lib/python3.10/site-packages/engineio/json.py @@ -0,0 +1,16 @@ +"""JSON-compatible module with sane defaults.""" + +from json import * # noqa: F401, F403 +from json import loads as original_loads + + +def _safe_int(s): + if len(s) > 100: + raise ValueError('Integer is too large') + return int(s) + + +def loads(*args, **kwargs): + if 'parse_int' not in kwargs: # pragma: no cover + kwargs['parse_int'] = _safe_int + return original_loads(*args, **kwargs) diff --git a/env/lib/python3.10/site-packages/engineio/middleware.py b/env/lib/python3.10/site-packages/engineio/middleware.py new file mode 100644 index 0000000..0e34fb0 --- /dev/null +++ b/env/lib/python3.10/site-packages/engineio/middleware.py @@ -0,0 +1,86 @@ +import os +from engineio.static_files import get_static_file + + +class WSGIApp: + """WSGI application middleware for Engine.IO. + + This middleware dispatches traffic to an Engine.IO application. 
It can + also serve a list of static files to the client, or forward unrelated + HTTP traffic to another WSGI application. + + :param engineio_app: The Engine.IO server. Must be an instance of the + ``engineio.Server`` class. + :param wsgi_app: The WSGI app that receives all other traffic. + :param static_files: A dictionary with static file mapping rules. See the + documentation for details on this argument. + :param engineio_path: The endpoint where the Engine.IO application should + be installed. The default value is appropriate for + most cases. + + Example usage:: + + import engineio + import eventlet + + eio = engineio.Server() + app = engineio.WSGIApp(eio, static_files={ + '/': {'content_type': 'text/html', 'filename': 'index.html'}, + '/index.html': {'content_type': 'text/html', + 'filename': 'index.html'}, + }) + eventlet.wsgi.server(eventlet.listen(('', 8000)), app) + """ + def __init__(self, engineio_app, wsgi_app=None, static_files=None, + engineio_path='engine.io'): + self.engineio_app = engineio_app + self.wsgi_app = wsgi_app + self.engineio_path = engineio_path + if not self.engineio_path.startswith('/'): + self.engineio_path = '/' + self.engineio_path + if not self.engineio_path.endswith('/'): + self.engineio_path += '/' + self.static_files = static_files or {} + + def __call__(self, environ, start_response): + if 'gunicorn.socket' in environ: + # gunicorn saves the socket under environ['gunicorn.socket'], while + # eventlet saves it under environ['eventlet.input']. Eventlet also + # stores the socket inside a wrapper class, while gunicon writes it + # directly into the environment. To give eventlet's WebSocket + # module access to this socket when running under gunicorn, here we + # copy the socket to the eventlet format. 
+ class Input: + def __init__(self, socket): + self.socket = socket + + def get_socket(self): + return self.socket + + environ['eventlet.input'] = Input(environ['gunicorn.socket']) + path = environ['PATH_INFO'] + if path is not None and path.startswith(self.engineio_path): + return self.engineio_app.handle_request(environ, start_response) + else: + static_file = get_static_file(path, self.static_files) \ + if self.static_files else None + if static_file and os.path.exists(static_file['filename']): + start_response( + '200 OK', + [('Content-Type', static_file['content_type'])]) + with open(static_file['filename'], 'rb') as f: + return [f.read()] + elif self.wsgi_app is not None: + return self.wsgi_app(environ, start_response) + return self.not_found(start_response) + + def not_found(self, start_response): + start_response("404 Not Found", [('Content-Type', 'text/plain')]) + return [b'Not Found'] + + +class Middleware(WSGIApp): + """This class has been renamed to ``WSGIApp`` and is now deprecated.""" + def __init__(self, engineio_app, wsgi_app=None, + engineio_path='engine.io'): + super().__init__(engineio_app, wsgi_app, engineio_path=engineio_path) diff --git a/env/lib/python3.10/site-packages/engineio/packet.py b/env/lib/python3.10/site-packages/engineio/packet.py new file mode 100644 index 0000000..7edc7a3 --- /dev/null +++ b/env/lib/python3.10/site-packages/engineio/packet.py @@ -0,0 +1,82 @@ +import base64 +from engineio import json as _json + +(OPEN, CLOSE, PING, PONG, MESSAGE, UPGRADE, NOOP) = (0, 1, 2, 3, 4, 5, 6) +packet_names = ['OPEN', 'CLOSE', 'PING', 'PONG', 'MESSAGE', 'UPGRADE', 'NOOP'] + +binary_types = (bytes, bytearray) + + +class Packet: + """Engine.IO packet.""" + + json = _json + + def __init__(self, packet_type=NOOP, data=None, encoded_packet=None): + self.packet_type = packet_type + self.data = data + self.encode_cache = None + if isinstance(data, str): + self.binary = False + elif isinstance(data, binary_types): + self.binary = True + else: + 
self.binary = False + if self.binary and self.packet_type != MESSAGE: + raise ValueError('Binary packets can only be of type MESSAGE') + if encoded_packet is not None: + self.decode(encoded_packet) + + def encode(self, b64=False): + """Encode the packet for transmission. + + Note: as a performance optimization, subsequent calls to this method + will return a cached encoded packet, even if the data has changed. + """ + if self.encode_cache: + return self.encode_cache + if self.binary: + if b64: + encoded_packet = 'b' + base64.b64encode(self.data).decode( + 'utf-8') + else: + encoded_packet = self.data + else: + encoded_packet = str(self.packet_type) + if isinstance(self.data, str): + encoded_packet += self.data + elif isinstance(self.data, dict) or isinstance(self.data, list): + encoded_packet += self.json.dumps(self.data, + separators=(',', ':')) + elif self.data is not None: + encoded_packet += str(self.data) + self.encode_cache = encoded_packet + return encoded_packet + + def decode(self, encoded_packet): + """Decode a transmitted package.""" + self.binary = isinstance(encoded_packet, binary_types) + if not self.binary and len(encoded_packet) == 0: + raise ValueError('Invalid empty packet received') + b64 = not self.binary and encoded_packet[0] == 'b' + if b64: + self.binary = True + self.packet_type = MESSAGE + self.data = base64.b64decode(encoded_packet[1:]) + else: + if self.binary and not isinstance(encoded_packet, bytes): + encoded_packet = bytes(encoded_packet) + if self.binary: + self.packet_type = MESSAGE + self.data = encoded_packet + else: + self.packet_type = int(encoded_packet[0]) + try: + self.data = self.json.loads(encoded_packet[1:]) + if isinstance(self.data, int): + # do not allow integer payloads, see + # github.com/miguelgrinberg/python-engineio/issues/75 + # for background on this decision + raise ValueError + except ValueError: + self.data = encoded_packet[1:] diff --git a/env/lib/python3.10/site-packages/engineio/payload.py 
b/env/lib/python3.10/site-packages/engineio/payload.py new file mode 100644 index 0000000..775241b --- /dev/null +++ b/env/lib/python3.10/site-packages/engineio/payload.py @@ -0,0 +1,46 @@ +import urllib + +from . import packet + + +class Payload: + """Engine.IO payload.""" + max_decode_packets = 16 + + def __init__(self, packets=None, encoded_payload=None): + self.packets = packets or [] + if encoded_payload is not None: + self.decode(encoded_payload) + + def encode(self, jsonp_index=None): + """Encode the payload for transmission.""" + encoded_payload = '' + for pkt in self.packets: + if encoded_payload: + encoded_payload += '\x1e' + encoded_payload += pkt.encode(b64=True) + if jsonp_index is not None: + encoded_payload = '___eio[' + \ + str(jsonp_index) + \ + ']("' + \ + encoded_payload.replace('"', '\\"') + \ + '");' + return encoded_payload + + def decode(self, encoded_payload): + """Decode a transmitted payload.""" + self.packets = [] + + if len(encoded_payload) == 0: + return + + # JSONP POST payload starts with 'd=' + if encoded_payload.startswith('d='): + encoded_payload = urllib.parse.parse_qs( + encoded_payload)['d'][0] + + encoded_packets = encoded_payload.split('\x1e') + if len(encoded_packets) > self.max_decode_packets: + raise ValueError('Too many packets in payload') + self.packets = [packet.Packet(encoded_packet=encoded_packet) + for encoded_packet in encoded_packets] diff --git a/env/lib/python3.10/site-packages/engineio/server.py b/env/lib/python3.10/site-packages/engineio/server.py new file mode 100644 index 0000000..f1b1367 --- /dev/null +++ b/env/lib/python3.10/site-packages/engineio/server.py @@ -0,0 +1,503 @@ +import logging +import urllib + +from . import base_server +from . import exceptions +from . import packet +from . import socket + +default_logger = logging.getLogger('engineio.server') + + +class Server(base_server.BaseServer): + """An Engine.IO server. 
+ + This class implements a fully compliant Engine.IO web server with support + for websocket and long-polling transports. + + :param async_mode: The asynchronous model to use. See the Deployment + section in the documentation for a description of the + available options. Valid async modes are "threading", + "eventlet", "gevent" and "gevent_uwsgi". If this + argument is not given, "eventlet" is tried first, then + "gevent_uwsgi", then "gevent", and finally "threading". + The first async mode that has all its dependencies + installed is the one that is chosen. + :param ping_interval: The interval in seconds at which the server pings + the client. The default is 25 seconds. For advanced + control, a two element tuple can be given, where + the first number is the ping interval and the second + is a grace period added by the server. + :param ping_timeout: The time in seconds that the client waits for the + server to respond before disconnecting. The default + is 20 seconds. + :param max_http_buffer_size: The maximum size that is accepted for incoming + messages. The default is 1,000,000 bytes. In + spite of its name, the value set in this + argument is enforced for HTTP long-polling and + WebSocket connections. + :param allow_upgrades: Whether to allow transport upgrades or not. The + default is ``True``. + :param http_compression: Whether to compress packages when using the + polling transport. The default is ``True``. + :param compression_threshold: Only compress messages when their byte size + is greater than this value. The default is + 1024 bytes. + :param cookie: If set to a string, it is the name of the HTTP cookie the + server sends back tot he client containing the client + session id. If set to a dictionary, the ``'name'`` key + contains the cookie name and other keys define cookie + attributes, where the value of each attribute can be a + string, a callable with no arguments, or a boolean. 
If set + to ``None`` (the default), a cookie is not sent to the + client. + :param cors_allowed_origins: Origin or list of origins that are allowed to + connect to this server. Only the same origin + is allowed by default. Set this argument to + ``'*'`` to allow all origins, or to ``[]`` to + disable CORS handling. + :param cors_credentials: Whether credentials (cookies, authentication) are + allowed in requests to this server. The default + is ``True``. + :param logger: To enable logging set to ``True`` or pass a logger object to + use. To disable logging set to ``False``. The default is + ``False``. Note that fatal errors are logged even when + ``logger`` is ``False``. + :param json: An alternative json module to use for encoding and decoding + packets. Custom json modules must have ``dumps`` and ``loads`` + functions that are compatible with the standard library + versions. + :param async_handlers: If set to ``True``, run message event handlers in + non-blocking threads. To run handlers synchronously, + set to ``False``. The default is ``True``. + :param monitor_clients: If set to ``True``, a background task will ensure + inactive clients are closed. Set to ``False`` to + disable the monitoring task (not recommended). The + default is ``True``. + :param transports: The list of allowed transports. Valid transports + are ``'polling'`` and ``'websocket'``. Defaults to + ``['polling', 'websocket']``. + :param kwargs: Reserved for future extensions, any additional parameters + given as keyword arguments will be silently ignored. + """ + def send(self, sid, data): + """Send a message to a client. + + :param sid: The session id of the recipient client. + :param data: The data to send to the client. Data can be of type + ``str``, ``bytes``, ``list`` or ``dict``. If a ``list`` + or ``dict``, the data will be serialized as JSON. + """ + self.send_packet(sid, packet.Packet(packet.MESSAGE, data=data)) + + def send_packet(self, sid, pkt): + """Send a raw packet to a client. 
+ + :param sid: The session id of the recipient client. + :param pkt: The packet to send to the client. + """ + try: + socket = self._get_socket(sid) + except KeyError: + # the socket is not available + self.logger.warning('Cannot send to sid %s', sid) + return + socket.send(pkt) + + def get_session(self, sid): + """Return the user session for a client. + + :param sid: The session id of the client. + + The return value is a dictionary. Modifications made to this + dictionary are not guaranteed to be preserved unless + ``save_session()`` is called, or when the ``session`` context manager + is used. + """ + socket = self._get_socket(sid) + return socket.session + + def save_session(self, sid, session): + """Store the user session for a client. + + :param sid: The session id of the client. + :param session: The session dictionary. + """ + socket = self._get_socket(sid) + socket.session = session + + def session(self, sid): + """Return the user session for a client with context manager syntax. + + :param sid: The session id of the client. + + This is a context manager that returns the user session dictionary for + the client. Any changes that are made to this dictionary inside the + context manager block are saved back to the session. 
Example usage:: + + @eio.on('connect') + def on_connect(sid, environ): + username = authenticate_user(environ) + if not username: + return False + with eio.session(sid) as session: + session['username'] = username + + @eio.on('message') + def on_message(sid, msg): + with eio.session(sid) as session: + print('received message from ', session['username']) + """ + class _session_context_manager: + def __init__(self, server, sid): + self.server = server + self.sid = sid + self.session = None + + def __enter__(self): + self.session = self.server.get_session(sid) + return self.session + + def __exit__(self, *args): + self.server.save_session(sid, self.session) + + return _session_context_manager(self, sid) + + def disconnect(self, sid=None): + """Disconnect a client. + + :param sid: The session id of the client to close. If this parameter + is not given, then all clients are closed. + """ + if sid is not None: + try: + socket = self._get_socket(sid) + except KeyError: # pragma: no cover + # the socket was already closed or gone + pass + else: + socket.close(reason=self.reason.SERVER_DISCONNECT) + if sid in self.sockets: # pragma: no cover + del self.sockets[sid] + else: + for client in self.sockets.copy().values(): + client.close(reason=self.reason.SERVER_DISCONNECT) + self.sockets = {} + + def handle_request(self, environ, start_response): + """Handle an HTTP request from the client. + + This is the entry point of the Engine.IO application, using the same + interface as a WSGI application. For the typical usage, this function + is invoked by the :class:`Middleware` instance, but it can be invoked + directly when the middleware is not used. + + :param environ: The WSGI environment. + :param start_response: The WSGI ``start_response`` function. + + This function returns the HTTP response body to deliver to the client + as a byte sequence. 
+ """ + if self.cors_allowed_origins != []: + # Validate the origin header if present + # This is important for WebSocket more than for HTTP, since + # browsers only apply CORS controls to HTTP. + origin = environ.get('HTTP_ORIGIN') + if origin: + allowed_origins = self._cors_allowed_origins(environ) + if allowed_origins is not None and origin not in \ + allowed_origins: + self._log_error_once( + origin + ' is not an accepted origin.', 'bad-origin') + r = self._bad_request('Not an accepted origin.') + start_response(r['status'], r['headers']) + return [r['response']] + + method = environ['REQUEST_METHOD'] + query = urllib.parse.parse_qs(environ.get('QUERY_STRING', '')) + jsonp = False + jsonp_index = None + + # make sure the client uses an allowed transport + transport = query.get('transport', ['polling'])[0] + if transport not in self.transports: + self._log_error_once('Invalid transport', 'bad-transport') + r = self._bad_request('Invalid transport') + start_response(r['status'], r['headers']) + return [r['response']] + + # make sure the client speaks a compatible Engine.IO version + sid = query['sid'][0] if 'sid' in query else None + if sid is None and query.get('EIO') != ['4']: + self._log_error_once( + 'The client is using an unsupported version of the Socket.IO ' + 'or Engine.IO protocols', 'bad-version') + r = self._bad_request( + 'The client is using an unsupported version of the Socket.IO ' + 'or Engine.IO protocols') + start_response(r['status'], r['headers']) + return [r['response']] + + if 'j' in query: + jsonp = True + try: + jsonp_index = int(query['j'][0]) + except (ValueError, KeyError, IndexError): + # Invalid JSONP index number + pass + + if jsonp and jsonp_index is None: + self._log_error_once('Invalid JSONP index number', + 'bad-jsonp-index') + r = self._bad_request('Invalid JSONP index number') + elif method == 'GET': + upgrade_header = environ.get('HTTP_UPGRADE').lower() \ + if 'HTTP_UPGRADE' in environ else None + if sid is None: + # transport 
must be one of 'polling' or 'websocket'. + # if 'websocket', the HTTP_UPGRADE header must match. + if transport == 'polling' \ + or transport == upgrade_header == 'websocket': + r = self._handle_connect(environ, start_response, + transport, jsonp_index) + else: + self._log_error_once('Invalid websocket upgrade', + 'bad-upgrade') + r = self._bad_request('Invalid websocket upgrade') + else: + if sid not in self.sockets: + self._log_error_once(f'Invalid session {sid}', 'bad-sid') + r = self._bad_request(f'Invalid session {sid}') + else: + try: + socket = self._get_socket(sid) + except KeyError as e: # pragma: no cover + self._log_error_once(f'{e} {sid}', 'bad-sid') + r = self._bad_request(f'{e} {sid}') + else: + if self.transport(sid) != transport and \ + transport != upgrade_header: + self._log_error_once( + f'Invalid transport for session {sid}', + 'bad-transport') + r = self._bad_request('Invalid transport') + else: + try: + packets = socket.handle_get_request( + environ, start_response) + if isinstance(packets, list): + r = self._ok(packets, + jsonp_index=jsonp_index) + else: + r = packets + except exceptions.EngineIOError: + if sid in self.sockets: # pragma: no cover + self.disconnect(sid) + r = self._bad_request() + if sid in self.sockets and \ + self.sockets[sid].closed: + del self.sockets[sid] + elif method == 'POST': + if sid is None or sid not in self.sockets: + self._log_error_once(f'Invalid session {sid}', 'bad-sid') + r = self._bad_request(f'Invalid session {sid}') + else: + socket = self._get_socket(sid) + try: + socket.handle_post_request(environ) + r = self._ok(jsonp_index=jsonp_index) + except exceptions.EngineIOError: + if sid in self.sockets: # pragma: no cover + self.disconnect(sid) + r = self._bad_request() + except: # pragma: no cover + # for any other unexpected errors, we log the error + # and keep going + self.logger.exception('post request handler error') + r = self._ok(jsonp_index=jsonp_index) + elif method == 'OPTIONS': + r = self._ok() + 
else: + self.logger.warning('Method %s not supported', method) + r = self._method_not_found() + + if not isinstance(r, dict): + return r + if self.http_compression and \ + len(r['response']) >= self.compression_threshold: + encodings = [e.split(';')[0].strip() for e in + environ.get('HTTP_ACCEPT_ENCODING', '').split(',')] + for encoding in encodings: + if encoding in self.compression_methods: + r['response'] = \ + getattr(self, '_' + encoding)(r['response']) + r['headers'] += [('Content-Encoding', encoding)] + break + cors_headers = self._cors_headers(environ) + start_response(r['status'], r['headers'] + cors_headers) + return [r['response']] + + def shutdown(self): + """Stop Socket.IO background tasks. + + This method stops background activity initiated by the Socket.IO + server. It must be called before shutting down the web server. + """ + self.logger.info('Socket.IO is shutting down') + if self.service_task_event: # pragma: no cover + self.service_task_event.set() + self.service_task_handle.join() + self.service_task_handle = None + + def start_background_task(self, target, *args, **kwargs): + """Start a background task using the appropriate async model. + + This is a utility function that applications can use to start a + background task using the method that is compatible with the + selected async mode. + + :param target: the target function to execute. + :param args: arguments to pass to the function. + :param kwargs: keyword arguments to pass to the function. + + This function returns an object that represents the background task, + on which the ``join()`` methond can be invoked to wait for the task to + complete. + """ + th = self._async['thread'](target=target, args=args, kwargs=kwargs) + th.start() + return th # pragma: no cover + + def sleep(self, seconds=0): + """Sleep for the requested amount of time using the appropriate async + model. 
+ + This is a utility function that applications can use to put a task to + sleep without having to worry about using the correct call for the + selected async mode. + """ + return self._async['sleep'](seconds) + + def _handle_connect(self, environ, start_response, transport, + jsonp_index=None): + """Handle a client connection request.""" + if self.start_service_task: + # start the service task to monitor connected clients + self.start_service_task = False + self.service_task_handle = self.start_background_task( + self._service_task) + + sid = self.generate_id() + s = socket.Socket(self, sid) + self.sockets[sid] = s + + pkt = packet.Packet(packet.OPEN, { + 'sid': sid, + 'upgrades': self._upgrades(sid, transport), + 'pingTimeout': int(self.ping_timeout * 1000), + 'pingInterval': int( + self.ping_interval + self.ping_interval_grace_period) * 1000, + 'maxPayload': self.max_http_buffer_size, + }) + s.send(pkt) + s.schedule_ping() + + # NOTE: some sections below are marked as "no cover" to workaround + # what seems to be a bug in the coverage package. 
All the lines below + # are covered by tests, but some are not reported as such for some + # reason + ret = self._trigger_event('connect', sid, environ, run_async=False) + if ret is not None and ret is not True: # pragma: no cover + del self.sockets[sid] + self.logger.warning('Application rejected connection') + return self._unauthorized(ret or None) + + if transport == 'websocket': # pragma: no cover + ret = s.handle_get_request(environ, start_response) + if s.closed and sid in self.sockets: + # websocket connection ended, so we are done + del self.sockets[sid] + return ret + else: # pragma: no cover + s.connected = True + headers = None + if self.cookie: + if isinstance(self.cookie, dict): + headers = [( + 'Set-Cookie', + self._generate_sid_cookie(sid, self.cookie) + )] + else: + headers = [( + 'Set-Cookie', + self._generate_sid_cookie(sid, { + 'name': self.cookie, 'path': '/', 'SameSite': 'Lax' + }) + )] + try: + return self._ok(s.poll(), headers=headers, + jsonp_index=jsonp_index) + except exceptions.QueueEmpty: + return self._bad_request() + + def _trigger_event(self, event, *args, **kwargs): + """Invoke an event handler.""" + run_async = kwargs.pop('run_async', False) + if event in self.handlers: + def run_handler(): + try: + try: + return self.handlers[event](*args) + except TypeError: + if event == 'disconnect' and \ + len(args) == 2: # pragma: no branch + # legacy disconnect events do not have a reason + # argument + return self.handlers[event](args[0]) + else: # pragma: no cover + raise + except: + self.logger.exception(event + ' handler error') + if event == 'connect': + # if connect handler raised error we reject the + # connection + return False + + if run_async: + return self.start_background_task(run_handler) + else: + return run_handler() + + def _service_task(self): # pragma: no cover + """Monitor connected clients and clean up those that time out.""" + self.service_task_event = self.create_event() + while not self.service_task_event.is_set(): + if 
len(self.sockets) == 0: + # nothing to do + if self.service_task_event.wait(timeout=self.ping_timeout): + break + continue + + # go through the entire client list in a ping interval cycle + sleep_interval = float(self.ping_timeout) / len(self.sockets) + + try: + # iterate over the current clients + for s in self.sockets.copy().values(): + if s.closed: + try: + del self.sockets[s.sid] + except KeyError: + # the socket could have also been removed by + # the _get_socket() method from another thread + pass + elif not s.closing: + s.check_ping_timeout() + if self.service_task_event.wait(timeout=sleep_interval): + raise KeyboardInterrupt() + except (SystemExit, KeyboardInterrupt): + self.logger.info('service task canceled') + break + except: + # an unexpected exception has occurred, log it and continue + self.logger.exception('service task exception') diff --git a/env/lib/python3.10/site-packages/engineio/socket.py b/env/lib/python3.10/site-packages/engineio/socket.py new file mode 100644 index 0000000..26bb94b --- /dev/null +++ b/env/lib/python3.10/site-packages/engineio/socket.py @@ -0,0 +1,256 @@ +import sys +import time + +from . import base_socket +from . import exceptions +from . import packet +from . 
import payload + + +class Socket(base_socket.BaseSocket): + """An Engine.IO socket.""" + def poll(self): + """Wait for packets to send to the client.""" + queue_empty = self.server.get_queue_empty_exception() + try: + packets = [self.queue.get( + timeout=self.server.ping_interval + self.server.ping_timeout)] + self.queue.task_done() + except queue_empty: + raise exceptions.QueueEmpty() + if packets == [None]: + return [] + while True: + try: + pkt = self.queue.get(block=False) + self.queue.task_done() + if pkt is None: + self.queue.put(None) + break + packets.append(pkt) + except queue_empty: + break + return packets + + def receive(self, pkt): + """Receive packet from the client.""" + packet_name = packet.packet_names[pkt.packet_type] \ + if pkt.packet_type < len(packet.packet_names) else 'UNKNOWN' + self.server.logger.info('%s: Received packet %s data %s', + self.sid, packet_name, + pkt.data if not isinstance(pkt.data, bytes) + else '') + if pkt.packet_type == packet.PONG: + self.schedule_ping() + elif pkt.packet_type == packet.MESSAGE: + self.server._trigger_event('message', self.sid, pkt.data, + run_async=self.server.async_handlers) + elif pkt.packet_type == packet.UPGRADE: + self.send(packet.Packet(packet.NOOP)) + elif pkt.packet_type == packet.CLOSE: + self.close(wait=False, abort=True, + reason=self.server.reason.CLIENT_DISCONNECT) + else: + raise exceptions.UnknownPacketError() + + def check_ping_timeout(self): + """Make sure the client is still responding to pings.""" + if self.closed: + raise exceptions.SocketIsClosedError() + if self.last_ping and \ + time.time() - self.last_ping > self.server.ping_timeout: + self.server.logger.info('%s: Client is gone, closing socket', + self.sid) + # Passing abort=False here will cause close() to write a + # CLOSE packet. 
This has the effect of updating half-open sockets + # to their correct state of disconnected + self.close(wait=False, abort=False, + reason=self.server.reason.PING_TIMEOUT) + return False + return True + + def send(self, pkt): + """Send a packet to the client.""" + if not self.check_ping_timeout(): + return + else: + self.queue.put(pkt) + self.server.logger.info('%s: Sending packet %s data %s', + self.sid, packet.packet_names[pkt.packet_type], + pkt.data if not isinstance(pkt.data, bytes) + else '') + + def handle_get_request(self, environ, start_response): + """Handle a long-polling GET request from the client.""" + connections = [ + s.strip() + for s in environ.get('HTTP_CONNECTION', '').lower().split(',')] + transport = environ.get('HTTP_UPGRADE', '').lower() + if 'upgrade' in connections and transport in self.upgrade_protocols: + self.server.logger.info('%s: Received request to upgrade to %s', + self.sid, transport) + return getattr(self, '_upgrade_' + transport)(environ, + start_response) + if self.upgrading or self.upgraded: + # we are upgrading to WebSocket, do not return any more packets + # through the polling endpoint + return [packet.Packet(packet.NOOP)] + try: + packets = self.poll() + except exceptions.QueueEmpty: + exc = sys.exc_info() + self.close(wait=False, reason=self.server.reason.TRANSPORT_ERROR) + raise exc[1].with_traceback(exc[2]) + return packets + + def handle_post_request(self, environ): + """Handle a long-polling POST request from the client.""" + length = int(environ.get('CONTENT_LENGTH', '0')) + if length > self.server.max_http_buffer_size: + raise exceptions.ContentTooLongError() + else: + body = environ['wsgi.input'].read(length).decode('utf-8') + p = payload.Payload(encoded_payload=body) + for pkt in p.packets: + self.receive(pkt) + + def close(self, wait=True, abort=False, reason=None): + """Close the socket connection.""" + if not self.closed and not self.closing: + self.closing = True + self.server._trigger_event( + 'disconnect', 
self.sid, + reason or self.server.reason.SERVER_DISCONNECT, + run_async=False) + if not abort: + self.send(packet.Packet(packet.CLOSE)) + self.closed = True + self.queue.put(None) + if wait: + self.queue.join() + + def schedule_ping(self): + self.server.start_background_task(self._send_ping) + + def _send_ping(self): + self.last_ping = None + self.server.sleep(self.server.ping_interval) + if not self.closing and not self.closed: + self.last_ping = time.time() + self.send(packet.Packet(packet.PING)) + + def _upgrade_websocket(self, environ, start_response): + """Upgrade the connection from polling to websocket.""" + if self.upgraded: + raise OSError('Socket has been upgraded already') + if self.server._async['websocket'] is None: + # the selected async mode does not support websocket + return self.server._bad_request() + ws = self.server._async['websocket']( + self._websocket_handler, self.server) + return ws(environ, start_response) + + def _websocket_handler(self, ws): + """Engine.IO handler for websocket transport.""" + def websocket_wait(): + data = ws.wait() + if data and len(data) > self.server.max_http_buffer_size: + raise ValueError('packet is too large') + return data + + # try to set a socket timeout matching the configured ping interval + # and timeout + for attr in ['_sock', 'socket']: # pragma: no cover + if hasattr(ws, attr) and hasattr(getattr(ws, attr), 'settimeout'): + getattr(ws, attr).settimeout( + self.server.ping_interval + self.server.ping_timeout) + + if self.connected: + # the socket was already connected, so this is an upgrade + self.upgrading = True # hold packet sends during the upgrade + + pkt = websocket_wait() + decoded_pkt = packet.Packet(encoded_packet=pkt) + if decoded_pkt.packet_type != packet.PING or \ + decoded_pkt.data != 'probe': + self.server.logger.info( + '%s: Failed websocket upgrade, no PING packet', self.sid) + self.upgrading = False + return [] + ws.send(packet.Packet(packet.PONG, data='probe').encode()) + 
self.queue.put(packet.Packet(packet.NOOP)) # end poll + + pkt = websocket_wait() + decoded_pkt = packet.Packet(encoded_packet=pkt) + if decoded_pkt.packet_type != packet.UPGRADE: + self.upgraded = False + self.server.logger.info( + ('%s: Failed websocket upgrade, expected UPGRADE packet, ' + 'received %s instead.'), + self.sid, pkt) + self.upgrading = False + return [] + self.upgraded = True + self.upgrading = False + else: + self.connected = True + self.upgraded = True + + # start separate writer thread + def writer(): + while True: + packets = None + try: + packets = self.poll() + except exceptions.QueueEmpty: + break + if not packets: + # empty packet list returned -> connection closed + break + try: + for pkt in packets: + ws.send(pkt.encode()) + except: + break + ws.close() + + writer_task = self.server.start_background_task(writer) + + self.server.logger.info( + '%s: Upgrade to websocket successful', self.sid) + + while True: + p = None + try: + p = websocket_wait() + except Exception as e: + # if the socket is already closed, we can assume this is a + # downstream error of that + if not self.closed: # pragma: no cover + self.server.logger.info( + '%s: Unexpected error "%s", closing connection', + self.sid, str(e)) + break + if p is None: + # connection closed by client + break + pkt = packet.Packet(encoded_packet=p) + try: + self.receive(pkt) + except exceptions.UnknownPacketError: # pragma: no cover + pass + except exceptions.SocketIsClosedError: # pragma: no cover + self.server.logger.info('Receive error -- socket is closed') + break + except: # pragma: no cover + # if we get an unexpected exception we log the error and exit + # the connection properly + self.server.logger.exception('Unknown receive error') + break + + self.queue.put(None) # unlock the writer task so that it can exit + writer_task.join() + self.close(wait=False, abort=True, + reason=self.server.reason.TRANSPORT_CLOSE) + + return [] diff --git 
a/env/lib/python3.10/site-packages/engineio/static_files.py b/env/lib/python3.10/site-packages/engineio/static_files.py new file mode 100644 index 0000000..77c8915 --- /dev/null +++ b/env/lib/python3.10/site-packages/engineio/static_files.py @@ -0,0 +1,60 @@ +content_types = { + 'css': 'text/css', + 'gif': 'image/gif', + 'html': 'text/html', + 'jpg': 'image/jpeg', + 'js': 'application/javascript', + 'json': 'application/json', + 'png': 'image/png', + 'txt': 'text/plain', +} + + +def get_static_file(path, static_files): + """Return the local filename and content type for the requested static + file URL. + + :param path: the path portion of the requested URL. + :param static_files: a static file configuration dictionary. + + This function returns a dictionary with two keys, "filename" and + "content_type". If the requested URL does not match any static file, the + return value is None. + """ + extra_path = '' + if path in static_files: + f = static_files[path] + else: + f = None + while path != '': + path, last = path.rsplit('/', 1) + extra_path = '/' + last + extra_path + if path in static_files: + f = static_files[path] + break + elif path + '/' in static_files: + f = static_files[path + '/'] + break + if f: + if isinstance(f, str): + f = {'filename': f} + else: + f = f.copy() # in case it is mutated below + if f['filename'].endswith('/') and extra_path.startswith('/'): + extra_path = extra_path[1:] + f['filename'] += extra_path + if f['filename'].endswith('/'): + if '' in static_files: + if isinstance(static_files[''], str): + f['filename'] += static_files[''] + else: + f['filename'] += static_files['']['filename'] + if 'content_type' in static_files['']: + f['content_type'] = static_files['']['content_type'] + else: + f['filename'] += 'index.html' + if 'content_type' not in f: + ext = f['filename'].rsplit('.')[-1] + f['content_type'] = content_types.get( + ext, 'application/octet-stream') + return f diff --git 
a/env/lib/python3.10/site-packages/evolutionapi.egg-link b/env/lib/python3.10/site-packages/evolutionapi.egg-link new file mode 100644 index 0000000..3a55ebd --- /dev/null +++ b/env/lib/python3.10/site-packages/evolutionapi.egg-link @@ -0,0 +1,2 @@ +/home/davidson/Projects/evolution_client/python +. \ No newline at end of file diff --git a/env/lib/python3.10/site-packages/h11-0.14.0.dist-info/INSTALLER b/env/lib/python3.10/site-packages/h11-0.14.0.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/env/lib/python3.10/site-packages/h11-0.14.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/env/lib/python3.10/site-packages/h11-0.14.0.dist-info/LICENSE.txt b/env/lib/python3.10/site-packages/h11-0.14.0.dist-info/LICENSE.txt new file mode 100644 index 0000000..8f080ea --- /dev/null +++ b/env/lib/python3.10/site-packages/h11-0.14.0.dist-info/LICENSE.txt @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2016 Nathaniel J. Smith and other contributors + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/env/lib/python3.10/site-packages/h11-0.14.0.dist-info/METADATA b/env/lib/python3.10/site-packages/h11-0.14.0.dist-info/METADATA new file mode 100644 index 0000000..cf12a82 --- /dev/null +++ b/env/lib/python3.10/site-packages/h11-0.14.0.dist-info/METADATA @@ -0,0 +1,193 @@ +Metadata-Version: 2.1 +Name: h11 +Version: 0.14.0 +Summary: A pure-Python, bring-your-own-I/O implementation of HTTP/1.1 +Home-page: https://github.com/python-hyper/h11 +Author: Nathaniel J. Smith +Author-email: njs@pobox.com +License: MIT +Classifier: Development Status :: 3 - Alpha +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Topic :: Internet :: WWW/HTTP +Classifier: Topic :: System :: Networking +Requires-Python: >=3.7 +License-File: LICENSE.txt +Requires-Dist: typing-extensions ; python_version < "3.8" + +h11 +=== + +.. image:: https://travis-ci.org/python-hyper/h11.svg?branch=master + :target: https://travis-ci.org/python-hyper/h11 + :alt: Automated test status + +.. image:: https://codecov.io/gh/python-hyper/h11/branch/master/graph/badge.svg + :target: https://codecov.io/gh/python-hyper/h11 + :alt: Test coverage + +.. 
image:: https://readthedocs.org/projects/h11/badge/?version=latest + :target: http://h11.readthedocs.io/en/latest/?badge=latest + :alt: Documentation Status + +This is a little HTTP/1.1 library written from scratch in Python, +heavily inspired by `hyper-h2 `_. + +It's a "bring-your-own-I/O" library; h11 contains no IO code +whatsoever. This means you can hook h11 up to your favorite network +API, and that could be anything you want: synchronous, threaded, +asynchronous, or your own implementation of `RFC 6214 +`_ -- h11 won't judge you. +(Compare this to the current state of the art, where every time a `new +network API `_ comes along then someone +gets to start over reimplementing the entire HTTP protocol from +scratch.) Cory Benfield made an `excellent blog post describing the +benefits of this approach +`_, or if you like video +then here's his `PyCon 2016 talk on the same theme +`_. + +This also means that h11 is not immediately useful out of the box: +it's a toolkit for building programs that speak HTTP, not something +that could directly replace ``requests`` or ``twisted.web`` or +whatever. But h11 makes it much easier to implement something like +``requests`` or ``twisted.web``. + +At a high level, working with h11 goes like this: + +1) First, create an ``h11.Connection`` object to track the state of a + single HTTP/1.1 connection. + +2) When you read data off the network, pass it to + ``conn.receive_data(...)``; you'll get back a list of objects + representing high-level HTTP "events". + +3) When you want to send a high-level HTTP event, create the + corresponding "event" object and pass it to ``conn.send(...)``; + this will give you back some bytes that you can then push out + through the network. + +For example, a client might instantiate and then send a +``h11.Request`` object, then zero or more ``h11.Data`` objects for the +request body (e.g., if this is a POST), and then a +``h11.EndOfMessage`` to indicate the end of the message. 
Then the +server would then send back a ``h11.Response``, some ``h11.Data``, and +its own ``h11.EndOfMessage``. If either side violates the protocol, +you'll get a ``h11.ProtocolError`` exception. + +h11 is suitable for implementing both servers and clients, and has a +pleasantly symmetric API: the events you send as a client are exactly +the ones that you receive as a server and vice-versa. + +`Here's an example of a tiny HTTP client +`_ + +It also has `a fine manual `_. + +FAQ +--- + +*Whyyyyy?* + +I wanted to play with HTTP in `Curio +`__ and `Trio +`__, which at the time didn't have any +HTTP libraries. So I thought, no big deal, Python has, like, a dozen +different implementations of HTTP, surely I can find one that's +reusable. I didn't find one, but I did find Cory's call-to-arms +blog-post. So I figured, well, fine, if I have to implement HTTP from +scratch, at least I can make sure no-one *else* has to ever again. + +*Should I use it?* + +Maybe. You should be aware that it's a very young project. But, it's +feature complete and has an exhaustive test-suite and complete docs, +so the next step is for people to try using it and see how it goes +:-). If you do then please let us know -- if nothing else we'll want +to talk to you before making any incompatible changes! + +*What are the features/limitations?* + +Roughly speaking, it's trying to be a robust, complete, and non-hacky +implementation of the first "chapter" of the HTTP/1.1 spec: `RFC 7230: +HTTP/1.1 Message Syntax and Routing +`_. That is, it mostly focuses on +implementing HTTP at the level of taking bytes on and off the wire, +and the headers related to that, and tries to be anal about spec +conformance. It doesn't know about higher-level concerns like URL +routing, conditional GETs, cross-origin cookie policies, or content +negotiation. 
But it does know how to take care of framing, +cross-version differences in keep-alive handling, and the "obsolete +line folding" rule, so you can focus your energies on the hard / +interesting parts for your application, and it tries to support the +full specification in the sense that any useful HTTP/1.1 conformant +application should be able to use h11. + +It's pure Python, and has no dependencies outside of the standard +library. + +It has a test suite with 100.0% coverage for both statements and +branches. + +Currently it supports Python 3 (testing on 3.7-3.10) and PyPy 3. +The last Python 2-compatible version was h11 0.11.x. +(Originally it had a Cython wrapper for `http-parser +`_ and a beautiful nested state +machine implemented with ``yield from`` to postprocess the output. But +I had to take these out -- the new *parser* needs fewer lines-of-code +than the old *parser wrapper*, is written in pure Python, uses no +exotic language syntax, and has more features. It's sad, really; that +old state machine was really slick. I just need a few sentences here +to mourn that.) + +I don't know how fast it is. I haven't benchmarked or profiled it yet, +so it's probably got a few pointless hot spots, and I've been trying +to err on the side of simplicity and robustness instead of +micro-optimization. But at the architectural level I tried hard to +avoid fundamentally bad decisions, e.g., I believe that all the +parsing algorithms remain linear-time even in the face of pathological +input like slowloris, and there are no byte-by-byte loops. (I also +believe that it maintains bounded memory usage in the face of +arbitrary/pathological input.) + +The whole library is ~800 lines-of-code. You can read and understand +the whole thing in less than an hour. 
Most of the energy invested in +this so far has been spent on trying to keep things simple by +minimizing special-cases and ad hoc state manipulation; even though it +is now quite small and simple, I'm still annoyed that I haven't +figured out how to make it even smaller and simpler. (Unfortunately, +HTTP does not lend itself to simplicity.) + +The API is ~feature complete and I don't expect the general outlines +to change much, but you can't judge an API's ergonomics until you +actually document and use it, so I'd expect some changes in the +details. + +*How do I try it?* + +.. code-block:: sh + + $ pip install h11 + $ git clone git@github.com:python-hyper/h11 + $ cd h11/examples + $ python basic-client.py + +and go from there. + +*License?* + +MIT + +*Code of conduct?* + +Contributors are requested to follow our `code of conduct +`_ in +all project spaces. diff --git a/env/lib/python3.10/site-packages/h11-0.14.0.dist-info/RECORD b/env/lib/python3.10/site-packages/h11-0.14.0.dist-info/RECORD new file mode 100644 index 0000000..b4f073d --- /dev/null +++ b/env/lib/python3.10/site-packages/h11-0.14.0.dist-info/RECORD @@ -0,0 +1,52 @@ +h11-0.14.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +h11-0.14.0.dist-info/LICENSE.txt,sha256=N9tbuFkm2yikJ6JYZ_ELEjIAOuob5pzLhRE4rbjm82E,1124 +h11-0.14.0.dist-info/METADATA,sha256=B7pZ0m7WBXNs17vl6hUH9bJTL9s37DaGvY31w7jNxSg,8175 +h11-0.14.0.dist-info/RECORD,, +h11-0.14.0.dist-info/WHEEL,sha256=ewwEueio1C2XeHTvT17n8dZUJgOvyCWCt0WVNLClP9o,92 +h11-0.14.0.dist-info/top_level.txt,sha256=F7dC4jl3zeh8TGHEPaWJrMbeuoWbS379Gwdi-Yvdcis,4 +h11/__init__.py,sha256=iO1KzkSO42yZ6ffg-VMgbx_ZVTWGUY00nRYEWn-s3kY,1507 +h11/__pycache__/__init__.cpython-310.pyc,, +h11/__pycache__/_abnf.cpython-310.pyc,, +h11/__pycache__/_connection.cpython-310.pyc,, +h11/__pycache__/_events.cpython-310.pyc,, +h11/__pycache__/_headers.cpython-310.pyc,, +h11/__pycache__/_readers.cpython-310.pyc,, +h11/__pycache__/_receivebuffer.cpython-310.pyc,, 
+h11/__pycache__/_state.cpython-310.pyc,, +h11/__pycache__/_util.cpython-310.pyc,, +h11/__pycache__/_version.cpython-310.pyc,, +h11/__pycache__/_writers.cpython-310.pyc,, +h11/_abnf.py,sha256=ybixr0xsupnkA6GFAyMubuXF6Tc1lb_hF890NgCsfNc,4815 +h11/_connection.py,sha256=eS2sorMD0zKLCFiB9lW9W9F_Nzny2tjHa4e6s1ujr1c,26539 +h11/_events.py,sha256=LEfuvg1AbhHaVRwxCd0I-pFn9-ezUOaoL8o2Kvy1PBA,11816 +h11/_headers.py,sha256=RqB8cd8CN0blYPzcLe5qeCh-phv6D1U_CHj4hs67lgQ,10230 +h11/_readers.py,sha256=EbSed0jzwVUiD1nOPAeUcVE4Flf3wXkxfb8c06-OTBM,8383 +h11/_receivebuffer.py,sha256=xrspsdsNgWFxRfQcTXxR8RrdjRXXTK0Io5cQYWpJ1Ws,5252 +h11/_state.py,sha256=k1VL6SDbaPkSrZ-49ewCXDpuiUS69_46YhbWjuV1qEY,13300 +h11/_util.py,sha256=LWkkjXyJaFlAy6Lt39w73UStklFT5ovcvo0TkY7RYuk,4888 +h11/_version.py,sha256=LVyTdiZRzIIEv79UyOgbM5iUrJUllEzlCWaJEYBY1zc,686 +h11/_writers.py,sha256=oFKm6PtjeHfbj4RLX7VB7KDc1gIY53gXG3_HR9ltmTA,5081 +h11/py.typed,sha256=sow9soTwP9T_gEAQSVh7Gb8855h04Nwmhs2We-JRgZM,7 +h11/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +h11/tests/__pycache__/__init__.cpython-310.pyc,, +h11/tests/__pycache__/helpers.cpython-310.pyc,, +h11/tests/__pycache__/test_against_stdlib_http.cpython-310.pyc,, +h11/tests/__pycache__/test_connection.cpython-310.pyc,, +h11/tests/__pycache__/test_events.cpython-310.pyc,, +h11/tests/__pycache__/test_headers.cpython-310.pyc,, +h11/tests/__pycache__/test_helpers.cpython-310.pyc,, +h11/tests/__pycache__/test_io.cpython-310.pyc,, +h11/tests/__pycache__/test_receivebuffer.cpython-310.pyc,, +h11/tests/__pycache__/test_state.cpython-310.pyc,, +h11/tests/__pycache__/test_util.cpython-310.pyc,, +h11/tests/data/test-file,sha256=ZJ03Rqs98oJw29OHzJg7LlMzyGQaRAY0r3AqBeM2wVU,65 +h11/tests/helpers.py,sha256=a1EVG_p7xU4wRsa3tMPTRxuaKCmretok9sxXWvqfmQA,3355 +h11/tests/test_against_stdlib_http.py,sha256=cojCHgHXFQ8gWhNlEEwl3trmOpN-5uDukRoHnElqo3A,3995 +h11/tests/test_connection.py,sha256=ZbPLDPclKvjgjAhgk-WlCPBaf17c4XUIV2tpaW08jOI,38720 
+h11/tests/test_events.py,sha256=LPVLbcV-NvPNK9fW3rraR6Bdpz1hAlsWubMtNaJ5gHg,4657 +h11/tests/test_headers.py,sha256=qd8T1Zenuz5GbD6wklSJ5G8VS7trrYgMV0jT-SMvqg8,5612 +h11/tests/test_helpers.py,sha256=kAo0CEM4LGqmyyP2ZFmhsyq3UFJqoFfAbzu3hbWreRM,794 +h11/tests/test_io.py,sha256=uCZVnjarkRBkudfC1ij-KSCQ71XWJhnkgkgWWkKgYPQ,16386 +h11/tests/test_receivebuffer.py,sha256=3jGbeJM36Akqg_pAhPb7XzIn2NS6RhPg-Ryg8Eu6ytk,3454 +h11/tests/test_state.py,sha256=rqll9WqFsJPE0zSrtCn9LH659mPKsDeXZ-DwXwleuBQ,8928 +h11/tests/test_util.py,sha256=VO5L4nSFe4pgtSwKuv6u_6l0H7UeizF5WKuHTWreg70,2970 diff --git a/env/lib/python3.10/site-packages/h11-0.14.0.dist-info/WHEEL b/env/lib/python3.10/site-packages/h11-0.14.0.dist-info/WHEEL new file mode 100644 index 0000000..5bad85f --- /dev/null +++ b/env/lib/python3.10/site-packages/h11-0.14.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/env/lib/python3.10/site-packages/h11-0.14.0.dist-info/top_level.txt b/env/lib/python3.10/site-packages/h11-0.14.0.dist-info/top_level.txt new file mode 100644 index 0000000..0d24def --- /dev/null +++ b/env/lib/python3.10/site-packages/h11-0.14.0.dist-info/top_level.txt @@ -0,0 +1 @@ +h11 diff --git a/env/lib/python3.10/site-packages/h11/__init__.py b/env/lib/python3.10/site-packages/h11/__init__.py new file mode 100644 index 0000000..989e92c --- /dev/null +++ b/env/lib/python3.10/site-packages/h11/__init__.py @@ -0,0 +1,62 @@ +# A highish-level implementation of the HTTP/1.1 wire protocol (RFC 7230), +# containing no networking code at all, loosely modelled on hyper-h2's generic +# implementation of HTTP/2 (and in particular the h2.connection.H2Connection +# class). 
There's still a bunch of subtle details you need to get right if you +# want to make this actually useful, because it doesn't implement all the +# semantics to check that what you're asking to write to the wire is sensible, +# but at least it gets you out of dealing with the wire itself. + +from h11._connection import Connection, NEED_DATA, PAUSED +from h11._events import ( + ConnectionClosed, + Data, + EndOfMessage, + Event, + InformationalResponse, + Request, + Response, +) +from h11._state import ( + CLIENT, + CLOSED, + DONE, + ERROR, + IDLE, + MIGHT_SWITCH_PROTOCOL, + MUST_CLOSE, + SEND_BODY, + SEND_RESPONSE, + SERVER, + SWITCHED_PROTOCOL, +) +from h11._util import LocalProtocolError, ProtocolError, RemoteProtocolError +from h11._version import __version__ + +PRODUCT_ID = "python-h11/" + __version__ + + +__all__ = ( + "Connection", + "NEED_DATA", + "PAUSED", + "ConnectionClosed", + "Data", + "EndOfMessage", + "Event", + "InformationalResponse", + "Request", + "Response", + "CLIENT", + "CLOSED", + "DONE", + "ERROR", + "IDLE", + "MUST_CLOSE", + "SEND_BODY", + "SEND_RESPONSE", + "SERVER", + "SWITCHED_PROTOCOL", + "ProtocolError", + "LocalProtocolError", + "RemoteProtocolError", +) diff --git a/env/lib/python3.10/site-packages/h11/_abnf.py b/env/lib/python3.10/site-packages/h11/_abnf.py new file mode 100644 index 0000000..933587f --- /dev/null +++ b/env/lib/python3.10/site-packages/h11/_abnf.py @@ -0,0 +1,132 @@ +# We use native strings for all the re patterns, to take advantage of string +# formatting, and then convert to bytestrings when compiling the final re +# objects. + +# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#whitespace +# OWS = *( SP / HTAB ) +# ; optional whitespace +OWS = r"[ \t]*" + +# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#rule.token.separators +# token = 1*tchar +# +# tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" +# / "+" / "-" / "." 
/ "^" / "_" / "`" / "|" / "~" +# / DIGIT / ALPHA +# ; any VCHAR, except delimiters +token = r"[-!#$%&'*+.^_`|~0-9a-zA-Z]+" + +# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#header.fields +# field-name = token +field_name = token + +# The standard says: +# +# field-value = *( field-content / obs-fold ) +# field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] +# field-vchar = VCHAR / obs-text +# obs-fold = CRLF 1*( SP / HTAB ) +# ; obsolete line folding +# ; see Section 3.2.4 +# +# https://tools.ietf.org/html/rfc5234#appendix-B.1 +# +# VCHAR = %x21-7E +# ; visible (printing) characters +# +# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#rule.quoted-string +# obs-text = %x80-FF +# +# However, the standard definition of field-content is WRONG! It disallows +# fields containing a single visible character surrounded by whitespace, +# e.g. "foo a bar". +# +# See: https://www.rfc-editor.org/errata_search.php?rfc=7230&eid=4189 +# +# So our definition of field_content attempts to fix it up... +# +# Also, we allow lots of control characters, because apparently people assume +# that they're legal in practice (e.g., google analytics makes cookies with +# \x01 in them!): +# https://github.com/python-hyper/h11/issues/57 +# We still don't allow NUL or whitespace, because those are often treated as +# meta-characters and letting them through can lead to nasty issues like SSRF. +vchar = r"[\x21-\x7e]" +vchar_or_obs_text = r"[^\x00\s]" +field_vchar = vchar_or_obs_text +field_content = r"{field_vchar}+(?:[ \t]+{field_vchar}+)*".format(**globals()) + +# We handle obs-fold at a different level, and our fixed-up field_content +# already grows to swallow the whole value, so ? 
instead of * +field_value = r"({field_content})?".format(**globals()) + +# header-field = field-name ":" OWS field-value OWS +header_field = ( + r"(?P{field_name})" + r":" + r"{OWS}" + r"(?P{field_value})" + r"{OWS}".format(**globals()) +) + +# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#request.line +# +# request-line = method SP request-target SP HTTP-version CRLF +# method = token +# HTTP-version = HTTP-name "/" DIGIT "." DIGIT +# HTTP-name = %x48.54.54.50 ; "HTTP", case-sensitive +# +# request-target is complicated (see RFC 7230 sec 5.3) -- could be path, full +# URL, host+port (for connect), or even "*", but in any case we are guaranteed +# that it contists of the visible printing characters. +method = token +request_target = r"{vchar}+".format(**globals()) +http_version = r"HTTP/(?P[0-9]\.[0-9])" +request_line = ( + r"(?P{method})" + r" " + r"(?P{request_target})" + r" " + r"{http_version}".format(**globals()) +) + +# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#status.line +# +# status-line = HTTP-version SP status-code SP reason-phrase CRLF +# status-code = 3DIGIT +# reason-phrase = *( HTAB / SP / VCHAR / obs-text ) +status_code = r"[0-9]{3}" +reason_phrase = r"([ \t]|{vchar_or_obs_text})*".format(**globals()) +status_line = ( + r"{http_version}" + r" " + r"(?P{status_code})" + # However, there are apparently a few too many servers out there that just + # leave out the reason phrase: + # https://github.com/scrapy/scrapy/issues/345#issuecomment-281756036 + # https://github.com/seanmonstar/httparse/issues/29 + # so make it optional. ?: is a non-capturing group. + r"(?: (?P{reason_phrase}))?".format(**globals()) +) + +HEXDIG = r"[0-9A-Fa-f]" +# Actually +# +# chunk-size = 1*HEXDIG +# +# but we impose an upper-limit to avoid ridiculosity. 
len(str(2**64)) == 20 +chunk_size = r"({HEXDIG}){{1,20}}".format(**globals()) +# Actually +# +# chunk-ext = *( ";" chunk-ext-name [ "=" chunk-ext-val ] ) +# +# but we aren't parsing the things so we don't really care. +chunk_ext = r";.*" +chunk_header = ( + r"(?P{chunk_size})" + r"(?P{chunk_ext})?" + r"{OWS}\r\n".format( + **globals() + ) # Even though the specification does not allow for extra whitespaces, + # we are lenient with trailing whitespaces because some servers on the wild use it. +) diff --git a/env/lib/python3.10/site-packages/h11/_connection.py b/env/lib/python3.10/site-packages/h11/_connection.py new file mode 100644 index 0000000..d175270 --- /dev/null +++ b/env/lib/python3.10/site-packages/h11/_connection.py @@ -0,0 +1,633 @@ +# This contains the main Connection class. Everything in h11 revolves around +# this. +from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Type, Union + +from ._events import ( + ConnectionClosed, + Data, + EndOfMessage, + Event, + InformationalResponse, + Request, + Response, +) +from ._headers import get_comma_header, has_expect_100_continue, set_comma_header +from ._readers import READERS, ReadersType +from ._receivebuffer import ReceiveBuffer +from ._state import ( + _SWITCH_CONNECT, + _SWITCH_UPGRADE, + CLIENT, + ConnectionState, + DONE, + ERROR, + MIGHT_SWITCH_PROTOCOL, + SEND_BODY, + SERVER, + SWITCHED_PROTOCOL, +) +from ._util import ( # Import the internal things we need + LocalProtocolError, + RemoteProtocolError, + Sentinel, +) +from ._writers import WRITERS, WritersType + +# Everything in __all__ gets re-exported as part of the h11 public API. +__all__ = ["Connection", "NEED_DATA", "PAUSED"] + + +class NEED_DATA(Sentinel, metaclass=Sentinel): + pass + + +class PAUSED(Sentinel, metaclass=Sentinel): + pass + + +# If we ever have this much buffered without it making a complete parseable +# event, we error out. 
The only time we really buffer is when reading the +# request/response line + headers together, so this is effectively the limit on +# the size of that. +# +# Some precedents for defaults: +# - node.js: 80 * 1024 +# - tomcat: 8 * 1024 +# - IIS: 16 * 1024 +# - Apache: <8 KiB per line> +DEFAULT_MAX_INCOMPLETE_EVENT_SIZE = 16 * 1024 + +# RFC 7230's rules for connection lifecycles: +# - If either side says they want to close the connection, then the connection +# must close. +# - HTTP/1.1 defaults to keep-alive unless someone says Connection: close +# - HTTP/1.0 defaults to close unless both sides say Connection: keep-alive +# (and even this is a mess -- e.g. if you're implementing a proxy then +# sending Connection: keep-alive is forbidden). +# +# We simplify life by simply not supporting keep-alive with HTTP/1.0 peers. So +# our rule is: +# - If someone says Connection: close, we will close +# - If someone uses HTTP/1.0, we will close. +def _keep_alive(event: Union[Request, Response]) -> bool: + connection = get_comma_header(event.headers, b"connection") + if b"close" in connection: + return False + if getattr(event, "http_version", b"1.1") < b"1.1": + return False + return True + + +def _body_framing( + request_method: bytes, event: Union[Request, Response] +) -> Tuple[str, Union[Tuple[()], Tuple[int]]]: + # Called when we enter SEND_BODY to figure out framing information for + # this body. + # + # These are the only two events that can trigger a SEND_BODY state: + assert type(event) in (Request, Response) + # Returns one of: + # + # ("content-length", count) + # ("chunked", ()) + # ("http/1.0", ()) + # + # which are (lookup key, *args) for constructing body reader/writer + # objects. + # + # Reference: https://tools.ietf.org/html/rfc7230#section-3.3.3 + # + # Step 1: some responses always have an empty body, regardless of what the + # headers say. 
+ if type(event) is Response: + if ( + event.status_code in (204, 304) + or request_method == b"HEAD" + or (request_method == b"CONNECT" and 200 <= event.status_code < 300) + ): + return ("content-length", (0,)) + # Section 3.3.3 also lists another case -- responses with status_code + # < 200. For us these are InformationalResponses, not Responses, so + # they can't get into this function in the first place. + assert event.status_code >= 200 + + # Step 2: check for Transfer-Encoding (T-E beats C-L): + transfer_encodings = get_comma_header(event.headers, b"transfer-encoding") + if transfer_encodings: + assert transfer_encodings == [b"chunked"] + return ("chunked", ()) + + # Step 3: check for Content-Length + content_lengths = get_comma_header(event.headers, b"content-length") + if content_lengths: + return ("content-length", (int(content_lengths[0]),)) + + # Step 4: no applicable headers; fallback/default depends on type + if type(event) is Request: + return ("content-length", (0,)) + else: + return ("http/1.0", ()) + + +################################################################ +# +# The main Connection class +# +################################################################ + + +class Connection: + """An object encapsulating the state of an HTTP connection. + + Args: + our_role: If you're implementing a client, pass :data:`h11.CLIENT`. If + you're implementing a server, pass :data:`h11.SERVER`. + + max_incomplete_event_size (int): + The maximum number of bytes we're willing to buffer of an + incomplete event. In practice this mostly sets a limit on the + maximum size of the request/response line + headers. If this is + exceeded, then :meth:`next_event` will raise + :exc:`RemoteProtocolError`. 
+ + """ + + def __init__( + self, + our_role: Type[Sentinel], + max_incomplete_event_size: int = DEFAULT_MAX_INCOMPLETE_EVENT_SIZE, + ) -> None: + self._max_incomplete_event_size = max_incomplete_event_size + # State and role tracking + if our_role not in (CLIENT, SERVER): + raise ValueError("expected CLIENT or SERVER, not {!r}".format(our_role)) + self.our_role = our_role + self.their_role: Type[Sentinel] + if our_role is CLIENT: + self.their_role = SERVER + else: + self.their_role = CLIENT + self._cstate = ConnectionState() + + # Callables for converting data->events or vice-versa given the + # current state + self._writer = self._get_io_object(self.our_role, None, WRITERS) + self._reader = self._get_io_object(self.their_role, None, READERS) + + # Holds any unprocessed received data + self._receive_buffer = ReceiveBuffer() + # If this is true, then it indicates that the incoming connection was + # closed *after* the end of whatever's in self._receive_buffer: + self._receive_buffer_closed = False + + # Extra bits of state that don't fit into the state machine. + # + # These two are only used to interpret framing headers for figuring + # out how to read/write response bodies. their_http_version is also + # made available as a convenient public API. + self.their_http_version: Optional[bytes] = None + self._request_method: Optional[bytes] = None + # This is pure flow-control and doesn't at all affect the set of legal + # transitions, so no need to bother ConnectionState with it: + self.client_is_waiting_for_100_continue = False + + @property + def states(self) -> Dict[Type[Sentinel], Type[Sentinel]]: + """A dictionary like:: + + {CLIENT: , SERVER: } + + See :ref:`state-machine` for details. + + """ + return dict(self._cstate.states) + + @property + def our_state(self) -> Type[Sentinel]: + """The current state of whichever role we are playing. See + :ref:`state-machine` for details. 
+ """ + return self._cstate.states[self.our_role] + + @property + def their_state(self) -> Type[Sentinel]: + """The current state of whichever role we are NOT playing. See + :ref:`state-machine` for details. + """ + return self._cstate.states[self.their_role] + + @property + def they_are_waiting_for_100_continue(self) -> bool: + return self.their_role is CLIENT and self.client_is_waiting_for_100_continue + + def start_next_cycle(self) -> None: + """Attempt to reset our connection state for a new request/response + cycle. + + If both client and server are in :data:`DONE` state, then resets them + both to :data:`IDLE` state in preparation for a new request/response + cycle on this same connection. Otherwise, raises a + :exc:`LocalProtocolError`. + + See :ref:`keepalive-and-pipelining`. + + """ + old_states = dict(self._cstate.states) + self._cstate.start_next_cycle() + self._request_method = None + # self.their_http_version gets left alone, since it presumably lasts + # beyond a single request/response cycle + assert not self.client_is_waiting_for_100_continue + self._respond_to_state_changes(old_states) + + def _process_error(self, role: Type[Sentinel]) -> None: + old_states = dict(self._cstate.states) + self._cstate.process_error(role) + self._respond_to_state_changes(old_states) + + def _server_switch_event(self, event: Event) -> Optional[Type[Sentinel]]: + if type(event) is InformationalResponse and event.status_code == 101: + return _SWITCH_UPGRADE + if type(event) is Response: + if ( + _SWITCH_CONNECT in self._cstate.pending_switch_proposals + and 200 <= event.status_code < 300 + ): + return _SWITCH_CONNECT + return None + + # All events go through here + def _process_event(self, role: Type[Sentinel], event: Event) -> None: + # First, pass the event through the state machine to make sure it + # succeeds. 
+ old_states = dict(self._cstate.states) + if role is CLIENT and type(event) is Request: + if event.method == b"CONNECT": + self._cstate.process_client_switch_proposal(_SWITCH_CONNECT) + if get_comma_header(event.headers, b"upgrade"): + self._cstate.process_client_switch_proposal(_SWITCH_UPGRADE) + server_switch_event = None + if role is SERVER: + server_switch_event = self._server_switch_event(event) + self._cstate.process_event(role, type(event), server_switch_event) + + # Then perform the updates triggered by it. + + if type(event) is Request: + self._request_method = event.method + + if role is self.their_role and type(event) in ( + Request, + Response, + InformationalResponse, + ): + event = cast(Union[Request, Response, InformationalResponse], event) + self.their_http_version = event.http_version + + # Keep alive handling + # + # RFC 7230 doesn't really say what one should do if Connection: close + # shows up on a 1xx InformationalResponse. I think the idea is that + # this is not supposed to happen. In any case, if it does happen, we + # ignore it. 
+ if type(event) in (Request, Response) and not _keep_alive( + cast(Union[Request, Response], event) + ): + self._cstate.process_keep_alive_disabled() + + # 100-continue + if type(event) is Request and has_expect_100_continue(event): + self.client_is_waiting_for_100_continue = True + if type(event) in (InformationalResponse, Response): + self.client_is_waiting_for_100_continue = False + if role is CLIENT and type(event) in (Data, EndOfMessage): + self.client_is_waiting_for_100_continue = False + + self._respond_to_state_changes(old_states, event) + + def _get_io_object( + self, + role: Type[Sentinel], + event: Optional[Event], + io_dict: Union[ReadersType, WritersType], + ) -> Optional[Callable[..., Any]]: + # event may be None; it's only used when entering SEND_BODY + state = self._cstate.states[role] + if state is SEND_BODY: + # Special case: the io_dict has a dict of reader/writer factories + # that depend on the request/response framing. + framing_type, args = _body_framing( + cast(bytes, self._request_method), cast(Union[Request, Response], event) + ) + return io_dict[SEND_BODY][framing_type](*args) # type: ignore[index] + else: + # General case: the io_dict just has the appropriate reader/writer + # for this state + return io_dict.get((role, state)) # type: ignore[return-value] + + # This must be called after any action that might have caused + # self._cstate.states to change. 
+ def _respond_to_state_changes( + self, + old_states: Dict[Type[Sentinel], Type[Sentinel]], + event: Optional[Event] = None, + ) -> None: + # Update reader/writer + if self.our_state != old_states[self.our_role]: + self._writer = self._get_io_object(self.our_role, event, WRITERS) + if self.their_state != old_states[self.their_role]: + self._reader = self._get_io_object(self.their_role, event, READERS) + + @property + def trailing_data(self) -> Tuple[bytes, bool]: + """Data that has been received, but not yet processed, represented as + a tuple with two elements, where the first is a byte-string containing + the unprocessed data itself, and the second is a bool that is True if + the receive connection was closed. + + See :ref:`switching-protocols` for discussion of why you'd want this. + """ + return (bytes(self._receive_buffer), self._receive_buffer_closed) + + def receive_data(self, data: bytes) -> None: + """Add data to our internal receive buffer. + + This does not actually do any processing on the data, just stores + it. To trigger processing, you have to call :meth:`next_event`. + + Args: + data (:term:`bytes-like object`): + The new data that was just received. + + Special case: If *data* is an empty byte-string like ``b""``, + then this indicates that the remote side has closed the + connection (end of file). Normally this is convenient, because + standard Python APIs like :meth:`file.read` or + :meth:`socket.recv` use ``b""`` to indicate end-of-file, while + other failures to read are indicated using other mechanisms + like raising :exc:`TimeoutError`. When using such an API you + can just blindly pass through whatever you get from ``read`` + to :meth:`receive_data`, and everything will work. + + But, if you have an API where reading an empty string is a + valid non-EOF condition, then you need to be aware of this and + make sure to check for such strings and avoid passing them to + :meth:`receive_data`. 
+ + Returns: + Nothing, but after calling this you should call :meth:`next_event` + to parse the newly received data. + + Raises: + RuntimeError: + Raised if you pass an empty *data*, indicating EOF, and then + pass a non-empty *data*, indicating more data that somehow + arrived after the EOF. + + (Calling ``receive_data(b"")`` multiple times is fine, + and equivalent to calling it once.) + + """ + if data: + if self._receive_buffer_closed: + raise RuntimeError("received close, then received more data?") + self._receive_buffer += data + else: + self._receive_buffer_closed = True + + def _extract_next_receive_event( + self, + ) -> Union[Event, Type[NEED_DATA], Type[PAUSED]]: + state = self.their_state + # We don't pause immediately when they enter DONE, because even in + # DONE state we can still process a ConnectionClosed() event. But + # if we have data in our buffer, then we definitely aren't getting + # a ConnectionClosed() immediately and we need to pause. + if state is DONE and self._receive_buffer: + return PAUSED + if state is MIGHT_SWITCH_PROTOCOL or state is SWITCHED_PROTOCOL: + return PAUSED + assert self._reader is not None + event = self._reader(self._receive_buffer) + if event is None: + if not self._receive_buffer and self._receive_buffer_closed: + # In some unusual cases (basically just HTTP/1.0 bodies), EOF + # triggers an actual protocol event; in that case, we want to + # return that event, and then the state will change and we'll + # get called again to generate the actual ConnectionClosed(). + if hasattr(self._reader, "read_eof"): + event = self._reader.read_eof() # type: ignore[attr-defined] + else: + event = ConnectionClosed() + if event is None: + event = NEED_DATA + return event # type: ignore[no-any-return] + + def next_event(self) -> Union[Event, Type[NEED_DATA], Type[PAUSED]]: + """Parse the next event out of our receive buffer, update our internal + state, and return it. 
+ + This is a mutating operation -- think of it like calling :func:`next` + on an iterator. + + Returns: + : One of three things: + + 1) An event object -- see :ref:`events`. + + 2) The special constant :data:`NEED_DATA`, which indicates that + you need to read more data from your socket and pass it to + :meth:`receive_data` before this method will be able to return + any more events. + + 3) The special constant :data:`PAUSED`, which indicates that we + are not in a state where we can process incoming data (usually + because the peer has finished their part of the current + request/response cycle, and you have not yet called + :meth:`start_next_cycle`). See :ref:`flow-control` for details. + + Raises: + RemoteProtocolError: + The peer has misbehaved. You should close the connection + (possibly after sending some kind of 4xx response). + + Once this method returns :class:`ConnectionClosed` once, then all + subsequent calls will also return :class:`ConnectionClosed`. + + If this method raises any exception besides :exc:`RemoteProtocolError` + then that's a bug -- if it happens please file a bug report! + + If this method raises any exception then it also sets + :attr:`Connection.their_state` to :data:`ERROR` -- see + :ref:`error-handling` for discussion. 
+ + """ + + if self.their_state is ERROR: + raise RemoteProtocolError("Can't receive data when peer state is ERROR") + try: + event = self._extract_next_receive_event() + if event not in [NEED_DATA, PAUSED]: + self._process_event(self.their_role, cast(Event, event)) + if event is NEED_DATA: + if len(self._receive_buffer) > self._max_incomplete_event_size: + # 431 is "Request header fields too large" which is pretty + # much the only situation where we can get here + raise RemoteProtocolError( + "Receive buffer too long", error_status_hint=431 + ) + if self._receive_buffer_closed: + # We're still trying to complete some event, but that's + # never going to happen because no more data is coming + raise RemoteProtocolError("peer unexpectedly closed connection") + return event + except BaseException as exc: + self._process_error(self.their_role) + if isinstance(exc, LocalProtocolError): + exc._reraise_as_remote_protocol_error() + else: + raise + + def send(self, event: Event) -> Optional[bytes]: + """Convert a high-level event into bytes that can be sent to the peer, + while updating our internal state machine. + + Args: + event: The :ref:`event ` to send. + + Returns: + If ``type(event) is ConnectionClosed``, then returns + ``None``. Otherwise, returns a :term:`bytes-like object`. + + Raises: + LocalProtocolError: + Sending this event at this time would violate our + understanding of the HTTP/1.1 protocol. + + If this method raises any exception then it also sets + :attr:`Connection.our_state` to :data:`ERROR` -- see + :ref:`error-handling` for discussion. 
+ + """ + data_list = self.send_with_data_passthrough(event) + if data_list is None: + return None + else: + return b"".join(data_list) + + def send_with_data_passthrough(self, event: Event) -> Optional[List[bytes]]: + """Identical to :meth:`send`, except that in situations where + :meth:`send` returns a single :term:`bytes-like object`, this instead + returns a list of them -- and when sending a :class:`Data` event, this + list is guaranteed to contain the exact object you passed in as + :attr:`Data.data`. See :ref:`sendfile` for discussion. + + """ + if self.our_state is ERROR: + raise LocalProtocolError("Can't send data when our state is ERROR") + try: + if type(event) is Response: + event = self._clean_up_response_headers_for_sending(event) + # We want to call _process_event before calling the writer, + # because if someone tries to do something invalid then this will + # give a sensible error message, while our writers all just assume + # they will only receive valid events. But, _process_event might + # change self._writer. So we have to do a little dance: + writer = self._writer + self._process_event(self.our_role, event) + if type(event) is ConnectionClosed: + return None + else: + # In any situation where writer is None, process_event should + # have raised ProtocolError + assert writer is not None + data_list: List[bytes] = [] + writer(event, data_list.append) + return data_list + except: + self._process_error(self.our_role) + raise + + def send_failed(self) -> None: + """Notify the state machine that we failed to send the data it gave + us. + + This causes :attr:`Connection.our_state` to immediately become + :data:`ERROR` -- see :ref:`error-handling` for discussion. + + """ + self._process_error(self.our_role) + + # When sending a Response, we take responsibility for a few things: + # + # - Sometimes you MUST set Connection: close. We take care of those + # times. 
(You can also set it yourself if you want, and if you do then + # we'll respect that and close the connection at the right time. But you + # don't have to worry about that unless you want to.) + # + # - The user has to set Content-Length if they want it. Otherwise, for + # responses that have bodies (e.g. not HEAD), then we will automatically + # select the right mechanism for streaming a body of unknown length, + # which depends on depending on the peer's HTTP version. + # + # This function's *only* responsibility is making sure headers are set up + # right -- everything downstream just looks at the headers. There are no + # side channels. + def _clean_up_response_headers_for_sending(self, response: Response) -> Response: + assert type(response) is Response + + headers = response.headers + need_close = False + + # HEAD requests need some special handling: they always act like they + # have Content-Length: 0, and that's how _body_framing treats + # them. But their headers are supposed to match what we would send if + # the request was a GET. (Technically there is one deviation allowed: + # we're allowed to leave out the framing headers -- see + # https://tools.ietf.org/html/rfc7231#section-4.3.2 . But it's just as + # easy to get them right.) + method_for_choosing_headers = cast(bytes, self._request_method) + if method_for_choosing_headers == b"HEAD": + method_for_choosing_headers = b"GET" + framing_type, _ = _body_framing(method_for_choosing_headers, response) + if framing_type in ("chunked", "http/1.0"): + # This response has a body of unknown length. + # If our peer is HTTP/1.1, we use Transfer-Encoding: chunked + # If our peer is HTTP/1.0, we use no framing headers, and close the + # connection afterwards. 
+ # + # Make sure to clear Content-Length (in principle user could have + # set both and then we ignored Content-Length b/c + # Transfer-Encoding overwrote it -- this would be naughty of them, + # but the HTTP spec says that if our peer does this then we have + # to fix it instead of erroring out, so we'll accord the user the + # same respect). + headers = set_comma_header(headers, b"content-length", []) + if self.their_http_version is None or self.their_http_version < b"1.1": + # Either we never got a valid request and are sending back an + # error (their_http_version is None), so we assume the worst; + # or else we did get a valid HTTP/1.0 request, so we know that + # they don't understand chunked encoding. + headers = set_comma_header(headers, b"transfer-encoding", []) + # This is actually redundant ATM, since currently we + # unconditionally disable keep-alive when talking to HTTP/1.0 + # peers. But let's be defensive just in case we add + # Connection: keep-alive support later: + if self._request_method != b"HEAD": + need_close = True + else: + headers = set_comma_header(headers, b"transfer-encoding", [b"chunked"]) + + if not self._cstate.keep_alive or need_close: + # Make sure Connection: close is set + connection = set(get_comma_header(headers, b"connection")) + connection.discard(b"keep-alive") + connection.add(b"close") + headers = set_comma_header(headers, b"connection", sorted(connection)) + + return Response( + headers=headers, + status_code=response.status_code, + http_version=response.http_version, + reason=response.reason, + ) diff --git a/env/lib/python3.10/site-packages/h11/_events.py b/env/lib/python3.10/site-packages/h11/_events.py new file mode 100644 index 0000000..075bf8a --- /dev/null +++ b/env/lib/python3.10/site-packages/h11/_events.py @@ -0,0 +1,369 @@ +# High level events that make up HTTP/1.1 conversations. 
Loosely inspired by +# the corresponding events in hyper-h2: +# +# http://python-hyper.org/h2/en/stable/api.html#events +# +# Don't subclass these. Stuff will break. + +import re +from abc import ABC +from dataclasses import dataclass, field +from typing import Any, cast, Dict, List, Tuple, Union + +from ._abnf import method, request_target +from ._headers import Headers, normalize_and_validate +from ._util import bytesify, LocalProtocolError, validate + +# Everything in __all__ gets re-exported as part of the h11 public API. +__all__ = [ + "Event", + "Request", + "InformationalResponse", + "Response", + "Data", + "EndOfMessage", + "ConnectionClosed", +] + +method_re = re.compile(method.encode("ascii")) +request_target_re = re.compile(request_target.encode("ascii")) + + +class Event(ABC): + """ + Base class for h11 events. + """ + + __slots__ = () + + +@dataclass(init=False, frozen=True) +class Request(Event): + """The beginning of an HTTP request. + + Fields: + + .. attribute:: method + + An HTTP method, e.g. ``b"GET"`` or ``b"POST"``. Always a byte + string. :term:`Bytes-like objects ` and native + strings containing only ascii characters will be automatically + converted to byte strings. + + .. attribute:: target + + The target of an HTTP request, e.g. ``b"/index.html"``, or one of the + more exotic formats described in `RFC 7320, section 5.3 + `_. Always a byte + string. :term:`Bytes-like objects ` and native + strings containing only ascii characters will be automatically + converted to byte strings. + + .. attribute:: headers + + Request headers, represented as a list of (name, value) pairs. See + :ref:`the header normalization rules ` for details. + + .. attribute:: http_version + + The HTTP protocol version, represented as a byte string like + ``b"1.1"``. See :ref:`the HTTP version normalization rules + ` for details. 
+ + """ + + __slots__ = ("method", "headers", "target", "http_version") + + method: bytes + headers: Headers + target: bytes + http_version: bytes + + def __init__( + self, + *, + method: Union[bytes, str], + headers: Union[Headers, List[Tuple[bytes, bytes]], List[Tuple[str, str]]], + target: Union[bytes, str], + http_version: Union[bytes, str] = b"1.1", + _parsed: bool = False, + ) -> None: + super().__init__() + if isinstance(headers, Headers): + object.__setattr__(self, "headers", headers) + else: + object.__setattr__( + self, "headers", normalize_and_validate(headers, _parsed=_parsed) + ) + if not _parsed: + object.__setattr__(self, "method", bytesify(method)) + object.__setattr__(self, "target", bytesify(target)) + object.__setattr__(self, "http_version", bytesify(http_version)) + else: + object.__setattr__(self, "method", method) + object.__setattr__(self, "target", target) + object.__setattr__(self, "http_version", http_version) + + # "A server MUST respond with a 400 (Bad Request) status code to any + # HTTP/1.1 request message that lacks a Host header field and to any + # request message that contains more than one Host header field or a + # Host header field with an invalid field-value." + # -- https://tools.ietf.org/html/rfc7230#section-5.4 + host_count = 0 + for name, value in self.headers: + if name == b"host": + host_count += 1 + if self.http_version == b"1.1" and host_count == 0: + raise LocalProtocolError("Missing mandatory Host: header") + if host_count > 1: + raise LocalProtocolError("Found multiple Host: headers") + + validate(method_re, self.method, "Illegal method characters") + validate(request_target_re, self.target, "Illegal target characters") + + # This is an unhashable type. 
+ __hash__ = None # type: ignore + + +@dataclass(init=False, frozen=True) +class _ResponseBase(Event): + __slots__ = ("headers", "http_version", "reason", "status_code") + + headers: Headers + http_version: bytes + reason: bytes + status_code: int + + def __init__( + self, + *, + headers: Union[Headers, List[Tuple[bytes, bytes]], List[Tuple[str, str]]], + status_code: int, + http_version: Union[bytes, str] = b"1.1", + reason: Union[bytes, str] = b"", + _parsed: bool = False, + ) -> None: + super().__init__() + if isinstance(headers, Headers): + object.__setattr__(self, "headers", headers) + else: + object.__setattr__( + self, "headers", normalize_and_validate(headers, _parsed=_parsed) + ) + if not _parsed: + object.__setattr__(self, "reason", bytesify(reason)) + object.__setattr__(self, "http_version", bytesify(http_version)) + if not isinstance(status_code, int): + raise LocalProtocolError("status code must be integer") + # Because IntEnum objects are instances of int, but aren't + # duck-compatible (sigh), see gh-72. + object.__setattr__(self, "status_code", int(status_code)) + else: + object.__setattr__(self, "reason", reason) + object.__setattr__(self, "http_version", http_version) + object.__setattr__(self, "status_code", status_code) + + self.__post_init__() + + def __post_init__(self) -> None: + pass + + # This is an unhashable type. + __hash__ = None # type: ignore + + +@dataclass(init=False, frozen=True) +class InformationalResponse(_ResponseBase): + """An HTTP informational response. + + Fields: + + .. attribute:: status_code + + The status code of this response, as an integer. For an + :class:`InformationalResponse`, this is always in the range [100, + 200). + + .. attribute:: headers + + Request headers, represented as a list of (name, value) pairs. See + :ref:`the header normalization rules ` for + details. + + .. attribute:: http_version + + The HTTP protocol version, represented as a byte string like + ``b"1.1"``. 
See :ref:`the HTTP version normalization rules + ` for details. + + .. attribute:: reason + + The reason phrase of this response, as a byte string. For example: + ``b"OK"``, or ``b"Not Found"``. + + """ + + def __post_init__(self) -> None: + if not (100 <= self.status_code < 200): + raise LocalProtocolError( + "InformationalResponse status_code should be in range " + "[100, 200), not {}".format(self.status_code) + ) + + # This is an unhashable type. + __hash__ = None # type: ignore + + +@dataclass(init=False, frozen=True) +class Response(_ResponseBase): + """The beginning of an HTTP response. + + Fields: + + .. attribute:: status_code + + The status code of this response, as an integer. For an + :class:`Response`, this is always in the range [200, + 1000). + + .. attribute:: headers + + Request headers, represented as a list of (name, value) pairs. See + :ref:`the header normalization rules ` for details. + + .. attribute:: http_version + + The HTTP protocol version, represented as a byte string like + ``b"1.1"``. See :ref:`the HTTP version normalization rules + ` for details. + + .. attribute:: reason + + The reason phrase of this response, as a byte string. For example: + ``b"OK"``, or ``b"Not Found"``. + + """ + + def __post_init__(self) -> None: + if not (200 <= self.status_code < 1000): + raise LocalProtocolError( + "Response status_code should be in range [200, 1000), not {}".format( + self.status_code + ) + ) + + # This is an unhashable type. + __hash__ = None # type: ignore + + +@dataclass(init=False, frozen=True) +class Data(Event): + """Part of an HTTP message body. + + Fields: + + .. attribute:: data + + A :term:`bytes-like object` containing part of a message body. Or, if + using the ``combine=False`` argument to :meth:`Connection.send`, then + any object that your socket writing code knows what to do with, and for + which calling :func:`len` returns the number of bytes that will be + written -- see :ref:`sendfile` for details. + + .. 
attribute:: chunk_start + + A marker that indicates whether this data object is from the start of a + chunked transfer encoding chunk. This field is ignored when when a Data + event is provided to :meth:`Connection.send`: it is only valid on + events emitted from :meth:`Connection.next_event`. You probably + shouldn't use this attribute at all; see + :ref:`chunk-delimiters-are-bad` for details. + + .. attribute:: chunk_end + + A marker that indicates whether this data object is the last for a + given chunked transfer encoding chunk. This field is ignored when when + a Data event is provided to :meth:`Connection.send`: it is only valid + on events emitted from :meth:`Connection.next_event`. You probably + shouldn't use this attribute at all; see + :ref:`chunk-delimiters-are-bad` for details. + + """ + + __slots__ = ("data", "chunk_start", "chunk_end") + + data: bytes + chunk_start: bool + chunk_end: bool + + def __init__( + self, data: bytes, chunk_start: bool = False, chunk_end: bool = False + ) -> None: + object.__setattr__(self, "data", data) + object.__setattr__(self, "chunk_start", chunk_start) + object.__setattr__(self, "chunk_end", chunk_end) + + # This is an unhashable type. + __hash__ = None # type: ignore + + +# XX FIXME: "A recipient MUST ignore (or consider as an error) any fields that +# are forbidden to be sent in a trailer, since processing them as if they were +# present in the header section might bypass external security filters." +# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#chunked.trailer.part +# Unfortunately, the list of forbidden fields is long and vague :-/ +@dataclass(init=False, frozen=True) +class EndOfMessage(Event): + """The end of an HTTP message. + + Fields: + + .. attribute:: headers + + Default value: ``[]`` + + Any trailing headers attached to this message, represented as a list of + (name, value) pairs. See :ref:`the header normalization rules + ` for details. 
+ + Must be empty unless ``Transfer-Encoding: chunked`` is in use. + + """ + + __slots__ = ("headers",) + + headers: Headers + + def __init__( + self, + *, + headers: Union[ + Headers, List[Tuple[bytes, bytes]], List[Tuple[str, str]], None + ] = None, + _parsed: bool = False, + ) -> None: + super().__init__() + if headers is None: + headers = Headers([]) + elif not isinstance(headers, Headers): + headers = normalize_and_validate(headers, _parsed=_parsed) + + object.__setattr__(self, "headers", headers) + + # This is an unhashable type. + __hash__ = None # type: ignore + + +@dataclass(frozen=True) +class ConnectionClosed(Event): + """This event indicates that the sender has closed their outgoing + connection. + + Note that this does not necessarily mean that they can't *receive* further + data, because TCP connections are composed to two one-way channels which + can be closed independently. See :ref:`closing` for details. + + No fields. + """ + + pass diff --git a/env/lib/python3.10/site-packages/h11/_headers.py b/env/lib/python3.10/site-packages/h11/_headers.py new file mode 100644 index 0000000..b97d020 --- /dev/null +++ b/env/lib/python3.10/site-packages/h11/_headers.py @@ -0,0 +1,278 @@ +import re +from typing import AnyStr, cast, List, overload, Sequence, Tuple, TYPE_CHECKING, Union + +from ._abnf import field_name, field_value +from ._util import bytesify, LocalProtocolError, validate + +if TYPE_CHECKING: + from ._events import Request + +try: + from typing import Literal +except ImportError: + from typing_extensions import Literal # type: ignore + + +# Facts +# ----- +# +# Headers are: +# keys: case-insensitive ascii +# values: mixture of ascii and raw bytes +# +# "Historically, HTTP has allowed field content with text in the ISO-8859-1 +# charset [ISO-8859-1], supporting other charsets only through use of +# [RFC2047] encoding. In practice, most HTTP header field values use only a +# subset of the US-ASCII charset [USASCII]. 
Newly defined header fields SHOULD +# limit their field values to US-ASCII octets. A recipient SHOULD treat other +# octets in field content (obs-text) as opaque data." +# And it deprecates all non-ascii values +# +# Leading/trailing whitespace in header names is forbidden +# +# Values get leading/trailing whitespace stripped +# +# Content-Disposition actually needs to contain unicode semantically; to +# accomplish this it has a terrifically weird way of encoding the filename +# itself as ascii (and even this still has lots of cross-browser +# incompatibilities) +# +# Order is important: +# "a proxy MUST NOT change the order of these field values when forwarding a +# message" +# (and there are several headers where the order indicates a preference) +# +# Multiple occurences of the same header: +# "A sender MUST NOT generate multiple header fields with the same field name +# in a message unless either the entire field value for that header field is +# defined as a comma-separated list [or the header is Set-Cookie which gets a +# special exception]" - RFC 7230. (cookies are in RFC 6265) +# +# So every header aside from Set-Cookie can be merged by b", ".join if it +# occurs repeatedly. But, of course, they can't necessarily be split by +# .split(b","), because quoting. +# +# Given all this mess (case insensitive, duplicates allowed, order is +# important, ...), there doesn't appear to be any standard way to handle +# headers in Python -- they're almost like dicts, but... actually just +# aren't. For now we punt and just use a super simple representation: headers +# are a list of pairs +# +# [(name1, value1), (name2, value2), ...] +# +# where all entries are bytestrings, names are lowercase and have no +# leading/trailing whitespace, and values are bytestrings with no +# leading/trailing whitespace. Searching and updating are done via naive O(n) +# methods. +# +# Maybe a dict-of-lists would be better? 
+ +_content_length_re = re.compile(rb"[0-9]+") +_field_name_re = re.compile(field_name.encode("ascii")) +_field_value_re = re.compile(field_value.encode("ascii")) + + +class Headers(Sequence[Tuple[bytes, bytes]]): + """ + A list-like interface that allows iterating over headers as byte-pairs + of (lowercased-name, value). + + Internally we actually store the representation as three-tuples, + including both the raw original casing, in order to preserve casing + over-the-wire, and the lowercased name, for case-insensitive comparisions. + + r = Request( + method="GET", + target="/", + headers=[("Host", "example.org"), ("Connection", "keep-alive")], + http_version="1.1", + ) + assert r.headers == [ + (b"host", b"example.org"), + (b"connection", b"keep-alive") + ] + assert r.headers.raw_items() == [ + (b"Host", b"example.org"), + (b"Connection", b"keep-alive") + ] + """ + + __slots__ = "_full_items" + + def __init__(self, full_items: List[Tuple[bytes, bytes, bytes]]) -> None: + self._full_items = full_items + + def __bool__(self) -> bool: + return bool(self._full_items) + + def __eq__(self, other: object) -> bool: + return list(self) == list(other) # type: ignore + + def __len__(self) -> int: + return len(self._full_items) + + def __repr__(self) -> str: + return "" % repr(list(self)) + + def __getitem__(self, idx: int) -> Tuple[bytes, bytes]: # type: ignore[override] + _, name, value = self._full_items[idx] + return (name, value) + + def raw_items(self) -> List[Tuple[bytes, bytes]]: + return [(raw_name, value) for raw_name, _, value in self._full_items] + + +HeaderTypes = Union[ + List[Tuple[bytes, bytes]], + List[Tuple[bytes, str]], + List[Tuple[str, bytes]], + List[Tuple[str, str]], +] + + +@overload +def normalize_and_validate(headers: Headers, _parsed: Literal[True]) -> Headers: + ... + + +@overload +def normalize_and_validate(headers: HeaderTypes, _parsed: Literal[False]) -> Headers: + ... 
+ + +@overload +def normalize_and_validate( + headers: Union[Headers, HeaderTypes], _parsed: bool = False +) -> Headers: + ... + + +def normalize_and_validate( + headers: Union[Headers, HeaderTypes], _parsed: bool = False +) -> Headers: + new_headers = [] + seen_content_length = None + saw_transfer_encoding = False + for name, value in headers: + # For headers coming out of the parser, we can safely skip some steps, + # because it always returns bytes and has already run these regexes + # over the data: + if not _parsed: + name = bytesify(name) + value = bytesify(value) + validate(_field_name_re, name, "Illegal header name {!r}", name) + validate(_field_value_re, value, "Illegal header value {!r}", value) + assert isinstance(name, bytes) + assert isinstance(value, bytes) + + raw_name = name + name = name.lower() + if name == b"content-length": + lengths = {length.strip() for length in value.split(b",")} + if len(lengths) != 1: + raise LocalProtocolError("conflicting Content-Length headers") + value = lengths.pop() + validate(_content_length_re, value, "bad Content-Length") + if seen_content_length is None: + seen_content_length = value + new_headers.append((raw_name, name, value)) + elif seen_content_length != value: + raise LocalProtocolError("conflicting Content-Length headers") + elif name == b"transfer-encoding": + # "A server that receives a request message with a transfer coding + # it does not understand SHOULD respond with 501 (Not + # Implemented)." 
+ # https://tools.ietf.org/html/rfc7230#section-3.3.1 + if saw_transfer_encoding: + raise LocalProtocolError( + "multiple Transfer-Encoding headers", error_status_hint=501 + ) + # "All transfer-coding names are case-insensitive" + # -- https://tools.ietf.org/html/rfc7230#section-4 + value = value.lower() + if value != b"chunked": + raise LocalProtocolError( + "Only Transfer-Encoding: chunked is supported", + error_status_hint=501, + ) + saw_transfer_encoding = True + new_headers.append((raw_name, name, value)) + else: + new_headers.append((raw_name, name, value)) + return Headers(new_headers) + + +def get_comma_header(headers: Headers, name: bytes) -> List[bytes]: + # Should only be used for headers whose value is a list of + # comma-separated, case-insensitive values. + # + # The header name `name` is expected to be lower-case bytes. + # + # Connection: meets these criteria (including cast insensitivity). + # + # Content-Length: technically is just a single value (1*DIGIT), but the + # standard makes reference to implementations that do multiple values, and + # using this doesn't hurt. Ditto, case insensitivity doesn't things either + # way. + # + # Transfer-Encoding: is more complex (allows for quoted strings), so + # splitting on , is actually wrong. For example, this is legal: + # + # Transfer-Encoding: foo; options="1,2", chunked + # + # and should be parsed as + # + # foo; options="1,2" + # chunked + # + # but this naive function will parse it as + # + # foo; options="1 + # 2" + # chunked + # + # However, this is okay because the only thing we are going to do with + # any Transfer-Encoding is reject ones that aren't just "chunked", so + # both of these will be treated the same anyway. + # + # Expect: the only legal value is the literal string + # "100-continue". Splitting on commas is harmless. Case insensitive. 
+ # + out: List[bytes] = [] + for _, found_name, found_raw_value in headers._full_items: + if found_name == name: + found_raw_value = found_raw_value.lower() + for found_split_value in found_raw_value.split(b","): + found_split_value = found_split_value.strip() + if found_split_value: + out.append(found_split_value) + return out + + +def set_comma_header(headers: Headers, name: bytes, new_values: List[bytes]) -> Headers: + # The header name `name` is expected to be lower-case bytes. + # + # Note that when we store the header we use title casing for the header + # names, in order to match the conventional HTTP header style. + # + # Simply calling `.title()` is a blunt approach, but it's correct + # here given the cases where we're using `set_comma_header`... + # + # Connection, Content-Length, Transfer-Encoding. + new_headers: List[Tuple[bytes, bytes]] = [] + for found_raw_name, found_name, found_raw_value in headers._full_items: + if found_name != name: + new_headers.append((found_raw_name, found_raw_value)) + for new_value in new_values: + new_headers.append((name.title(), new_value)) + return normalize_and_validate(new_headers) + + +def has_expect_100_continue(request: "Request") -> bool: + # https://tools.ietf.org/html/rfc7231#section-5.1.1 + # "A server that receives a 100-continue expectation in an HTTP/1.0 request + # MUST ignore that expectation." + if request.http_version < b"1.1": + return False + expect = get_comma_header(request.headers, b"expect") + return b"100-continue" in expect diff --git a/env/lib/python3.10/site-packages/h11/_readers.py b/env/lib/python3.10/site-packages/h11/_readers.py new file mode 100644 index 0000000..08a9574 --- /dev/null +++ b/env/lib/python3.10/site-packages/h11/_readers.py @@ -0,0 +1,247 @@ +# Code to read HTTP data +# +# Strategy: each reader is a callable which takes a ReceiveBuffer object, and +# either: +# 1) consumes some of it and returns an Event +# 2) raises a LocalProtocolError (for consistency -- e.g. 
we call validate() +# and it might raise a LocalProtocolError, so simpler just to always use +# this) +# 3) returns None, meaning "I need more data" +# +# If they have a .read_eof attribute, then this will be called if an EOF is +# received -- but this is optional. Either way, the actual ConnectionClosed +# event will be generated afterwards. +# +# READERS is a dict describing how to pick a reader. It maps states to either: +# - a reader +# - or, for body readers, a dict of per-framing reader factories + +import re +from typing import Any, Callable, Dict, Iterable, NoReturn, Optional, Tuple, Type, Union + +from ._abnf import chunk_header, header_field, request_line, status_line +from ._events import Data, EndOfMessage, InformationalResponse, Request, Response +from ._receivebuffer import ReceiveBuffer +from ._state import ( + CLIENT, + CLOSED, + DONE, + IDLE, + MUST_CLOSE, + SEND_BODY, + SEND_RESPONSE, + SERVER, +) +from ._util import LocalProtocolError, RemoteProtocolError, Sentinel, validate + +__all__ = ["READERS"] + +header_field_re = re.compile(header_field.encode("ascii")) +obs_fold_re = re.compile(rb"[ \t]+") + + +def _obsolete_line_fold(lines: Iterable[bytes]) -> Iterable[bytes]: + it = iter(lines) + last: Optional[bytes] = None + for line in it: + match = obs_fold_re.match(line) + if match: + if last is None: + raise LocalProtocolError("continuation line at start of headers") + if not isinstance(last, bytearray): + # Cast to a mutable type, avoiding copy on append to ensure O(n) time + last = bytearray(last) + last += b" " + last += line[match.end() :] + else: + if last is not None: + yield last + last = line + if last is not None: + yield last + + +def _decode_header_lines( + lines: Iterable[bytes], +) -> Iterable[Tuple[bytes, bytes]]: + for line in _obsolete_line_fold(lines): + matches = validate(header_field_re, line, "illegal header line: {!r}", line) + yield (matches["field_name"], matches["field_value"]) + + +request_line_re = 
re.compile(request_line.encode("ascii")) + + +def maybe_read_from_IDLE_client(buf: ReceiveBuffer) -> Optional[Request]: + lines = buf.maybe_extract_lines() + if lines is None: + if buf.is_next_line_obviously_invalid_request_line(): + raise LocalProtocolError("illegal request line") + return None + if not lines: + raise LocalProtocolError("no request line received") + matches = validate( + request_line_re, lines[0], "illegal request line: {!r}", lines[0] + ) + return Request( + headers=list(_decode_header_lines(lines[1:])), _parsed=True, **matches + ) + + +status_line_re = re.compile(status_line.encode("ascii")) + + +def maybe_read_from_SEND_RESPONSE_server( + buf: ReceiveBuffer, +) -> Union[InformationalResponse, Response, None]: + lines = buf.maybe_extract_lines() + if lines is None: + if buf.is_next_line_obviously_invalid_request_line(): + raise LocalProtocolError("illegal request line") + return None + if not lines: + raise LocalProtocolError("no response line received") + matches = validate(status_line_re, lines[0], "illegal status line: {!r}", lines[0]) + http_version = ( + b"1.1" if matches["http_version"] is None else matches["http_version"] + ) + reason = b"" if matches["reason"] is None else matches["reason"] + status_code = int(matches["status_code"]) + class_: Union[Type[InformationalResponse], Type[Response]] = ( + InformationalResponse if status_code < 200 else Response + ) + return class_( + headers=list(_decode_header_lines(lines[1:])), + _parsed=True, + status_code=status_code, + reason=reason, + http_version=http_version, + ) + + +class ContentLengthReader: + def __init__(self, length: int) -> None: + self._length = length + self._remaining = length + + def __call__(self, buf: ReceiveBuffer) -> Union[Data, EndOfMessage, None]: + if self._remaining == 0: + return EndOfMessage() + data = buf.maybe_extract_at_most(self._remaining) + if data is None: + return None + self._remaining -= len(data) + return Data(data=data) + + def read_eof(self) -> 
NoReturn: + raise RemoteProtocolError( + "peer closed connection without sending complete message body " + "(received {} bytes, expected {})".format( + self._length - self._remaining, self._length + ) + ) + + +chunk_header_re = re.compile(chunk_header.encode("ascii")) + + +class ChunkedReader: + def __init__(self) -> None: + self._bytes_in_chunk = 0 + # After reading a chunk, we have to throw away the trailing \r\n; if + # this is >0 then we discard that many bytes before resuming regular + # de-chunkification. + self._bytes_to_discard = 0 + self._reading_trailer = False + + def __call__(self, buf: ReceiveBuffer) -> Union[Data, EndOfMessage, None]: + if self._reading_trailer: + lines = buf.maybe_extract_lines() + if lines is None: + return None + return EndOfMessage(headers=list(_decode_header_lines(lines))) + if self._bytes_to_discard > 0: + data = buf.maybe_extract_at_most(self._bytes_to_discard) + if data is None: + return None + self._bytes_to_discard -= len(data) + if self._bytes_to_discard > 0: + return None + # else, fall through and read some more + assert self._bytes_to_discard == 0 + if self._bytes_in_chunk == 0: + # We need to refill our chunk count + chunk_header = buf.maybe_extract_next_line() + if chunk_header is None: + return None + matches = validate( + chunk_header_re, + chunk_header, + "illegal chunk header: {!r}", + chunk_header, + ) + # XX FIXME: we discard chunk extensions. Does anyone care? 
+ self._bytes_in_chunk = int(matches["chunk_size"], base=16) + if self._bytes_in_chunk == 0: + self._reading_trailer = True + return self(buf) + chunk_start = True + else: + chunk_start = False + assert self._bytes_in_chunk > 0 + data = buf.maybe_extract_at_most(self._bytes_in_chunk) + if data is None: + return None + self._bytes_in_chunk -= len(data) + if self._bytes_in_chunk == 0: + self._bytes_to_discard = 2 + chunk_end = True + else: + chunk_end = False + return Data(data=data, chunk_start=chunk_start, chunk_end=chunk_end) + + def read_eof(self) -> NoReturn: + raise RemoteProtocolError( + "peer closed connection without sending complete message body " + "(incomplete chunked read)" + ) + + +class Http10Reader: + def __call__(self, buf: ReceiveBuffer) -> Optional[Data]: + data = buf.maybe_extract_at_most(999999999) + if data is None: + return None + return Data(data=data) + + def read_eof(self) -> EndOfMessage: + return EndOfMessage() + + +def expect_nothing(buf: ReceiveBuffer) -> None: + if buf: + raise LocalProtocolError("Got data when expecting EOF") + return None + + +ReadersType = Dict[ + Union[Type[Sentinel], Tuple[Type[Sentinel], Type[Sentinel]]], + Union[Callable[..., Any], Dict[str, Callable[..., Any]]], +] + +READERS: ReadersType = { + (CLIENT, IDLE): maybe_read_from_IDLE_client, + (SERVER, IDLE): maybe_read_from_SEND_RESPONSE_server, + (SERVER, SEND_RESPONSE): maybe_read_from_SEND_RESPONSE_server, + (CLIENT, DONE): expect_nothing, + (CLIENT, MUST_CLOSE): expect_nothing, + (CLIENT, CLOSED): expect_nothing, + (SERVER, DONE): expect_nothing, + (SERVER, MUST_CLOSE): expect_nothing, + (SERVER, CLOSED): expect_nothing, + SEND_BODY: { + "chunked": ChunkedReader, + "content-length": ContentLengthReader, + "http/1.0": Http10Reader, + }, +} diff --git a/env/lib/python3.10/site-packages/h11/_receivebuffer.py b/env/lib/python3.10/site-packages/h11/_receivebuffer.py new file mode 100644 index 0000000..e5c4e08 --- /dev/null +++ 
b/env/lib/python3.10/site-packages/h11/_receivebuffer.py @@ -0,0 +1,153 @@ +import re +import sys +from typing import List, Optional, Union + +__all__ = ["ReceiveBuffer"] + + +# Operations we want to support: +# - find next \r\n or \r\n\r\n (\n or \n\n are also acceptable), +# or wait until there is one +# - read at-most-N bytes +# Goals: +# - on average, do this fast +# - worst case, do this in O(n) where n is the number of bytes processed +# Plan: +# - store bytearray, offset, how far we've searched for a separator token +# - use the how-far-we've-searched data to avoid rescanning +# - while doing a stream of uninterrupted processing, advance offset instead +# of constantly copying +# WARNING: +# - I haven't benchmarked or profiled any of this yet. +# +# Note that starting in Python 3.4, deleting the initial n bytes from a +# bytearray is amortized O(n), thanks to some excellent work by Antoine +# Martin: +# +# https://bugs.python.org/issue19087 +# +# This means that if we only supported 3.4+, we could get rid of the code here +# involving self._start and self.compress, because it's doing exactly the same +# thing that bytearray now does internally. +# +# BUT unfortunately, we still support 2.7, and reading short segments out of a +# long buffer MUST be O(bytes read) to avoid DoS issues, so we can't actually +# delete this code. Yet: +# +# https://pythonclock.org/ +# +# (Two things to double-check first though: make sure PyPy also has the +# optimization, and benchmark to make sure it's a win, since we do have a +# slightly clever thing where we delay calling compress() until we've +# processed a whole event, which could in theory be slightly more efficient +# than the internal bytearray support.) 
+blank_line_regex = re.compile(b"\n\r?\n", re.MULTILINE) + + +class ReceiveBuffer: + def __init__(self) -> None: + self._data = bytearray() + self._next_line_search = 0 + self._multiple_lines_search = 0 + + def __iadd__(self, byteslike: Union[bytes, bytearray]) -> "ReceiveBuffer": + self._data += byteslike + return self + + def __bool__(self) -> bool: + return bool(len(self)) + + def __len__(self) -> int: + return len(self._data) + + # for @property unprocessed_data + def __bytes__(self) -> bytes: + return bytes(self._data) + + def _extract(self, count: int) -> bytearray: + # extracting an initial slice of the data buffer and return it + out = self._data[:count] + del self._data[:count] + + self._next_line_search = 0 + self._multiple_lines_search = 0 + + return out + + def maybe_extract_at_most(self, count: int) -> Optional[bytearray]: + """ + Extract a fixed number of bytes from the buffer. + """ + out = self._data[:count] + if not out: + return None + + return self._extract(count) + + def maybe_extract_next_line(self) -> Optional[bytearray]: + """ + Extract the first line, if it is completed in the buffer. + """ + # Only search in buffer space that we've not already looked at. + search_start_index = max(0, self._next_line_search - 1) + partial_idx = self._data.find(b"\r\n", search_start_index) + + if partial_idx == -1: + self._next_line_search = len(self._data) + return None + + # + 2 is to compensate len(b"\r\n") + idx = partial_idx + 2 + + return self._extract(idx) + + def maybe_extract_lines(self) -> Optional[List[bytearray]]: + """ + Extract everything up to the first blank line, and return a list of lines. + """ + # Handle the case where we have an immediate empty line. + if self._data[:1] == b"\n": + self._extract(1) + return [] + + if self._data[:2] == b"\r\n": + self._extract(2) + return [] + + # Only search in buffer space that we've not already looked at. 
+ match = blank_line_regex.search(self._data, self._multiple_lines_search) + if match is None: + self._multiple_lines_search = max(0, len(self._data) - 2) + return None + + # Truncate the buffer and return it. + idx = match.span(0)[-1] + out = self._extract(idx) + lines = out.split(b"\n") + + for line in lines: + if line.endswith(b"\r"): + del line[-1] + + assert lines[-2] == lines[-1] == b"" + + del lines[-2:] + + return lines + + # In theory we should wait until `\r\n` before starting to validate + # incoming data. However it's interesting to detect (very) invalid data + # early given they might not even contain `\r\n` at all (hence only + # timeout will get rid of them). + # This is not a 100% effective detection but more of a cheap sanity check + # allowing for early abort in some useful cases. + # This is especially interesting when peer is messing up with HTTPS and + # sent us a TLS stream where we were expecting plain HTTP given all + # versions of TLS so far start handshake with a 0x16 message type code. + def is_next_line_obviously_invalid_request_line(self) -> bool: + try: + # HTTP header line must not contain non-printable characters + # and should not start with a space + return self._data[0] < 0x21 + except IndexError: + return False diff --git a/env/lib/python3.10/site-packages/h11/_state.py b/env/lib/python3.10/site-packages/h11/_state.py new file mode 100644 index 0000000..3593430 --- /dev/null +++ b/env/lib/python3.10/site-packages/h11/_state.py @@ -0,0 +1,367 @@ +################################################################ +# The core state machine +################################################################ +# +# Rule 1: everything that affects the state machine and state transitions must +# live here in this file. As much as possible goes into the table-based +# representation, but for the bits that don't quite fit, the actual code and +# state must nonetheless live here. 
+# +# Rule 2: this file does not know about what role we're playing; it only knows +# about HTTP request/response cycles in the abstract. This ensures that we +# don't cheat and apply different rules to local and remote parties. +# +# +# Theory of operation +# =================== +# +# Possibly the simplest way to think about this is that we actually have 5 +# different state machines here. Yes, 5. These are: +# +# 1) The client state, with its complicated automaton (see the docs) +# 2) The server state, with its complicated automaton (see the docs) +# 3) The keep-alive state, with possible states {True, False} +# 4) The SWITCH_CONNECT state, with possible states {False, True} +# 5) The SWITCH_UPGRADE state, with possible states {False, True} +# +# For (3)-(5), the first state listed is the initial state. +# +# (1)-(3) are stored explicitly in member variables. The last +# two are stored implicitly in the pending_switch_proposals set as: +# (state of 4) == (_SWITCH_CONNECT in pending_switch_proposals) +# (state of 5) == (_SWITCH_UPGRADE in pending_switch_proposals) +# +# And each of these machines has two different kinds of transitions: +# +# a) Event-triggered +# b) State-triggered +# +# Event triggered is the obvious thing that you'd think it is: some event +# happens, and if it's the right event at the right time then a transition +# happens. But there are somewhat complicated rules for which machines can +# "see" which events. (As a rule of thumb, if a machine "sees" an event, this +# means two things: the event can affect the machine, and if the machine is +# not in a state where it expects that event then it's an error.) These rules +# are: +# +# 1) The client machine sees all h11.events objects emitted by the client. +# +# 2) The server machine sees all h11.events objects emitted by the server. +# +# It also sees the client's Request event. +# +# And sometimes, server events are annotated with a _SWITCH_* event. 
For +# example, we can have a (Response, _SWITCH_CONNECT) event, which is +# different from a regular Response event. +# +# 3) The keep-alive machine sees the process_keep_alive_disabled() event +# (which is derived from Request/Response events), and this event +# transitions it from True -> False, or from False -> False. There's no way +# to transition back. +# +# 4&5) The _SWITCH_* machines transition from False->True when we get a +# Request that proposes the relevant type of switch (via +# process_client_switch_proposals), and they go from True->False when we +# get a Response that has no _SWITCH_* annotation. +# +# So that's event-triggered transitions. +# +# State-triggered transitions are less standard. What they do here is couple +# the machines together. The way this works is, when certain *joint* +# configurations of states are achieved, then we automatically transition to a +# new *joint* state. So, for example, if we're ever in a joint state with +# +# client: DONE +# keep-alive: False +# +# then the client state immediately transitions to: +# +# client: MUST_CLOSE +# +# This is fundamentally different from an event-based transition, because it +# doesn't matter how we arrived at the {client: DONE, keep-alive: False} state +# -- maybe the client transitioned SEND_BODY -> DONE, or keep-alive +# transitioned True -> False. Either way, once this precondition is satisfied, +# this transition is immediately triggered. +# +# What if two conflicting state-based transitions get enabled at the same +# time? In practice there's only one case where this arises (client DONE -> +# MIGHT_SWITCH_PROTOCOL versus DONE -> MUST_CLOSE), and we resolve it by +# explicitly prioritizing the DONE -> MIGHT_SWITCH_PROTOCOL transition. +# +# Implementation +# -------------- +# +# The event-triggered transitions for the server and client machines are all +# stored explicitly in a table. Ditto for the state-triggered transitions that +# involve just the server and client state. 
+# +# The transitions for the other machines, and the state-triggered transitions +# that involve the other machines, are written out as explicit Python code. +# +# It'd be nice if there were some cleaner way to do all this. This isn't +# *too* terrible, but I feel like it could probably be better. +# +# WARNING +# ------- +# +# The script that generates the state machine diagrams for the docs knows how +# to read out the EVENT_TRIGGERED_TRANSITIONS and STATE_TRIGGERED_TRANSITIONS +# tables. But it can't automatically read the transitions that are written +# directly in Python code. So if you touch those, you need to also update the +# script to keep it in sync! +from typing import cast, Dict, Optional, Set, Tuple, Type, Union + +from ._events import * +from ._util import LocalProtocolError, Sentinel + +# Everything in __all__ gets re-exported as part of the h11 public API. +__all__ = [ + "CLIENT", + "SERVER", + "IDLE", + "SEND_RESPONSE", + "SEND_BODY", + "DONE", + "MUST_CLOSE", + "CLOSED", + "MIGHT_SWITCH_PROTOCOL", + "SWITCHED_PROTOCOL", + "ERROR", +] + + +class CLIENT(Sentinel, metaclass=Sentinel): + pass + + +class SERVER(Sentinel, metaclass=Sentinel): + pass + + +# States +class IDLE(Sentinel, metaclass=Sentinel): + pass + + +class SEND_RESPONSE(Sentinel, metaclass=Sentinel): + pass + + +class SEND_BODY(Sentinel, metaclass=Sentinel): + pass + + +class DONE(Sentinel, metaclass=Sentinel): + pass + + +class MUST_CLOSE(Sentinel, metaclass=Sentinel): + pass + + +class CLOSED(Sentinel, metaclass=Sentinel): + pass + + +class ERROR(Sentinel, metaclass=Sentinel): + pass + + +# Switch types +class MIGHT_SWITCH_PROTOCOL(Sentinel, metaclass=Sentinel): + pass + + +class SWITCHED_PROTOCOL(Sentinel, metaclass=Sentinel): + pass + + +class _SWITCH_UPGRADE(Sentinel, metaclass=Sentinel): + pass + + +class _SWITCH_CONNECT(Sentinel, metaclass=Sentinel): + pass + + +EventTransitionType = Dict[ + Type[Sentinel], + Dict[ + Type[Sentinel], + Dict[Union[Type[Event], Tuple[Type[Event], 
Type[Sentinel]]], Type[Sentinel]], + ], +] + +EVENT_TRIGGERED_TRANSITIONS: EventTransitionType = { + CLIENT: { + IDLE: {Request: SEND_BODY, ConnectionClosed: CLOSED}, + SEND_BODY: {Data: SEND_BODY, EndOfMessage: DONE}, + DONE: {ConnectionClosed: CLOSED}, + MUST_CLOSE: {ConnectionClosed: CLOSED}, + CLOSED: {ConnectionClosed: CLOSED}, + MIGHT_SWITCH_PROTOCOL: {}, + SWITCHED_PROTOCOL: {}, + ERROR: {}, + }, + SERVER: { + IDLE: { + ConnectionClosed: CLOSED, + Response: SEND_BODY, + # Special case: server sees client Request events, in this form + (Request, CLIENT): SEND_RESPONSE, + }, + SEND_RESPONSE: { + InformationalResponse: SEND_RESPONSE, + Response: SEND_BODY, + (InformationalResponse, _SWITCH_UPGRADE): SWITCHED_PROTOCOL, + (Response, _SWITCH_CONNECT): SWITCHED_PROTOCOL, + }, + SEND_BODY: {Data: SEND_BODY, EndOfMessage: DONE}, + DONE: {ConnectionClosed: CLOSED}, + MUST_CLOSE: {ConnectionClosed: CLOSED}, + CLOSED: {ConnectionClosed: CLOSED}, + SWITCHED_PROTOCOL: {}, + ERROR: {}, + }, +} + +StateTransitionType = Dict[ + Tuple[Type[Sentinel], Type[Sentinel]], Dict[Type[Sentinel], Type[Sentinel]] +] + +# NB: there are also some special-case state-triggered transitions hard-coded +# into _fire_state_triggered_transitions below. +STATE_TRIGGERED_TRANSITIONS: StateTransitionType = { + # (Client state, Server state) -> new states + # Protocol negotiation + (MIGHT_SWITCH_PROTOCOL, SWITCHED_PROTOCOL): {CLIENT: SWITCHED_PROTOCOL}, + # Socket shutdown + (CLOSED, DONE): {SERVER: MUST_CLOSE}, + (CLOSED, IDLE): {SERVER: MUST_CLOSE}, + (ERROR, DONE): {SERVER: MUST_CLOSE}, + (DONE, CLOSED): {CLIENT: MUST_CLOSE}, + (IDLE, CLOSED): {CLIENT: MUST_CLOSE}, + (DONE, ERROR): {CLIENT: MUST_CLOSE}, +} + + +class ConnectionState: + def __init__(self) -> None: + # Extra bits of state that don't quite fit into the state model. + + # If this is False then it enables the automatic DONE -> MUST_CLOSE + # transition. 
Don't set this directly; call .keep_alive_disabled() + self.keep_alive = True + + # This is a subset of {UPGRADE, CONNECT}, containing the proposals + # made by the client for switching protocols. + self.pending_switch_proposals: Set[Type[Sentinel]] = set() + + self.states: Dict[Type[Sentinel], Type[Sentinel]] = {CLIENT: IDLE, SERVER: IDLE} + + def process_error(self, role: Type[Sentinel]) -> None: + self.states[role] = ERROR + self._fire_state_triggered_transitions() + + def process_keep_alive_disabled(self) -> None: + self.keep_alive = False + self._fire_state_triggered_transitions() + + def process_client_switch_proposal(self, switch_event: Type[Sentinel]) -> None: + self.pending_switch_proposals.add(switch_event) + self._fire_state_triggered_transitions() + + def process_event( + self, + role: Type[Sentinel], + event_type: Type[Event], + server_switch_event: Optional[Type[Sentinel]] = None, + ) -> None: + _event_type: Union[Type[Event], Tuple[Type[Event], Type[Sentinel]]] = event_type + if server_switch_event is not None: + assert role is SERVER + if server_switch_event not in self.pending_switch_proposals: + raise LocalProtocolError( + "Received server {} event without a pending proposal".format( + server_switch_event + ) + ) + _event_type = (event_type, server_switch_event) + if server_switch_event is None and _event_type is Response: + self.pending_switch_proposals = set() + self._fire_event_triggered_transitions(role, _event_type) + # Special case: the server state does get to see Request + # events. 
+ if _event_type is Request: + assert role is CLIENT + self._fire_event_triggered_transitions(SERVER, (Request, CLIENT)) + self._fire_state_triggered_transitions() + + def _fire_event_triggered_transitions( + self, + role: Type[Sentinel], + event_type: Union[Type[Event], Tuple[Type[Event], Type[Sentinel]]], + ) -> None: + state = self.states[role] + try: + new_state = EVENT_TRIGGERED_TRANSITIONS[role][state][event_type] + except KeyError: + event_type = cast(Type[Event], event_type) + raise LocalProtocolError( + "can't handle event type {} when role={} and state={}".format( + event_type.__name__, role, self.states[role] + ) + ) from None + self.states[role] = new_state + + def _fire_state_triggered_transitions(self) -> None: + # We apply these rules repeatedly until converging on a fixed point + while True: + start_states = dict(self.states) + + # It could happen that both these special-case transitions are + # enabled at the same time: + # + # DONE -> MIGHT_SWITCH_PROTOCOL + # DONE -> MUST_CLOSE + # + # For example, this will always be true of a HTTP/1.0 client + # requesting CONNECT. If this happens, the protocol switch takes + # priority. From there the client will either go to + # SWITCHED_PROTOCOL, in which case it's none of our business when + # they close the connection, or else the server will deny the + # request, in which case the client will go back to DONE and then + # from there to MUST_CLOSE. 
+ if self.pending_switch_proposals: + if self.states[CLIENT] is DONE: + self.states[CLIENT] = MIGHT_SWITCH_PROTOCOL + + if not self.pending_switch_proposals: + if self.states[CLIENT] is MIGHT_SWITCH_PROTOCOL: + self.states[CLIENT] = DONE + + if not self.keep_alive: + for role in (CLIENT, SERVER): + if self.states[role] is DONE: + self.states[role] = MUST_CLOSE + + # Tabular state-triggered transitions + joint_state = (self.states[CLIENT], self.states[SERVER]) + changes = STATE_TRIGGERED_TRANSITIONS.get(joint_state, {}) + self.states.update(changes) + + if self.states == start_states: + # Fixed point reached + return + + def start_next_cycle(self) -> None: + if self.states != {CLIENT: DONE, SERVER: DONE}: + raise LocalProtocolError( + "not in a reusable state. self.states={}".format(self.states) + ) + # Can't reach DONE/DONE with any of these active, but still, let's be + # sure. + assert self.keep_alive + assert not self.pending_switch_proposals + self.states = {CLIENT: IDLE, SERVER: IDLE} diff --git a/env/lib/python3.10/site-packages/h11/_util.py b/env/lib/python3.10/site-packages/h11/_util.py new file mode 100644 index 0000000..6718445 --- /dev/null +++ b/env/lib/python3.10/site-packages/h11/_util.py @@ -0,0 +1,135 @@ +from typing import Any, Dict, NoReturn, Pattern, Tuple, Type, TypeVar, Union + +__all__ = [ + "ProtocolError", + "LocalProtocolError", + "RemoteProtocolError", + "validate", + "bytesify", +] + + +class ProtocolError(Exception): + """Exception indicating a violation of the HTTP/1.1 protocol. + + This as an abstract base class, with two concrete base classes: + :exc:`LocalProtocolError`, which indicates that you tried to do something + that HTTP/1.1 says is illegal, and :exc:`RemoteProtocolError`, which + indicates that the remote peer tried to do something that HTTP/1.1 says is + illegal. See :ref:`error-handling` for details. + + In addition to the normal :exc:`Exception` features, it has one attribute: + + .. 
attribute:: error_status_hint + + This gives a suggestion as to what status code a server might use if + this error occurred as part of a request. + + For a :exc:`RemoteProtocolError`, this is useful as a suggestion for + how you might want to respond to a misbehaving peer, if you're + implementing a server. + + For a :exc:`LocalProtocolError`, this can be taken as a suggestion for + how your peer might have responded to *you* if h11 had allowed you to + continue. + + The default is 400 Bad Request, a generic catch-all for protocol + violations. + + """ + + def __init__(self, msg: str, error_status_hint: int = 400) -> None: + if type(self) is ProtocolError: + raise TypeError("tried to directly instantiate ProtocolError") + Exception.__init__(self, msg) + self.error_status_hint = error_status_hint + + +# Strategy: there are a number of public APIs where a LocalProtocolError can +# be raised (send(), all the different event constructors, ...), and only one +# public API where RemoteProtocolError can be raised +# (receive_data()). Therefore we always raise LocalProtocolError internally, +# and then receive_data will translate this into a RemoteProtocolError. +# +# Internally: +# LocalProtocolError is the generic "ProtocolError". +# Externally: +# LocalProtocolError is for local errors and RemoteProtocolError is for +# remote errors. +class LocalProtocolError(ProtocolError): + def _reraise_as_remote_protocol_error(self) -> NoReturn: + # After catching a LocalProtocolError, use this method to re-raise it + # as a RemoteProtocolError. This method must be called from inside an + # except: block. + # + # An easy way to get an equivalent RemoteProtocolError is just to + # modify 'self' in place. + self.__class__ = RemoteProtocolError # type: ignore + # But the re-raising is somewhat non-trivial -- you might think that + # now that we've modified the in-flight exception object, that just + # doing 'raise' to re-raise it would be enough. 
But it turns out that + # this doesn't work, because Python tracks the exception type + # (exc_info[0]) separately from the exception object (exc_info[1]), + # and we only modified the latter. So we really do need to re-raise + # the new type explicitly. + # On py3, the traceback is part of the exception object, so our + # in-place modification preserved it and we can just re-raise: + raise self + + +class RemoteProtocolError(ProtocolError): + pass + + +def validate( + regex: Pattern[bytes], data: bytes, msg: str = "malformed data", *format_args: Any +) -> Dict[str, bytes]: + match = regex.fullmatch(data) + if not match: + if format_args: + msg = msg.format(*format_args) + raise LocalProtocolError(msg) + return match.groupdict() + + +# Sentinel values +# +# - Inherit identity-based comparison and hashing from object +# - Have a nice repr +# - Have a *bonus property*: type(sentinel) is sentinel +# +# The bonus property is useful if you want to take the return value from +# next_event() and do some sort of dispatch based on type(event). + +_T_Sentinel = TypeVar("_T_Sentinel", bound="Sentinel") + + +class Sentinel(type): + def __new__( + cls: Type[_T_Sentinel], + name: str, + bases: Tuple[type, ...], + namespace: Dict[str, Any], + **kwds: Any + ) -> _T_Sentinel: + assert bases == (Sentinel,) + v = super().__new__(cls, name, bases, namespace, **kwds) + v.__class__ = v # type: ignore + return v + + def __repr__(self) -> str: + return self.__name__ + + +# Used for methods, request targets, HTTP versions, header names, and header +# values. Accepts ascii-strings, or bytes/bytearray/memoryview/..., and always +# returns bytes. 
+def bytesify(s: Union[bytes, bytearray, memoryview, int, str]) -> bytes: + # Fast-path: + if type(s) is bytes: + return s + if isinstance(s, str): + s = s.encode("ascii") + if isinstance(s, int): + raise TypeError("expected bytes-like object, not int") + return bytes(s) diff --git a/env/lib/python3.10/site-packages/h11/_version.py b/env/lib/python3.10/site-packages/h11/_version.py new file mode 100644 index 0000000..4c89113 --- /dev/null +++ b/env/lib/python3.10/site-packages/h11/_version.py @@ -0,0 +1,16 @@ +# This file must be kept very simple, because it is consumed from several +# places -- it is imported by h11/__init__.py, execfile'd by setup.py, etc. + +# We use a simple scheme: +# 1.0.0 -> 1.0.0+dev -> 1.1.0 -> 1.1.0+dev +# where the +dev versions are never released into the wild, they're just what +# we stick into the VCS in between releases. +# +# This is compatible with PEP 440: +# http://legacy.python.org/dev/peps/pep-0440/ +# via the use of the "local suffix" "+dev", which is disallowed on index +# servers and causes 1.0.0+dev to sort after plain 1.0.0, which is what we +# want. (Contrast with the special suffix 1.0.0.dev, which sorts *before* +# 1.0.0.) + +__version__ = "0.14.0" diff --git a/env/lib/python3.10/site-packages/h11/_writers.py b/env/lib/python3.10/site-packages/h11/_writers.py new file mode 100644 index 0000000..939cdb9 --- /dev/null +++ b/env/lib/python3.10/site-packages/h11/_writers.py @@ -0,0 +1,145 @@ +# Code to read HTTP data +# +# Strategy: each writer takes an event + a write-some-bytes function, which is +# calls. +# +# WRITERS is a dict describing how to pick a reader. 
It maps states to either: +# - a writer +# - or, for body writers, a dict of framin-dependent writer factories + +from typing import Any, Callable, Dict, List, Tuple, Type, Union + +from ._events import Data, EndOfMessage, Event, InformationalResponse, Request, Response +from ._headers import Headers +from ._state import CLIENT, IDLE, SEND_BODY, SEND_RESPONSE, SERVER +from ._util import LocalProtocolError, Sentinel + +__all__ = ["WRITERS"] + +Writer = Callable[[bytes], Any] + + +def write_headers(headers: Headers, write: Writer) -> None: + # "Since the Host field-value is critical information for handling a + # request, a user agent SHOULD generate Host as the first header field + # following the request-line." - RFC 7230 + raw_items = headers._full_items + for raw_name, name, value in raw_items: + if name == b"host": + write(b"%s: %s\r\n" % (raw_name, value)) + for raw_name, name, value in raw_items: + if name != b"host": + write(b"%s: %s\r\n" % (raw_name, value)) + write(b"\r\n") + + +def write_request(request: Request, write: Writer) -> None: + if request.http_version != b"1.1": + raise LocalProtocolError("I only send HTTP/1.1") + write(b"%s %s HTTP/1.1\r\n" % (request.method, request.target)) + write_headers(request.headers, write) + + +# Shared between InformationalResponse and Response +def write_any_response( + response: Union[InformationalResponse, Response], write: Writer +) -> None: + if response.http_version != b"1.1": + raise LocalProtocolError("I only send HTTP/1.1") + status_bytes = str(response.status_code).encode("ascii") + # We don't bother sending ascii status messages like "OK"; they're + # optional and ignored by the protocol. (But the space after the numeric + # status code is mandatory.) + # + # XX FIXME: could at least make an effort to pull out the status message + # from stdlib's http.HTTPStatus table. Or maybe just steal their enums + # (either by import or copy/paste). 
We already accept them as status codes + # since they're of type IntEnum < int. + write(b"HTTP/1.1 %s %s\r\n" % (status_bytes, response.reason)) + write_headers(response.headers, write) + + +class BodyWriter: + def __call__(self, event: Event, write: Writer) -> None: + if type(event) is Data: + self.send_data(event.data, write) + elif type(event) is EndOfMessage: + self.send_eom(event.headers, write) + else: # pragma: no cover + assert False + + def send_data(self, data: bytes, write: Writer) -> None: + pass + + def send_eom(self, headers: Headers, write: Writer) -> None: + pass + + +# +# These are all careful not to do anything to 'data' except call len(data) and +# write(data). This allows us to transparently pass-through funny objects, +# like placeholder objects referring to files on disk that will be sent via +# sendfile(2). +# +class ContentLengthWriter(BodyWriter): + def __init__(self, length: int) -> None: + self._length = length + + def send_data(self, data: bytes, write: Writer) -> None: + self._length -= len(data) + if self._length < 0: + raise LocalProtocolError("Too much data for declared Content-Length") + write(data) + + def send_eom(self, headers: Headers, write: Writer) -> None: + if self._length != 0: + raise LocalProtocolError("Too little data for declared Content-Length") + if headers: + raise LocalProtocolError("Content-Length and trailers don't mix") + + +class ChunkedWriter(BodyWriter): + def send_data(self, data: bytes, write: Writer) -> None: + # if we encoded 0-length data in the naive way, it would look like an + # end-of-message. 
+ if not data: + return + write(b"%x\r\n" % len(data)) + write(data) + write(b"\r\n") + + def send_eom(self, headers: Headers, write: Writer) -> None: + write(b"0\r\n") + write_headers(headers, write) + + +class Http10Writer(BodyWriter): + def send_data(self, data: bytes, write: Writer) -> None: + write(data) + + def send_eom(self, headers: Headers, write: Writer) -> None: + if headers: + raise LocalProtocolError("can't send trailers to HTTP/1.0 client") + # no need to close the socket ourselves, that will be taken care of by + # Connection: close machinery + + +WritersType = Dict[ + Union[Tuple[Type[Sentinel], Type[Sentinel]], Type[Sentinel]], + Union[ + Dict[str, Type[BodyWriter]], + Callable[[Union[InformationalResponse, Response], Writer], None], + Callable[[Request, Writer], None], + ], +] + +WRITERS: WritersType = { + (CLIENT, IDLE): write_request, + (SERVER, IDLE): write_any_response, + (SERVER, SEND_RESPONSE): write_any_response, + SEND_BODY: { + "chunked": ChunkedWriter, + "content-length": ContentLengthWriter, + "http/1.0": Http10Writer, + }, +} diff --git a/env/lib/python3.10/site-packages/h11/py.typed b/env/lib/python3.10/site-packages/h11/py.typed new file mode 100644 index 0000000..f5642f7 --- /dev/null +++ b/env/lib/python3.10/site-packages/h11/py.typed @@ -0,0 +1 @@ +Marker diff --git a/env/lib/python3.10/site-packages/h11/tests/__init__.py b/env/lib/python3.10/site-packages/h11/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/env/lib/python3.10/site-packages/h11/tests/data/test-file b/env/lib/python3.10/site-packages/h11/tests/data/test-file new file mode 100644 index 0000000..d0be0a6 --- /dev/null +++ b/env/lib/python3.10/site-packages/h11/tests/data/test-file @@ -0,0 +1 @@ +92b12bc045050b55b848d37167a1a63947c364579889ce1d39788e45e9fac9e5 diff --git a/env/lib/python3.10/site-packages/h11/tests/helpers.py b/env/lib/python3.10/site-packages/h11/tests/helpers.py new file mode 100644 index 0000000..571be44 --- /dev/null +++ 
b/env/lib/python3.10/site-packages/h11/tests/helpers.py @@ -0,0 +1,101 @@ +from typing import cast, List, Type, Union, ValuesView + +from .._connection import Connection, NEED_DATA, PAUSED +from .._events import ( + ConnectionClosed, + Data, + EndOfMessage, + Event, + InformationalResponse, + Request, + Response, +) +from .._state import CLIENT, CLOSED, DONE, MUST_CLOSE, SERVER +from .._util import Sentinel + +try: + from typing import Literal +except ImportError: + from typing_extensions import Literal # type: ignore + + +def get_all_events(conn: Connection) -> List[Event]: + got_events = [] + while True: + event = conn.next_event() + if event in (NEED_DATA, PAUSED): + break + event = cast(Event, event) + got_events.append(event) + if type(event) is ConnectionClosed: + break + return got_events + + +def receive_and_get(conn: Connection, data: bytes) -> List[Event]: + conn.receive_data(data) + return get_all_events(conn) + + +# Merges adjacent Data events, converts payloads to bytestrings, and removes +# chunk boundaries. +def normalize_data_events(in_events: List[Event]) -> List[Event]: + out_events: List[Event] = [] + for event in in_events: + if type(event) is Data: + event = Data(data=bytes(event.data), chunk_start=False, chunk_end=False) + if out_events and type(out_events[-1]) is type(event) is Data: + out_events[-1] = Data( + data=out_events[-1].data + event.data, + chunk_start=out_events[-1].chunk_start, + chunk_end=out_events[-1].chunk_end, + ) + else: + out_events.append(event) + return out_events + + +# Given that we want to write tests that push some events through a Connection +# and check that its state updates appropriately... we might as make a habit +# of pushing them through two Connections with a fake network link in +# between. 
+class ConnectionPair: + def __init__(self) -> None: + self.conn = {CLIENT: Connection(CLIENT), SERVER: Connection(SERVER)} + self.other = {CLIENT: SERVER, SERVER: CLIENT} + + @property + def conns(self) -> ValuesView[Connection]: + return self.conn.values() + + # expect="match" if expect=send_events; expect=[...] to say what expected + def send( + self, + role: Type[Sentinel], + send_events: Union[List[Event], Event], + expect: Union[List[Event], Event, Literal["match"]] = "match", + ) -> bytes: + if not isinstance(send_events, list): + send_events = [send_events] + data = b"" + closed = False + for send_event in send_events: + new_data = self.conn[role].send(send_event) + if new_data is None: + closed = True + else: + data += new_data + # send uses b"" to mean b"", and None to mean closed + # receive uses b"" to mean closed, and None to mean "try again" + # so we have to translate between the two conventions + if data: + self.conn[self.other[role]].receive_data(data) + if closed: + self.conn[self.other[role]].receive_data(b"") + got_events = get_all_events(self.conn[self.other[role]]) + if expect == "match": + expect = send_events + if not isinstance(expect, list): + expect = [expect] + assert got_events == expect + return data diff --git a/env/lib/python3.10/site-packages/h11/tests/test_against_stdlib_http.py b/env/lib/python3.10/site-packages/h11/tests/test_against_stdlib_http.py new file mode 100644 index 0000000..d2ee131 --- /dev/null +++ b/env/lib/python3.10/site-packages/h11/tests/test_against_stdlib_http.py @@ -0,0 +1,115 @@ +import json +import os.path +import socket +import socketserver +import threading +from contextlib import closing, contextmanager +from http.server import SimpleHTTPRequestHandler +from typing import Callable, Generator +from urllib.request import urlopen + +import h11 + + +@contextmanager +def socket_server( + handler: Callable[..., socketserver.BaseRequestHandler] +) -> Generator[socketserver.TCPServer, None, None]: + httpd = 
socketserver.TCPServer(("127.0.0.1", 0), handler) + thread = threading.Thread( + target=httpd.serve_forever, kwargs={"poll_interval": 0.01} + ) + thread.daemon = True + try: + thread.start() + yield httpd + finally: + httpd.shutdown() + + +test_file_path = os.path.join(os.path.dirname(__file__), "data/test-file") +with open(test_file_path, "rb") as f: + test_file_data = f.read() + + +class SingleMindedRequestHandler(SimpleHTTPRequestHandler): + def translate_path(self, path: str) -> str: + return test_file_path + + +def test_h11_as_client() -> None: + with socket_server(SingleMindedRequestHandler) as httpd: + with closing(socket.create_connection(httpd.server_address)) as s: + c = h11.Connection(h11.CLIENT) + + s.sendall( + c.send( # type: ignore[arg-type] + h11.Request( + method="GET", target="/foo", headers=[("Host", "localhost")] + ) + ) + ) + s.sendall(c.send(h11.EndOfMessage())) # type: ignore[arg-type] + + data = bytearray() + while True: + event = c.next_event() + print(event) + if event is h11.NEED_DATA: + # Use a small read buffer to make things more challenging + # and exercise more paths :-) + c.receive_data(s.recv(10)) + continue + if type(event) is h11.Response: + assert event.status_code == 200 + if type(event) is h11.Data: + data += event.data + if type(event) is h11.EndOfMessage: + break + assert bytes(data) == test_file_data + + +class H11RequestHandler(socketserver.BaseRequestHandler): + def handle(self) -> None: + with closing(self.request) as s: + c = h11.Connection(h11.SERVER) + request = None + while True: + event = c.next_event() + if event is h11.NEED_DATA: + # Use a small read buffer to make things more challenging + # and exercise more paths :-) + c.receive_data(s.recv(10)) + continue + if type(event) is h11.Request: + request = event + if type(event) is h11.EndOfMessage: + break + assert request is not None + info = json.dumps( + { + "method": request.method.decode("ascii"), + "target": request.target.decode("ascii"), + "headers": { + 
name.decode("ascii"): value.decode("ascii") + for (name, value) in request.headers + }, + } + ) + s.sendall(c.send(h11.Response(status_code=200, headers=[]))) # type: ignore[arg-type] + s.sendall(c.send(h11.Data(data=info.encode("ascii")))) + s.sendall(c.send(h11.EndOfMessage())) + + +def test_h11_as_server() -> None: + with socket_server(H11RequestHandler) as httpd: + host, port = httpd.server_address + url = "http://{}:{}/some-path".format(host, port) + with closing(urlopen(url)) as f: + assert f.getcode() == 200 + data = f.read() + info = json.loads(data.decode("ascii")) + print(info) + assert info["method"] == "GET" + assert info["target"] == "/some-path" + assert "urllib" in info["headers"]["user-agent"] diff --git a/env/lib/python3.10/site-packages/h11/tests/test_connection.py b/env/lib/python3.10/site-packages/h11/tests/test_connection.py new file mode 100644 index 0000000..73a27b9 --- /dev/null +++ b/env/lib/python3.10/site-packages/h11/tests/test_connection.py @@ -0,0 +1,1122 @@ +from typing import Any, cast, Dict, List, Optional, Tuple, Type + +import pytest + +from .._connection import _body_framing, _keep_alive, Connection, NEED_DATA, PAUSED +from .._events import ( + ConnectionClosed, + Data, + EndOfMessage, + Event, + InformationalResponse, + Request, + Response, +) +from .._state import ( + CLIENT, + CLOSED, + DONE, + ERROR, + IDLE, + MIGHT_SWITCH_PROTOCOL, + MUST_CLOSE, + SEND_BODY, + SEND_RESPONSE, + SERVER, + SWITCHED_PROTOCOL, +) +from .._util import LocalProtocolError, RemoteProtocolError, Sentinel +from .helpers import ConnectionPair, get_all_events, receive_and_get + + +def test__keep_alive() -> None: + assert _keep_alive( + Request(method="GET", target="/", headers=[("Host", "Example.com")]) + ) + assert not _keep_alive( + Request( + method="GET", + target="/", + headers=[("Host", "Example.com"), ("Connection", "close")], + ) + ) + assert not _keep_alive( + Request( + method="GET", + target="/", + headers=[("Host", "Example.com"), 
("Connection", "a, b, cLOse, foo")], + ) + ) + assert not _keep_alive( + Request(method="GET", target="/", headers=[], http_version="1.0") # type: ignore[arg-type] + ) + + assert _keep_alive(Response(status_code=200, headers=[])) # type: ignore[arg-type] + assert not _keep_alive(Response(status_code=200, headers=[("Connection", "close")])) + assert not _keep_alive( + Response(status_code=200, headers=[("Connection", "a, b, cLOse, foo")]) + ) + assert not _keep_alive(Response(status_code=200, headers=[], http_version="1.0")) # type: ignore[arg-type] + + +def test__body_framing() -> None: + def headers(cl: Optional[int], te: bool) -> List[Tuple[str, str]]: + headers = [] + if cl is not None: + headers.append(("Content-Length", str(cl))) + if te: + headers.append(("Transfer-Encoding", "chunked")) + return headers + + def resp( + status_code: int = 200, cl: Optional[int] = None, te: bool = False + ) -> Response: + return Response(status_code=status_code, headers=headers(cl, te)) + + def req(cl: Optional[int] = None, te: bool = False) -> Request: + h = headers(cl, te) + h += [("Host", "example.com")] + return Request(method="GET", target="/", headers=h) + + # Special cases where the headers are ignored: + for kwargs in [{}, {"cl": 100}, {"te": True}, {"cl": 100, "te": True}]: + kwargs = cast(Dict[str, Any], kwargs) + for meth, r in [ + (b"HEAD", resp(**kwargs)), + (b"GET", resp(status_code=204, **kwargs)), + (b"GET", resp(status_code=304, **kwargs)), + ]: + assert _body_framing(meth, r) == ("content-length", (0,)) + + # Transfer-encoding + for kwargs in [{"te": True}, {"cl": 100, "te": True}]: + kwargs = cast(Dict[str, Any], kwargs) + for meth, r in [(None, req(**kwargs)), (b"GET", resp(**kwargs))]: # type: ignore + assert _body_framing(meth, r) == ("chunked", ()) + + # Content-Length + for meth, r in [(None, req(cl=100)), (b"GET", resp(cl=100))]: # type: ignore + assert _body_framing(meth, r) == ("content-length", (100,)) + + # No headers + assert _body_framing(None, 
req()) == ("content-length", (0,)) # type: ignore + assert _body_framing(b"GET", resp()) == ("http/1.0", ()) + + +def test_Connection_basics_and_content_length() -> None: + with pytest.raises(ValueError): + Connection("CLIENT") # type: ignore + + p = ConnectionPair() + assert p.conn[CLIENT].our_role is CLIENT + assert p.conn[CLIENT].their_role is SERVER + assert p.conn[SERVER].our_role is SERVER + assert p.conn[SERVER].their_role is CLIENT + + data = p.send( + CLIENT, + Request( + method="GET", + target="/", + headers=[("Host", "example.com"), ("Content-Length", "10")], + ), + ) + assert data == ( + b"GET / HTTP/1.1\r\n" b"Host: example.com\r\n" b"Content-Length: 10\r\n\r\n" + ) + + for conn in p.conns: + assert conn.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE} + assert p.conn[CLIENT].our_state is SEND_BODY + assert p.conn[CLIENT].their_state is SEND_RESPONSE + assert p.conn[SERVER].our_state is SEND_RESPONSE + assert p.conn[SERVER].their_state is SEND_BODY + + assert p.conn[CLIENT].their_http_version is None + assert p.conn[SERVER].their_http_version == b"1.1" + + data = p.send(SERVER, InformationalResponse(status_code=100, headers=[])) # type: ignore[arg-type] + assert data == b"HTTP/1.1 100 \r\n\r\n" + + data = p.send(SERVER, Response(status_code=200, headers=[("Content-Length", "11")])) + assert data == b"HTTP/1.1 200 \r\nContent-Length: 11\r\n\r\n" + + for conn in p.conns: + assert conn.states == {CLIENT: SEND_BODY, SERVER: SEND_BODY} + + assert p.conn[CLIENT].their_http_version == b"1.1" + assert p.conn[SERVER].their_http_version == b"1.1" + + data = p.send(CLIENT, Data(data=b"12345")) + assert data == b"12345" + data = p.send( + CLIENT, Data(data=b"67890"), expect=[Data(data=b"67890"), EndOfMessage()] + ) + assert data == b"67890" + data = p.send(CLIENT, EndOfMessage(), expect=[]) + assert data == b"" + + for conn in p.conns: + assert conn.states == {CLIENT: DONE, SERVER: SEND_BODY} + + data = p.send(SERVER, Data(data=b"1234567890")) + assert data == 
b"1234567890" + data = p.send(SERVER, Data(data=b"1"), expect=[Data(data=b"1"), EndOfMessage()]) + assert data == b"1" + data = p.send(SERVER, EndOfMessage(), expect=[]) + assert data == b"" + + for conn in p.conns: + assert conn.states == {CLIENT: DONE, SERVER: DONE} + + +def test_chunked() -> None: + p = ConnectionPair() + + p.send( + CLIENT, + Request( + method="GET", + target="/", + headers=[("Host", "example.com"), ("Transfer-Encoding", "chunked")], + ), + ) + data = p.send(CLIENT, Data(data=b"1234567890", chunk_start=True, chunk_end=True)) + assert data == b"a\r\n1234567890\r\n" + data = p.send(CLIENT, Data(data=b"abcde", chunk_start=True, chunk_end=True)) + assert data == b"5\r\nabcde\r\n" + data = p.send(CLIENT, Data(data=b""), expect=[]) + assert data == b"" + data = p.send(CLIENT, EndOfMessage(headers=[("hello", "there")])) + assert data == b"0\r\nhello: there\r\n\r\n" + + p.send( + SERVER, Response(status_code=200, headers=[("Transfer-Encoding", "chunked")]) + ) + p.send(SERVER, Data(data=b"54321", chunk_start=True, chunk_end=True)) + p.send(SERVER, Data(data=b"12345", chunk_start=True, chunk_end=True)) + p.send(SERVER, EndOfMessage()) + + for conn in p.conns: + assert conn.states == {CLIENT: DONE, SERVER: DONE} + + +def test_chunk_boundaries() -> None: + conn = Connection(our_role=SERVER) + + request = ( + b"POST / HTTP/1.1\r\n" + b"Host: example.com\r\n" + b"Transfer-Encoding: chunked\r\n" + b"\r\n" + ) + conn.receive_data(request) + assert conn.next_event() == Request( + method="POST", + target="/", + headers=[("Host", "example.com"), ("Transfer-Encoding", "chunked")], + ) + assert conn.next_event() is NEED_DATA + + conn.receive_data(b"5\r\nhello\r\n") + assert conn.next_event() == Data(data=b"hello", chunk_start=True, chunk_end=True) + + conn.receive_data(b"5\r\nhel") + assert conn.next_event() == Data(data=b"hel", chunk_start=True, chunk_end=False) + + conn.receive_data(b"l") + assert conn.next_event() == Data(data=b"l", chunk_start=False, 
chunk_end=False) + + conn.receive_data(b"o\r\n") + assert conn.next_event() == Data(data=b"o", chunk_start=False, chunk_end=True) + + conn.receive_data(b"5\r\nhello") + assert conn.next_event() == Data(data=b"hello", chunk_start=True, chunk_end=True) + + conn.receive_data(b"\r\n") + assert conn.next_event() == NEED_DATA + + conn.receive_data(b"0\r\n\r\n") + assert conn.next_event() == EndOfMessage() + + +def test_client_talking_to_http10_server() -> None: + c = Connection(CLIENT) + c.send(Request(method="GET", target="/", headers=[("Host", "example.com")])) + c.send(EndOfMessage()) + assert c.our_state is DONE + # No content-length, so Http10 framing for body + assert receive_and_get(c, b"HTTP/1.0 200 OK\r\n\r\n") == [ + Response(status_code=200, headers=[], http_version="1.0", reason=b"OK") # type: ignore[arg-type] + ] + assert c.our_state is MUST_CLOSE + assert receive_and_get(c, b"12345") == [Data(data=b"12345")] + assert receive_and_get(c, b"67890") == [Data(data=b"67890")] + assert receive_and_get(c, b"") == [EndOfMessage(), ConnectionClosed()] + assert c.their_state is CLOSED + + +def test_server_talking_to_http10_client() -> None: + c = Connection(SERVER) + # No content-length, so no body + # NB: no host header + assert receive_and_get(c, b"GET / HTTP/1.0\r\n\r\n") == [ + Request(method="GET", target="/", headers=[], http_version="1.0"), # type: ignore[arg-type] + EndOfMessage(), + ] + assert c.their_state is MUST_CLOSE + + # We automatically Connection: close back at them + assert ( + c.send(Response(status_code=200, headers=[])) # type: ignore[arg-type] + == b"HTTP/1.1 200 \r\nConnection: close\r\n\r\n" + ) + + assert c.send(Data(data=b"12345")) == b"12345" + assert c.send(EndOfMessage()) == b"" + assert c.our_state is MUST_CLOSE + + # Check that it works if they do send Content-Length + c = Connection(SERVER) + # NB: no host header + assert receive_and_get(c, b"POST / HTTP/1.0\r\nContent-Length: 10\r\n\r\n1") == [ + Request( + method="POST", + target="/", 
+ headers=[("Content-Length", "10")], + http_version="1.0", + ), + Data(data=b"1"), + ] + assert receive_and_get(c, b"234567890") == [Data(data=b"234567890"), EndOfMessage()] + assert c.their_state is MUST_CLOSE + assert receive_and_get(c, b"") == [ConnectionClosed()] + + +def test_automatic_transfer_encoding_in_response() -> None: + # Check that in responses, the user can specify either Transfer-Encoding: + # chunked or no framing at all, and in both cases we automatically select + # the right option depending on whether the peer speaks HTTP/1.0 or + # HTTP/1.1 + for user_headers in [ + [("Transfer-Encoding", "chunked")], + [], + # In fact, this even works if Content-Length is set, + # because if both are set then Transfer-Encoding wins + [("Transfer-Encoding", "chunked"), ("Content-Length", "100")], + ]: + user_headers = cast(List[Tuple[str, str]], user_headers) + p = ConnectionPair() + p.send( + CLIENT, + [ + Request(method="GET", target="/", headers=[("Host", "example.com")]), + EndOfMessage(), + ], + ) + # When speaking to HTTP/1.1 client, all of the above cases get + # normalized to Transfer-Encoding: chunked + p.send( + SERVER, + Response(status_code=200, headers=user_headers), + expect=Response( + status_code=200, headers=[("Transfer-Encoding", "chunked")] + ), + ) + + # When speaking to HTTP/1.0 client, all of the above cases get + # normalized to no-framing-headers + c = Connection(SERVER) + receive_and_get(c, b"GET / HTTP/1.0\r\n\r\n") + assert ( + c.send(Response(status_code=200, headers=user_headers)) + == b"HTTP/1.1 200 \r\nConnection: close\r\n\r\n" + ) + assert c.send(Data(data=b"12345")) == b"12345" + + +def test_automagic_connection_close_handling() -> None: + p = ConnectionPair() + # If the user explicitly sets Connection: close, then we notice and + # respect it + p.send( + CLIENT, + [ + Request( + method="GET", + target="/", + headers=[("Host", "example.com"), ("Connection", "close")], + ), + EndOfMessage(), + ], + ) + for conn in p.conns: + 
assert conn.states[CLIENT] is MUST_CLOSE + # And if the client sets it, the server automatically echoes it back + p.send( + SERVER, + # no header here... + [Response(status_code=204, headers=[]), EndOfMessage()], # type: ignore[arg-type] + # ...but oh look, it arrived anyway + expect=[ + Response(status_code=204, headers=[("connection", "close")]), + EndOfMessage(), + ], + ) + for conn in p.conns: + assert conn.states == {CLIENT: MUST_CLOSE, SERVER: MUST_CLOSE} + + +def test_100_continue() -> None: + def setup() -> ConnectionPair: + p = ConnectionPair() + p.send( + CLIENT, + Request( + method="GET", + target="/", + headers=[ + ("Host", "example.com"), + ("Content-Length", "100"), + ("Expect", "100-continue"), + ], + ), + ) + for conn in p.conns: + assert conn.client_is_waiting_for_100_continue + assert not p.conn[CLIENT].they_are_waiting_for_100_continue + assert p.conn[SERVER].they_are_waiting_for_100_continue + return p + + # Disabled by 100 Continue + p = setup() + p.send(SERVER, InformationalResponse(status_code=100, headers=[])) # type: ignore[arg-type] + for conn in p.conns: + assert not conn.client_is_waiting_for_100_continue + assert not conn.they_are_waiting_for_100_continue + + # Disabled by a real response + p = setup() + p.send( + SERVER, Response(status_code=200, headers=[("Transfer-Encoding", "chunked")]) + ) + for conn in p.conns: + assert not conn.client_is_waiting_for_100_continue + assert not conn.they_are_waiting_for_100_continue + + # Disabled by the client going ahead and sending stuff anyway + p = setup() + p.send(CLIENT, Data(data=b"12345")) + for conn in p.conns: + assert not conn.client_is_waiting_for_100_continue + assert not conn.they_are_waiting_for_100_continue + + +def test_max_incomplete_event_size_countermeasure() -> None: + # Infinitely long headers are definitely not okay + c = Connection(SERVER) + c.receive_data(b"GET / HTTP/1.0\r\nEndless: ") + assert c.next_event() is NEED_DATA + with pytest.raises(RemoteProtocolError): + while 
True: + c.receive_data(b"a" * 1024) + c.next_event() + + # Checking that the same header is accepted / rejected depending on the + # max_incomplete_event_size setting: + c = Connection(SERVER, max_incomplete_event_size=5000) + c.receive_data(b"GET / HTTP/1.0\r\nBig: ") + c.receive_data(b"a" * 4000) + c.receive_data(b"\r\n\r\n") + assert get_all_events(c) == [ + Request( + method="GET", target="/", http_version="1.0", headers=[("big", "a" * 4000)] + ), + EndOfMessage(), + ] + + c = Connection(SERVER, max_incomplete_event_size=4000) + c.receive_data(b"GET / HTTP/1.0\r\nBig: ") + c.receive_data(b"a" * 4000) + with pytest.raises(RemoteProtocolError): + c.next_event() + + # Temporarily exceeding the size limit is fine, as long as its done with + # complete events: + c = Connection(SERVER, max_incomplete_event_size=5000) + c.receive_data(b"GET / HTTP/1.0\r\nContent-Length: 10000") + c.receive_data(b"\r\n\r\n" + b"a" * 10000) + assert get_all_events(c) == [ + Request( + method="GET", + target="/", + http_version="1.0", + headers=[("Content-Length", "10000")], + ), + Data(data=b"a" * 10000), + EndOfMessage(), + ] + + c = Connection(SERVER, max_incomplete_event_size=100) + # Two pipelined requests to create a way-too-big receive buffer... 
but + # it's fine because we're not checking + c.receive_data( + b"GET /1 HTTP/1.1\r\nHost: a\r\n\r\n" + b"GET /2 HTTP/1.1\r\nHost: b\r\n\r\n" + b"X" * 1000 + ) + assert get_all_events(c) == [ + Request(method="GET", target="/1", headers=[("host", "a")]), + EndOfMessage(), + ] + # Even more data comes in, still no problem + c.receive_data(b"X" * 1000) + # We can respond and reuse to get the second pipelined request + c.send(Response(status_code=200, headers=[])) # type: ignore[arg-type] + c.send(EndOfMessage()) + c.start_next_cycle() + assert get_all_events(c) == [ + Request(method="GET", target="/2", headers=[("host", "b")]), + EndOfMessage(), + ] + # But once we unpause and try to read the next message, and find that it's + # incomplete and the buffer is *still* way too large, then *that's* a + # problem: + c.send(Response(status_code=200, headers=[])) # type: ignore[arg-type] + c.send(EndOfMessage()) + c.start_next_cycle() + with pytest.raises(RemoteProtocolError): + c.next_event() + + +def test_reuse_simple() -> None: + p = ConnectionPair() + p.send( + CLIENT, + [Request(method="GET", target="/", headers=[("Host", "a")]), EndOfMessage()], + ) + p.send( + SERVER, + [ + Response(status_code=200, headers=[(b"transfer-encoding", b"chunked")]), + EndOfMessage(), + ], + ) + for conn in p.conns: + assert conn.states == {CLIENT: DONE, SERVER: DONE} + conn.start_next_cycle() + + p.send( + CLIENT, + [ + Request(method="DELETE", target="/foo", headers=[("Host", "a")]), + EndOfMessage(), + ], + ) + p.send( + SERVER, + [ + Response(status_code=404, headers=[(b"transfer-encoding", b"chunked")]), + EndOfMessage(), + ], + ) + + +def test_pipelining() -> None: + # Client doesn't support pipelining, so we have to do this by hand + c = Connection(SERVER) + assert c.next_event() is NEED_DATA + # 3 requests all bunched up + c.receive_data( + b"GET /1 HTTP/1.1\r\nHost: a.com\r\nContent-Length: 5\r\n\r\n" + b"12345" + b"GET /2 HTTP/1.1\r\nHost: a.com\r\nContent-Length: 5\r\n\r\n" + 
b"67890" + b"GET /3 HTTP/1.1\r\nHost: a.com\r\n\r\n" + ) + assert get_all_events(c) == [ + Request( + method="GET", + target="/1", + headers=[("Host", "a.com"), ("Content-Length", "5")], + ), + Data(data=b"12345"), + EndOfMessage(), + ] + assert c.their_state is DONE + assert c.our_state is SEND_RESPONSE + + assert c.next_event() is PAUSED + + c.send(Response(status_code=200, headers=[])) # type: ignore[arg-type] + c.send(EndOfMessage()) + assert c.their_state is DONE + assert c.our_state is DONE + + c.start_next_cycle() + + assert get_all_events(c) == [ + Request( + method="GET", + target="/2", + headers=[("Host", "a.com"), ("Content-Length", "5")], + ), + Data(data=b"67890"), + EndOfMessage(), + ] + assert c.next_event() is PAUSED + c.send(Response(status_code=200, headers=[])) # type: ignore[arg-type] + c.send(EndOfMessage()) + c.start_next_cycle() + + assert get_all_events(c) == [ + Request(method="GET", target="/3", headers=[("Host", "a.com")]), + EndOfMessage(), + ] + # Doesn't pause this time, no trailing data + assert c.next_event() is NEED_DATA + c.send(Response(status_code=200, headers=[])) # type: ignore[arg-type] + c.send(EndOfMessage()) + + # Arrival of more data triggers pause + assert c.next_event() is NEED_DATA + c.receive_data(b"SADF") + assert c.next_event() is PAUSED + assert c.trailing_data == (b"SADF", False) + # If EOF arrives while paused, we don't see that either: + c.receive_data(b"") + assert c.trailing_data == (b"SADF", True) + assert c.next_event() is PAUSED + c.receive_data(b"") + assert c.next_event() is PAUSED + # Can't call receive_data with non-empty buf after closing it + with pytest.raises(RuntimeError): + c.receive_data(b"FDSA") + + +def test_protocol_switch() -> None: + for (req, deny, accept) in [ + ( + Request( + method="CONNECT", + target="example.com:443", + headers=[("Host", "foo"), ("Content-Length", "1")], + ), + Response(status_code=404, headers=[(b"transfer-encoding", b"chunked")]), + Response(status_code=200, 
headers=[(b"transfer-encoding", b"chunked")]), + ), + ( + Request( + method="GET", + target="/", + headers=[("Host", "foo"), ("Content-Length", "1"), ("Upgrade", "a, b")], + ), + Response(status_code=200, headers=[(b"transfer-encoding", b"chunked")]), + InformationalResponse(status_code=101, headers=[("Upgrade", "a")]), + ), + ( + Request( + method="CONNECT", + target="example.com:443", + headers=[("Host", "foo"), ("Content-Length", "1"), ("Upgrade", "a, b")], + ), + Response(status_code=404, headers=[(b"transfer-encoding", b"chunked")]), + # Accept CONNECT, not upgrade + Response(status_code=200, headers=[(b"transfer-encoding", b"chunked")]), + ), + ( + Request( + method="CONNECT", + target="example.com:443", + headers=[("Host", "foo"), ("Content-Length", "1"), ("Upgrade", "a, b")], + ), + Response(status_code=404, headers=[(b"transfer-encoding", b"chunked")]), + # Accept Upgrade, not CONNECT + InformationalResponse(status_code=101, headers=[("Upgrade", "b")]), + ), + ]: + + def setup() -> ConnectionPair: + p = ConnectionPair() + p.send(CLIENT, req) + # No switch-related state change stuff yet; the client has to + # finish the request before that kicks in + for conn in p.conns: + assert conn.states[CLIENT] is SEND_BODY + p.send(CLIENT, [Data(data=b"1"), EndOfMessage()]) + for conn in p.conns: + assert conn.states[CLIENT] is MIGHT_SWITCH_PROTOCOL + assert p.conn[SERVER].next_event() is PAUSED + return p + + # Test deny case + p = setup() + p.send(SERVER, deny) + for conn in p.conns: + assert conn.states == {CLIENT: DONE, SERVER: SEND_BODY} + p.send(SERVER, EndOfMessage()) + # Check that re-use is still allowed after a denial + for conn in p.conns: + conn.start_next_cycle() + + # Test accept case + p = setup() + p.send(SERVER, accept) + for conn in p.conns: + assert conn.states == {CLIENT: SWITCHED_PROTOCOL, SERVER: SWITCHED_PROTOCOL} + conn.receive_data(b"123") + assert conn.next_event() is PAUSED + conn.receive_data(b"456") + assert conn.next_event() is PAUSED + 
assert conn.trailing_data == (b"123456", False) + + # Pausing in might-switch, then recovery + # (weird artificial case where the trailing data actually is valid + # HTTP for some reason, because this makes it easier to test the state + # logic) + p = setup() + sc = p.conn[SERVER] + sc.receive_data(b"GET / HTTP/1.0\r\n\r\n") + assert sc.next_event() is PAUSED + assert sc.trailing_data == (b"GET / HTTP/1.0\r\n\r\n", False) + sc.send(deny) + assert sc.next_event() is PAUSED + sc.send(EndOfMessage()) + sc.start_next_cycle() + assert get_all_events(sc) == [ + Request(method="GET", target="/", headers=[], http_version="1.0"), # type: ignore[arg-type] + EndOfMessage(), + ] + + # When we're DONE, have no trailing data, and the connection gets + # closed, we report ConnectionClosed(). When we're in might-switch or + # switched, we don't. + p = setup() + sc = p.conn[SERVER] + sc.receive_data(b"") + assert sc.next_event() is PAUSED + assert sc.trailing_data == (b"", True) + p.send(SERVER, accept) + assert sc.next_event() is PAUSED + + p = setup() + sc = p.conn[SERVER] + sc.receive_data(b"") + assert sc.next_event() is PAUSED + sc.send(deny) + assert sc.next_event() == ConnectionClosed() + + # You can't send after switching protocols, or while waiting for a + # protocol switch + p = setup() + with pytest.raises(LocalProtocolError): + p.conn[CLIENT].send( + Request(method="GET", target="/", headers=[("Host", "a")]) + ) + p = setup() + p.send(SERVER, accept) + with pytest.raises(LocalProtocolError): + p.conn[SERVER].send(Data(data=b"123")) + + +def test_close_simple() -> None: + # Just immediately closing a new connection without anything having + # happened yet. 
+ for (who_shot_first, who_shot_second) in [(CLIENT, SERVER), (SERVER, CLIENT)]: + + def setup() -> ConnectionPair: + p = ConnectionPair() + p.send(who_shot_first, ConnectionClosed()) + for conn in p.conns: + assert conn.states == { + who_shot_first: CLOSED, + who_shot_second: MUST_CLOSE, + } + return p + + # You can keep putting b"" into a closed connection, and you keep + # getting ConnectionClosed() out: + p = setup() + assert p.conn[who_shot_second].next_event() == ConnectionClosed() + assert p.conn[who_shot_second].next_event() == ConnectionClosed() + p.conn[who_shot_second].receive_data(b"") + assert p.conn[who_shot_second].next_event() == ConnectionClosed() + # Second party can close... + p = setup() + p.send(who_shot_second, ConnectionClosed()) + for conn in p.conns: + assert conn.our_state is CLOSED + assert conn.their_state is CLOSED + # But trying to receive new data on a closed connection is a + # RuntimeError (not ProtocolError, because the problem here isn't + # violation of HTTP, it's violation of physics) + p = setup() + with pytest.raises(RuntimeError): + p.conn[who_shot_second].receive_data(b"123") + # And receiving new data on a MUST_CLOSE connection is a ProtocolError + p = setup() + p.conn[who_shot_first].receive_data(b"GET") + with pytest.raises(RemoteProtocolError): + p.conn[who_shot_first].next_event() + + +def test_close_different_states() -> None: + req = [ + Request(method="GET", target="/foo", headers=[("Host", "a")]), + EndOfMessage(), + ] + resp = [ + Response(status_code=200, headers=[(b"transfer-encoding", b"chunked")]), + EndOfMessage(), + ] + + # Client before request + p = ConnectionPair() + p.send(CLIENT, ConnectionClosed()) + for conn in p.conns: + assert conn.states == {CLIENT: CLOSED, SERVER: MUST_CLOSE} + + # Client after request + p = ConnectionPair() + p.send(CLIENT, req) + p.send(CLIENT, ConnectionClosed()) + for conn in p.conns: + assert conn.states == {CLIENT: CLOSED, SERVER: SEND_RESPONSE} + + # Server after request -> 
not allowed + p = ConnectionPair() + p.send(CLIENT, req) + with pytest.raises(LocalProtocolError): + p.conn[SERVER].send(ConnectionClosed()) + p.conn[CLIENT].receive_data(b"") + with pytest.raises(RemoteProtocolError): + p.conn[CLIENT].next_event() + + # Server after response + p = ConnectionPair() + p.send(CLIENT, req) + p.send(SERVER, resp) + p.send(SERVER, ConnectionClosed()) + for conn in p.conns: + assert conn.states == {CLIENT: MUST_CLOSE, SERVER: CLOSED} + + # Both after closing (ConnectionClosed() is idempotent) + p = ConnectionPair() + p.send(CLIENT, req) + p.send(SERVER, resp) + p.send(CLIENT, ConnectionClosed()) + p.send(SERVER, ConnectionClosed()) + p.send(CLIENT, ConnectionClosed()) + p.send(SERVER, ConnectionClosed()) + + # In the middle of sending -> not allowed + p = ConnectionPair() + p.send( + CLIENT, + Request( + method="GET", target="/", headers=[("Host", "a"), ("Content-Length", "10")] + ), + ) + with pytest.raises(LocalProtocolError): + p.conn[CLIENT].send(ConnectionClosed()) + p.conn[SERVER].receive_data(b"") + with pytest.raises(RemoteProtocolError): + p.conn[SERVER].next_event() + + +# Receive several requests and then client shuts down their side of the +# connection; we can respond to each +def test_pipelined_close() -> None: + c = Connection(SERVER) + # 2 requests then a close + c.receive_data( + b"GET /1 HTTP/1.1\r\nHost: a.com\r\nContent-Length: 5\r\n\r\n" + b"12345" + b"GET /2 HTTP/1.1\r\nHost: a.com\r\nContent-Length: 5\r\n\r\n" + b"67890" + ) + c.receive_data(b"") + assert get_all_events(c) == [ + Request( + method="GET", + target="/1", + headers=[("host", "a.com"), ("content-length", "5")], + ), + Data(data=b"12345"), + EndOfMessage(), + ] + assert c.states[CLIENT] is DONE + c.send(Response(status_code=200, headers=[])) # type: ignore[arg-type] + c.send(EndOfMessage()) + assert c.states[SERVER] is DONE + c.start_next_cycle() + assert get_all_events(c) == [ + Request( + method="GET", + target="/2", + headers=[("host", "a.com"), 
("content-length", "5")], + ), + Data(data=b"67890"), + EndOfMessage(), + ConnectionClosed(), + ] + assert c.states == {CLIENT: CLOSED, SERVER: SEND_RESPONSE} + c.send(Response(status_code=200, headers=[])) # type: ignore[arg-type] + c.send(EndOfMessage()) + assert c.states == {CLIENT: CLOSED, SERVER: MUST_CLOSE} + c.send(ConnectionClosed()) + assert c.states == {CLIENT: CLOSED, SERVER: CLOSED} + + +def test_sendfile() -> None: + class SendfilePlaceholder: + def __len__(self) -> int: + return 10 + + placeholder = SendfilePlaceholder() + + def setup( + header: Tuple[str, str], http_version: str + ) -> Tuple[Connection, Optional[List[bytes]]]: + c = Connection(SERVER) + receive_and_get( + c, "GET / HTTP/{}\r\nHost: a\r\n\r\n".format(http_version).encode("ascii") + ) + headers = [] + if header: + headers.append(header) + c.send(Response(status_code=200, headers=headers)) + return c, c.send_with_data_passthrough(Data(data=placeholder)) # type: ignore + + c, data = setup(("Content-Length", "10"), "1.1") + assert data == [placeholder] # type: ignore + # Raises an error if the connection object doesn't think we've sent + # exactly 10 bytes + c.send(EndOfMessage()) + + _, data = setup(("Transfer-Encoding", "chunked"), "1.1") + assert placeholder in data # type: ignore + data[data.index(placeholder)] = b"x" * 10 # type: ignore + assert b"".join(data) == b"a\r\nxxxxxxxxxx\r\n" # type: ignore + + c, data = setup(None, "1.0") # type: ignore + assert data == [placeholder] # type: ignore + assert c.our_state is SEND_BODY + + +def test_errors() -> None: + # After a receive error, you can't receive + for role in [CLIENT, SERVER]: + c = Connection(our_role=role) + c.receive_data(b"gibberish\r\n\r\n") + with pytest.raises(RemoteProtocolError): + c.next_event() + # Now any attempt to receive continues to raise + assert c.their_state is ERROR + assert c.our_state is not ERROR + print(c._cstate.states) + with pytest.raises(RemoteProtocolError): + c.next_event() + # But we can still 
yell at the client for sending us gibberish + if role is SERVER: + assert ( + c.send(Response(status_code=400, headers=[])) # type: ignore[arg-type] + == b"HTTP/1.1 400 \r\nConnection: close\r\n\r\n" + ) + + # After an error sending, you can no longer send + # (This is especially important for things like content-length errors, + # where there's complex internal state being modified) + def conn(role: Type[Sentinel]) -> Connection: + c = Connection(our_role=role) + if role is SERVER: + # Put it into the state where it *could* send a response... + receive_and_get(c, b"GET / HTTP/1.0\r\n\r\n") + assert c.our_state is SEND_RESPONSE + return c + + for role in [CLIENT, SERVER]: + if role is CLIENT: + # This HTTP/1.0 request won't be detected as bad until after we go + # through the state machine and hit the writing code + good = Request(method="GET", target="/", headers=[("Host", "example.com")]) + bad = Request( + method="GET", + target="/", + headers=[("Host", "example.com")], + http_version="1.0", + ) + elif role is SERVER: + good = Response(status_code=200, headers=[]) # type: ignore[arg-type,assignment] + bad = Response(status_code=200, headers=[], http_version="1.0") # type: ignore[arg-type,assignment] + # Make sure 'good' actually is good + c = conn(role) + c.send(good) + assert c.our_state is not ERROR + # Do that again, but this time sending 'bad' first + c = conn(role) + with pytest.raises(LocalProtocolError): + c.send(bad) + assert c.our_state is ERROR + assert c.their_state is not ERROR + # Now 'good' is not so good + with pytest.raises(LocalProtocolError): + c.send(good) + + # And check send_failed() too + c = conn(role) + c.send_failed() + assert c.our_state is ERROR + assert c.their_state is not ERROR + # This is idempotent + c.send_failed() + assert c.our_state is ERROR + assert c.their_state is not ERROR + + +def test_idle_receive_nothing() -> None: + # At one point this incorrectly raised an error + for role in [CLIENT, SERVER]: + c = Connection(role) + 
assert c.next_event() is NEED_DATA + + +def test_connection_drop() -> None: + c = Connection(SERVER) + c.receive_data(b"GET /") + assert c.next_event() is NEED_DATA + c.receive_data(b"") + with pytest.raises(RemoteProtocolError): + c.next_event() + + +def test_408_request_timeout() -> None: + # Should be able to send this spontaneously as a server without seeing + # anything from client + p = ConnectionPair() + p.send(SERVER, Response(status_code=408, headers=[(b"connection", b"close")])) + + +# This used to raise IndexError +def test_empty_request() -> None: + c = Connection(SERVER) + c.receive_data(b"\r\n") + with pytest.raises(RemoteProtocolError): + c.next_event() + + +# This used to raise IndexError +def test_empty_response() -> None: + c = Connection(CLIENT) + c.send(Request(method="GET", target="/", headers=[("Host", "a")])) + c.receive_data(b"\r\n") + with pytest.raises(RemoteProtocolError): + c.next_event() + + +@pytest.mark.parametrize( + "data", + [ + b"\x00", + b"\x20", + b"\x16\x03\x01\x00\xa5", # Typical start of a TLS Client Hello + ], +) +def test_early_detection_of_invalid_request(data: bytes) -> None: + c = Connection(SERVER) + # Early detection should occur before even receiving a `\r\n` + c.receive_data(data) + with pytest.raises(RemoteProtocolError): + c.next_event() + + +@pytest.mark.parametrize( + "data", + [ + b"\x00", + b"\x20", + b"\x16\x03\x03\x00\x31", # Typical start of a TLS Server Hello + ], +) +def test_early_detection_of_invalid_response(data: bytes) -> None: + c = Connection(CLIENT) + # Early detection should occur before even receiving a `\r\n` + c.receive_data(data) + with pytest.raises(RemoteProtocolError): + c.next_event() + + +# This used to give different headers for HEAD and GET. +# The correct way to handle HEAD is to put whatever headers we *would* have +# put if it were a GET -- even though we know that for HEAD, those headers +# will be ignored. 
+def test_HEAD_framing_headers() -> None: + def setup(method: bytes, http_version: bytes) -> Connection: + c = Connection(SERVER) + c.receive_data( + method + b" / HTTP/" + http_version + b"\r\n" + b"Host: example.com\r\n\r\n" + ) + assert type(c.next_event()) is Request + assert type(c.next_event()) is EndOfMessage + return c + + for method in [b"GET", b"HEAD"]: + # No Content-Length, HTTP/1.1 peer, should use chunked + c = setup(method, b"1.1") + assert ( + c.send(Response(status_code=200, headers=[])) == b"HTTP/1.1 200 \r\n" # type: ignore[arg-type] + b"Transfer-Encoding: chunked\r\n\r\n" + ) + + # No Content-Length, HTTP/1.0 peer, frame with connection: close + c = setup(method, b"1.0") + assert ( + c.send(Response(status_code=200, headers=[])) == b"HTTP/1.1 200 \r\n" # type: ignore[arg-type] + b"Connection: close\r\n\r\n" + ) + + # Content-Length + Transfer-Encoding, TE wins + c = setup(method, b"1.1") + assert ( + c.send( + Response( + status_code=200, + headers=[ + ("Content-Length", "100"), + ("Transfer-Encoding", "chunked"), + ], + ) + ) + == b"HTTP/1.1 200 \r\n" + b"Transfer-Encoding: chunked\r\n\r\n" + ) + + +def test_special_exceptions_for_lost_connection_in_message_body() -> None: + c = Connection(SERVER) + c.receive_data( + b"POST / HTTP/1.1\r\n" b"Host: example.com\r\n" b"Content-Length: 100\r\n\r\n" + ) + assert type(c.next_event()) is Request + assert c.next_event() is NEED_DATA + c.receive_data(b"12345") + assert c.next_event() == Data(data=b"12345") + c.receive_data(b"") + with pytest.raises(RemoteProtocolError) as excinfo: + c.next_event() + assert "received 5 bytes" in str(excinfo.value) + assert "expected 100" in str(excinfo.value) + + c = Connection(SERVER) + c.receive_data( + b"POST / HTTP/1.1\r\n" + b"Host: example.com\r\n" + b"Transfer-Encoding: chunked\r\n\r\n" + ) + assert type(c.next_event()) is Request + assert c.next_event() is NEED_DATA + c.receive_data(b"8\r\n012345") + assert c.next_event().data == b"012345" # type: ignore + 
c.receive_data(b"") + with pytest.raises(RemoteProtocolError) as excinfo: + c.next_event() + assert "incomplete chunked read" in str(excinfo.value) diff --git a/env/lib/python3.10/site-packages/h11/tests/test_events.py b/env/lib/python3.10/site-packages/h11/tests/test_events.py new file mode 100644 index 0000000..bc6c313 --- /dev/null +++ b/env/lib/python3.10/site-packages/h11/tests/test_events.py @@ -0,0 +1,150 @@ +from http import HTTPStatus + +import pytest + +from .. import _events +from .._events import ( + ConnectionClosed, + Data, + EndOfMessage, + Event, + InformationalResponse, + Request, + Response, +) +from .._util import LocalProtocolError + + +def test_events() -> None: + with pytest.raises(LocalProtocolError): + # Missing Host: + req = Request( + method="GET", target="/", headers=[("a", "b")], http_version="1.1" + ) + # But this is okay (HTTP/1.0) + req = Request(method="GET", target="/", headers=[("a", "b")], http_version="1.0") + # fields are normalized + assert req.method == b"GET" + assert req.target == b"/" + assert req.headers == [(b"a", b"b")] + assert req.http_version == b"1.0" + + # This is also okay -- has a Host (with weird capitalization, which is ok) + req = Request( + method="GET", + target="/", + headers=[("a", "b"), ("hOSt", "example.com")], + http_version="1.1", + ) + # we normalize header capitalization + assert req.headers == [(b"a", b"b"), (b"host", b"example.com")] + + # Multiple host is bad too + with pytest.raises(LocalProtocolError): + req = Request( + method="GET", + target="/", + headers=[("Host", "a"), ("Host", "a")], + http_version="1.1", + ) + # Even for HTTP/1.0 + with pytest.raises(LocalProtocolError): + req = Request( + method="GET", + target="/", + headers=[("Host", "a"), ("Host", "a")], + http_version="1.0", + ) + + # Header values are validated + for bad_char in "\x00\r\n\f\v": + with pytest.raises(LocalProtocolError): + req = Request( + method="GET", + target="/", + headers=[("Host", "a"), ("Foo", "asd" + 
bad_char)], + http_version="1.0", + ) + + # But for compatibility we allow non-whitespace control characters, even + # though they're forbidden by the spec. + Request( + method="GET", + target="/", + headers=[("Host", "a"), ("Foo", "asd\x01\x02\x7f")], + http_version="1.0", + ) + + # Request target is validated + for bad_byte in b"\x00\x20\x7f\xee": + target = bytearray(b"/") + target.append(bad_byte) + with pytest.raises(LocalProtocolError): + Request( + method="GET", target=target, headers=[("Host", "a")], http_version="1.1" + ) + + # Request method is validated + with pytest.raises(LocalProtocolError): + Request( + method="GET / HTTP/1.1", + target=target, + headers=[("Host", "a")], + http_version="1.1", + ) + + ir = InformationalResponse(status_code=100, headers=[("Host", "a")]) + assert ir.status_code == 100 + assert ir.headers == [(b"host", b"a")] + assert ir.http_version == b"1.1" + + with pytest.raises(LocalProtocolError): + InformationalResponse(status_code=200, headers=[("Host", "a")]) + + resp = Response(status_code=204, headers=[], http_version="1.0") # type: ignore[arg-type] + assert resp.status_code == 204 + assert resp.headers == [] + assert resp.http_version == b"1.0" + + with pytest.raises(LocalProtocolError): + resp = Response(status_code=100, headers=[], http_version="1.0") # type: ignore[arg-type] + + with pytest.raises(LocalProtocolError): + Response(status_code="100", headers=[], http_version="1.0") # type: ignore[arg-type] + + with pytest.raises(LocalProtocolError): + InformationalResponse(status_code=b"100", headers=[], http_version="1.0") # type: ignore[arg-type] + + d = Data(data=b"asdf") + assert d.data == b"asdf" + + eom = EndOfMessage() + assert eom.headers == [] + + cc = ConnectionClosed() + assert repr(cc) == "ConnectionClosed()" + + +def test_intenum_status_code() -> None: + # https://github.com/python-hyper/h11/issues/72 + + r = Response(status_code=HTTPStatus.OK, headers=[], http_version="1.0") # type: ignore[arg-type] + assert 
r.status_code == HTTPStatus.OK + assert type(r.status_code) is not type(HTTPStatus.OK) + assert type(r.status_code) is int + + +def test_header_casing() -> None: + r = Request( + method="GET", + target="/", + headers=[("Host", "example.org"), ("Connection", "keep-alive")], + http_version="1.1", + ) + assert len(r.headers) == 2 + assert r.headers[0] == (b"host", b"example.org") + assert r.headers == [(b"host", b"example.org"), (b"connection", b"keep-alive")] + assert r.headers.raw_items() == [ + (b"Host", b"example.org"), + (b"Connection", b"keep-alive"), + ] diff --git a/env/lib/python3.10/site-packages/h11/tests/test_headers.py b/env/lib/python3.10/site-packages/h11/tests/test_headers.py new file mode 100644 index 0000000..ba53d08 --- /dev/null +++ b/env/lib/python3.10/site-packages/h11/tests/test_headers.py @@ -0,0 +1,157 @@ +import pytest + +from .._events import Request +from .._headers import ( + get_comma_header, + has_expect_100_continue, + Headers, + normalize_and_validate, + set_comma_header, +) +from .._util import LocalProtocolError + + +def test_normalize_and_validate() -> None: + assert normalize_and_validate([("foo", "bar")]) == [(b"foo", b"bar")] + assert normalize_and_validate([(b"foo", b"bar")]) == [(b"foo", b"bar")] + + # no leading/trailing whitespace in names + with pytest.raises(LocalProtocolError): + normalize_and_validate([(b"foo ", "bar")]) + with pytest.raises(LocalProtocolError): + normalize_and_validate([(b" foo", "bar")]) + + # no weird characters in names + with pytest.raises(LocalProtocolError) as excinfo: + normalize_and_validate([(b"foo bar", b"baz")]) + assert "foo bar" in str(excinfo.value) + with pytest.raises(LocalProtocolError): + normalize_and_validate([(b"foo\x00bar", b"baz")]) + # Not even 8-bit characters: + with pytest.raises(LocalProtocolError): + normalize_and_validate([(b"foo\xffbar", b"baz")]) + # And not even the control characters we allow in values: + with pytest.raises(LocalProtocolError): + 
normalize_and_validate([(b"foo\x01bar", b"baz")]) + + # no return or NUL characters in values + with pytest.raises(LocalProtocolError) as excinfo: + normalize_and_validate([("foo", "bar\rbaz")]) + assert "bar\\rbaz" in str(excinfo.value) + with pytest.raises(LocalProtocolError): + normalize_and_validate([("foo", "bar\nbaz")]) + with pytest.raises(LocalProtocolError): + normalize_and_validate([("foo", "bar\x00baz")]) + # no leading/trailing whitespace + with pytest.raises(LocalProtocolError): + normalize_and_validate([("foo", "barbaz ")]) + with pytest.raises(LocalProtocolError): + normalize_and_validate([("foo", " barbaz")]) + with pytest.raises(LocalProtocolError): + normalize_and_validate([("foo", "barbaz\t")]) + with pytest.raises(LocalProtocolError): + normalize_and_validate([("foo", "\tbarbaz")]) + + # content-length + assert normalize_and_validate([("Content-Length", "1")]) == [ + (b"content-length", b"1") + ] + with pytest.raises(LocalProtocolError): + normalize_and_validate([("Content-Length", "asdf")]) + with pytest.raises(LocalProtocolError): + normalize_and_validate([("Content-Length", "1x")]) + with pytest.raises(LocalProtocolError): + normalize_and_validate([("Content-Length", "1"), ("Content-Length", "2")]) + assert normalize_and_validate( + [("Content-Length", "0"), ("Content-Length", "0")] + ) == [(b"content-length", b"0")] + assert normalize_and_validate([("Content-Length", "0 , 0")]) == [ + (b"content-length", b"0") + ] + with pytest.raises(LocalProtocolError): + normalize_and_validate( + [("Content-Length", "1"), ("Content-Length", "1"), ("Content-Length", "2")] + ) + with pytest.raises(LocalProtocolError): + normalize_and_validate([("Content-Length", "1 , 1,2")]) + + # transfer-encoding + assert normalize_and_validate([("Transfer-Encoding", "chunked")]) == [ + (b"transfer-encoding", b"chunked") + ] + assert normalize_and_validate([("Transfer-Encoding", "cHuNkEd")]) == [ + (b"transfer-encoding", b"chunked") + ] + with 
pytest.raises(LocalProtocolError) as excinfo: + normalize_and_validate([("Transfer-Encoding", "gzip")]) + assert excinfo.value.error_status_hint == 501 # Not Implemented + with pytest.raises(LocalProtocolError) as excinfo: + normalize_and_validate( + [("Transfer-Encoding", "chunked"), ("Transfer-Encoding", "gzip")] + ) + assert excinfo.value.error_status_hint == 501 # Not Implemented + + +def test_get_set_comma_header() -> None: + headers = normalize_and_validate( + [ + ("Connection", "close"), + ("whatever", "something"), + ("connectiON", "fOo,, , BAR"), + ] + ) + + assert get_comma_header(headers, b"connection") == [b"close", b"foo", b"bar"] + + headers = set_comma_header(headers, b"newthing", ["a", "b"]) # type: ignore + + with pytest.raises(LocalProtocolError): + set_comma_header(headers, b"newthing", [" a", "b"]) # type: ignore + + assert headers == [ + (b"connection", b"close"), + (b"whatever", b"something"), + (b"connection", b"fOo,, , BAR"), + (b"newthing", b"a"), + (b"newthing", b"b"), + ] + + headers = set_comma_header(headers, b"whatever", ["different thing"]) # type: ignore + + assert headers == [ + (b"connection", b"close"), + (b"connection", b"fOo,, , BAR"), + (b"newthing", b"a"), + (b"newthing", b"b"), + (b"whatever", b"different thing"), + ] + + +def test_has_100_continue() -> None: + assert has_expect_100_continue( + Request( + method="GET", + target="/", + headers=[("Host", "example.com"), ("Expect", "100-continue")], + ) + ) + assert not has_expect_100_continue( + Request(method="GET", target="/", headers=[("Host", "example.com")]) + ) + # Case insensitive + assert has_expect_100_continue( + Request( + method="GET", + target="/", + headers=[("Host", "example.com"), ("Expect", "100-Continue")], + ) + ) + # Doesn't work in HTTP/1.0 + assert not has_expect_100_continue( + Request( + method="GET", + target="/", + headers=[("Host", "example.com"), ("Expect", "100-continue")], + http_version="1.0", + ) + ) diff --git 
a/env/lib/python3.10/site-packages/h11/tests/test_helpers.py b/env/lib/python3.10/site-packages/h11/tests/test_helpers.py new file mode 100644 index 0000000..c329c76 --- /dev/null +++ b/env/lib/python3.10/site-packages/h11/tests/test_helpers.py @@ -0,0 +1,32 @@ +from .._events import ( + ConnectionClosed, + Data, + EndOfMessage, + Event, + InformationalResponse, + Request, + Response, +) +from .helpers import normalize_data_events + + +def test_normalize_data_events() -> None: + assert normalize_data_events( + [ + Data(data=bytearray(b"1")), + Data(data=b"2"), + Response(status_code=200, headers=[]), # type: ignore[arg-type] + Data(data=b"3"), + Data(data=b"4"), + EndOfMessage(), + Data(data=b"5"), + Data(data=b"6"), + Data(data=b"7"), + ] + ) == [ + Data(data=b"12"), + Response(status_code=200, headers=[]), # type: ignore[arg-type] + Data(data=b"34"), + EndOfMessage(), + Data(data=b"567"), + ] diff --git a/env/lib/python3.10/site-packages/h11/tests/test_io.py b/env/lib/python3.10/site-packages/h11/tests/test_io.py new file mode 100644 index 0000000..2b47c0e --- /dev/null +++ b/env/lib/python3.10/site-packages/h11/tests/test_io.py @@ -0,0 +1,572 @@ +from typing import Any, Callable, Generator, List + +import pytest + +from .._events import ( + ConnectionClosed, + Data, + EndOfMessage, + Event, + InformationalResponse, + Request, + Response, +) +from .._headers import Headers, normalize_and_validate +from .._readers import ( + _obsolete_line_fold, + ChunkedReader, + ContentLengthReader, + Http10Reader, + READERS, +) +from .._receivebuffer import ReceiveBuffer +from .._state import ( + CLIENT, + CLOSED, + DONE, + IDLE, + MIGHT_SWITCH_PROTOCOL, + MUST_CLOSE, + SEND_BODY, + SEND_RESPONSE, + SERVER, + SWITCHED_PROTOCOL, +) +from .._util import LocalProtocolError +from .._writers import ( + ChunkedWriter, + ContentLengthWriter, + Http10Writer, + write_any_response, + write_headers, + write_request, + WRITERS, +) +from .helpers import normalize_data_events + +SIMPLE_CASES 
= [ + ( + (CLIENT, IDLE), + Request( + method="GET", + target="/a", + headers=[("Host", "foo"), ("Connection", "close")], + ), + b"GET /a HTTP/1.1\r\nHost: foo\r\nConnection: close\r\n\r\n", + ), + ( + (SERVER, SEND_RESPONSE), + Response(status_code=200, headers=[("Connection", "close")], reason=b"OK"), + b"HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", + ), + ( + (SERVER, SEND_RESPONSE), + Response(status_code=200, headers=[], reason=b"OK"), # type: ignore[arg-type] + b"HTTP/1.1 200 OK\r\n\r\n", + ), + ( + (SERVER, SEND_RESPONSE), + InformationalResponse( + status_code=101, headers=[("Upgrade", "websocket")], reason=b"Upgrade" + ), + b"HTTP/1.1 101 Upgrade\r\nUpgrade: websocket\r\n\r\n", + ), + ( + (SERVER, SEND_RESPONSE), + InformationalResponse(status_code=101, headers=[], reason=b"Upgrade"), # type: ignore[arg-type] + b"HTTP/1.1 101 Upgrade\r\n\r\n", + ), +] + + +def dowrite(writer: Callable[..., None], obj: Any) -> bytes: + got_list: List[bytes] = [] + writer(obj, got_list.append) + return b"".join(got_list) + + +def tw(writer: Any, obj: Any, expected: Any) -> None: + got = dowrite(writer, obj) + assert got == expected + + +def makebuf(data: bytes) -> ReceiveBuffer: + buf = ReceiveBuffer() + buf += data + return buf + + +def tr(reader: Any, data: bytes, expected: Any) -> None: + def check(got: Any) -> None: + assert got == expected + # Headers should always be returned as bytes, not e.g. 
bytearray + # https://github.com/python-hyper/wsproto/pull/54#issuecomment-377709478 + for name, value in getattr(got, "headers", []): + assert type(name) is bytes + assert type(value) is bytes + + # Simple: consume whole thing + buf = makebuf(data) + check(reader(buf)) + assert not buf + + # Incrementally growing buffer + buf = ReceiveBuffer() + for i in range(len(data)): + assert reader(buf) is None + buf += data[i : i + 1] + check(reader(buf)) + + # Trailing data + buf = makebuf(data) + buf += b"trailing" + check(reader(buf)) + assert bytes(buf) == b"trailing" + + +def test_writers_simple() -> None: + for ((role, state), event, binary) in SIMPLE_CASES: + tw(WRITERS[role, state], event, binary) + + +def test_readers_simple() -> None: + for ((role, state), event, binary) in SIMPLE_CASES: + tr(READERS[role, state], binary, event) + + +def test_writers_unusual() -> None: + # Simple test of the write_headers utility routine + tw( + write_headers, + normalize_and_validate([("foo", "bar"), ("baz", "quux")]), + b"foo: bar\r\nbaz: quux\r\n\r\n", + ) + tw(write_headers, Headers([]), b"\r\n") + + # We understand HTTP/1.0, but we don't speak it + with pytest.raises(LocalProtocolError): + tw( + write_request, + Request( + method="GET", + target="/", + headers=[("Host", "foo"), ("Connection", "close")], + http_version="1.0", + ), + None, + ) + with pytest.raises(LocalProtocolError): + tw( + write_any_response, + Response( + status_code=200, headers=[("Connection", "close")], http_version="1.0" + ), + None, + ) + + +def test_readers_unusual() -> None: + # Reading HTTP/1.0 + tr( + READERS[CLIENT, IDLE], + b"HEAD /foo HTTP/1.0\r\nSome: header\r\n\r\n", + Request( + method="HEAD", + target="/foo", + headers=[("Some", "header")], + http_version="1.0", + ), + ) + + # check no-headers, since it's only legal with HTTP/1.0 + tr( + READERS[CLIENT, IDLE], + b"HEAD /foo HTTP/1.0\r\n\r\n", + Request(method="HEAD", target="/foo", headers=[], http_version="1.0"), # type: ignore[arg-type] + 
) + + tr( + READERS[SERVER, SEND_RESPONSE], + b"HTTP/1.0 200 OK\r\nSome: header\r\n\r\n", + Response( + status_code=200, + headers=[("Some", "header")], + http_version="1.0", + reason=b"OK", + ), + ) + + # single-character header values (actually disallowed by the ABNF in RFC + # 7230 -- this is a bug in the standard that we originally copied...) + tr( + READERS[SERVER, SEND_RESPONSE], + b"HTTP/1.0 200 OK\r\n" b"Foo: a a a a a \r\n\r\n", + Response( + status_code=200, + headers=[("Foo", "a a a a a")], + http_version="1.0", + reason=b"OK", + ), + ) + + # Empty headers -- also legal + tr( + READERS[SERVER, SEND_RESPONSE], + b"HTTP/1.0 200 OK\r\n" b"Foo:\r\n\r\n", + Response( + status_code=200, headers=[("Foo", "")], http_version="1.0", reason=b"OK" + ), + ) + + tr( + READERS[SERVER, SEND_RESPONSE], + b"HTTP/1.0 200 OK\r\n" b"Foo: \t \t \r\n\r\n", + Response( + status_code=200, headers=[("Foo", "")], http_version="1.0", reason=b"OK" + ), + ) + + # Tolerate broken servers that leave off the response code + tr( + READERS[SERVER, SEND_RESPONSE], + b"HTTP/1.0 200\r\n" b"Foo: bar\r\n\r\n", + Response( + status_code=200, headers=[("Foo", "bar")], http_version="1.0", reason=b"" + ), + ) + + # Tolerate headers line endings (\r\n and \n) + # \n\r\b between headers and body + tr( + READERS[SERVER, SEND_RESPONSE], + b"HTTP/1.1 200 OK\r\nSomeHeader: val\n\r\n", + Response( + status_code=200, + headers=[("SomeHeader", "val")], + http_version="1.1", + reason="OK", + ), + ) + + # delimited only with \n + tr( + READERS[SERVER, SEND_RESPONSE], + b"HTTP/1.1 200 OK\nSomeHeader1: val1\nSomeHeader2: val2\n\n", + Response( + status_code=200, + headers=[("SomeHeader1", "val1"), ("SomeHeader2", "val2")], + http_version="1.1", + reason="OK", + ), + ) + + # mixed \r\n and \n + tr( + READERS[SERVER, SEND_RESPONSE], + b"HTTP/1.1 200 OK\r\nSomeHeader1: val1\nSomeHeader2: val2\n\r\n", + Response( + status_code=200, + headers=[("SomeHeader1", "val1"), ("SomeHeader2", "val2")], + http_version="1.1", 
+ reason="OK", + ), + ) + + # obsolete line folding + tr( + READERS[CLIENT, IDLE], + b"HEAD /foo HTTP/1.1\r\n" + b"Host: example.com\r\n" + b"Some: multi-line\r\n" + b" header\r\n" + b"\tnonsense\r\n" + b" \t \t\tI guess\r\n" + b"Connection: close\r\n" + b"More-nonsense: in the\r\n" + b" last header \r\n\r\n", + Request( + method="HEAD", + target="/foo", + headers=[ + ("Host", "example.com"), + ("Some", "multi-line header nonsense I guess"), + ("Connection", "close"), + ("More-nonsense", "in the last header"), + ], + ), + ) + + with pytest.raises(LocalProtocolError): + tr( + READERS[CLIENT, IDLE], + b"HEAD /foo HTTP/1.1\r\n" b" folded: line\r\n\r\n", + None, + ) + + with pytest.raises(LocalProtocolError): + tr( + READERS[CLIENT, IDLE], + b"HEAD /foo HTTP/1.1\r\n" b"foo : line\r\n\r\n", + None, + ) + with pytest.raises(LocalProtocolError): + tr( + READERS[CLIENT, IDLE], + b"HEAD /foo HTTP/1.1\r\n" b"foo\t: line\r\n\r\n", + None, + ) + with pytest.raises(LocalProtocolError): + tr( + READERS[CLIENT, IDLE], + b"HEAD /foo HTTP/1.1\r\n" b"foo\t: line\r\n\r\n", + None, + ) + with pytest.raises(LocalProtocolError): + tr(READERS[CLIENT, IDLE], b"HEAD /foo HTTP/1.1\r\n" b": line\r\n\r\n", None) + + +def test__obsolete_line_fold_bytes() -> None: + # _obsolete_line_fold has a defensive cast to bytearray, which is + # necessary to protect against O(n^2) behavior in case anyone ever passes + # in regular bytestrings... but right now we never pass in regular + # bytestrings. so this test just exists to get some coverage on that + # defensive cast. 
+ assert list(_obsolete_line_fold([b"aaa", b"bbb", b" ccc", b"ddd"])) == [ + b"aaa", + bytearray(b"bbb ccc"), + b"ddd", + ] + + +def _run_reader_iter( + reader: Any, buf: bytes, do_eof: bool +) -> Generator[Any, None, None]: + while True: + event = reader(buf) + if event is None: + break + yield event + # body readers have undefined behavior after returning EndOfMessage, + # because this changes the state so they don't get called again + if type(event) is EndOfMessage: + break + if do_eof: + assert not buf + yield reader.read_eof() + + +def _run_reader(*args: Any) -> List[Event]: + events = list(_run_reader_iter(*args)) + return normalize_data_events(events) + + +def t_body_reader(thunk: Any, data: bytes, expected: Any, do_eof: bool = False) -> None: + # Simple: consume whole thing + print("Test 1") + buf = makebuf(data) + assert _run_reader(thunk(), buf, do_eof) == expected + + # Incrementally growing buffer + print("Test 2") + reader = thunk() + buf = ReceiveBuffer() + events = [] + for i in range(len(data)): + events += _run_reader(reader, buf, False) + buf += data[i : i + 1] + events += _run_reader(reader, buf, do_eof) + assert normalize_data_events(events) == expected + + is_complete = any(type(event) is EndOfMessage for event in expected) + if is_complete and not do_eof: + buf = makebuf(data + b"trailing") + assert _run_reader(thunk(), buf, False) == expected + + +def test_ContentLengthReader() -> None: + t_body_reader(lambda: ContentLengthReader(0), b"", [EndOfMessage()]) + + t_body_reader( + lambda: ContentLengthReader(10), + b"0123456789", + [Data(data=b"0123456789"), EndOfMessage()], + ) + + +def test_Http10Reader() -> None: + t_body_reader(Http10Reader, b"", [EndOfMessage()], do_eof=True) + t_body_reader(Http10Reader, b"asdf", [Data(data=b"asdf")], do_eof=False) + t_body_reader( + Http10Reader, b"asdf", [Data(data=b"asdf"), EndOfMessage()], do_eof=True + ) + + +def test_ChunkedReader() -> None: + t_body_reader(ChunkedReader, b"0\r\n\r\n", 
[EndOfMessage()]) + + t_body_reader( + ChunkedReader, + b"0\r\nSome: header\r\n\r\n", + [EndOfMessage(headers=[("Some", "header")])], + ) + + t_body_reader( + ChunkedReader, + b"5\r\n01234\r\n" + + b"10\r\n0123456789abcdef\r\n" + + b"0\r\n" + + b"Some: header\r\n\r\n", + [ + Data(data=b"012340123456789abcdef"), + EndOfMessage(headers=[("Some", "header")]), + ], + ) + + t_body_reader( + ChunkedReader, + b"5\r\n01234\r\n" + b"10\r\n0123456789abcdef\r\n" + b"0\r\n\r\n", + [Data(data=b"012340123456789abcdef"), EndOfMessage()], + ) + + # handles upper and lowercase hex + t_body_reader( + ChunkedReader, + b"aA\r\n" + b"x" * 0xAA + b"\r\n" + b"0\r\n\r\n", + [Data(data=b"x" * 0xAA), EndOfMessage()], + ) + + # refuses arbitrarily long chunk integers + with pytest.raises(LocalProtocolError): + # Technically this is legal HTTP/1.1, but we refuse to process chunk + # sizes that don't fit into 20 characters of hex + t_body_reader(ChunkedReader, b"9" * 100 + b"\r\nxxx", [Data(data=b"xxx")]) + + # refuses garbage in the chunk count + with pytest.raises(LocalProtocolError): + t_body_reader(ChunkedReader, b"10\x00\r\nxxx", None) + + # handles (and discards) "chunk extensions" omg wtf + t_body_reader( + ChunkedReader, + b"5; hello=there\r\n" + + b"xxxxx" + + b"\r\n" + + b'0; random="junk"; some=more; canbe=lonnnnngg\r\n\r\n', + [Data(data=b"xxxxx"), EndOfMessage()], + ) + + t_body_reader( + ChunkedReader, + b"5 \r\n01234\r\n" + b"0\r\n\r\n", + [Data(data=b"01234"), EndOfMessage()], + ) + + +def test_ContentLengthWriter() -> None: + w = ContentLengthWriter(5) + assert dowrite(w, Data(data=b"123")) == b"123" + assert dowrite(w, Data(data=b"45")) == b"45" + assert dowrite(w, EndOfMessage()) == b"" + + w = ContentLengthWriter(5) + with pytest.raises(LocalProtocolError): + dowrite(w, Data(data=b"123456")) + + w = ContentLengthWriter(5) + dowrite(w, Data(data=b"123")) + with pytest.raises(LocalProtocolError): + dowrite(w, Data(data=b"456")) + + w = ContentLengthWriter(5) + dowrite(w, 
Data(data=b"123")) + with pytest.raises(LocalProtocolError): + dowrite(w, EndOfMessage()) + + w = ContentLengthWriter(5) + dowrite(w, Data(data=b"123")) == b"123" + dowrite(w, Data(data=b"45")) == b"45" + with pytest.raises(LocalProtocolError): + dowrite(w, EndOfMessage(headers=[("Etag", "asdf")])) + + +def test_ChunkedWriter() -> None: + w = ChunkedWriter() + assert dowrite(w, Data(data=b"aaa")) == b"3\r\naaa\r\n" + assert dowrite(w, Data(data=b"a" * 20)) == b"14\r\n" + b"a" * 20 + b"\r\n" + + assert dowrite(w, Data(data=b"")) == b"" + + assert dowrite(w, EndOfMessage()) == b"0\r\n\r\n" + + assert ( + dowrite(w, EndOfMessage(headers=[("Etag", "asdf"), ("a", "b")])) + == b"0\r\nEtag: asdf\r\na: b\r\n\r\n" + ) + + +def test_Http10Writer() -> None: + w = Http10Writer() + assert dowrite(w, Data(data=b"1234")) == b"1234" + assert dowrite(w, EndOfMessage()) == b"" + + with pytest.raises(LocalProtocolError): + dowrite(w, EndOfMessage(headers=[("Etag", "asdf")])) + + +def test_reject_garbage_after_request_line() -> None: + with pytest.raises(LocalProtocolError): + tr(READERS[SERVER, SEND_RESPONSE], b"HTTP/1.0 200 OK\x00xxxx\r\n\r\n", None) + + +def test_reject_garbage_after_response_line() -> None: + with pytest.raises(LocalProtocolError): + tr( + READERS[CLIENT, IDLE], + b"HEAD /foo HTTP/1.1 xxxxxx\r\n" b"Host: a\r\n\r\n", + None, + ) + + +def test_reject_garbage_in_header_line() -> None: + with pytest.raises(LocalProtocolError): + tr( + READERS[CLIENT, IDLE], + b"HEAD /foo HTTP/1.1\r\n" b"Host: foo\x00bar\r\n\r\n", + None, + ) + + +def test_reject_non_vchar_in_path() -> None: + for bad_char in b"\x00\x20\x7f\xee": + message = bytearray(b"HEAD /") + message.append(bad_char) + message.extend(b" HTTP/1.1\r\nHost: foobar\r\n\r\n") + with pytest.raises(LocalProtocolError): + tr(READERS[CLIENT, IDLE], message, None) + + +# https://github.com/python-hyper/h11/issues/57 +def test_allow_some_garbage_in_cookies() -> None: + tr( + READERS[CLIENT, IDLE], + b"HEAD /foo HTTP/1.1\r\n" 
+ b"Host: foo\r\n" + b"Set-Cookie: ___utmvafIumyLc=kUd\x01UpAt; path=/; Max-Age=900\r\n" + b"\r\n", + Request( + method="HEAD", + target="/foo", + headers=[ + ("Host", "foo"), + ("Set-Cookie", "___utmvafIumyLc=kUd\x01UpAt; path=/; Max-Age=900"), + ], + ), + ) + + +def test_host_comes_first() -> None: + tw( + write_headers, + normalize_and_validate([("foo", "bar"), ("Host", "example.com")]), + b"Host: example.com\r\nfoo: bar\r\n\r\n", + ) diff --git a/env/lib/python3.10/site-packages/h11/tests/test_receivebuffer.py b/env/lib/python3.10/site-packages/h11/tests/test_receivebuffer.py new file mode 100644 index 0000000..21a3870 --- /dev/null +++ b/env/lib/python3.10/site-packages/h11/tests/test_receivebuffer.py @@ -0,0 +1,135 @@ +import re +from typing import Tuple + +import pytest + +from .._receivebuffer import ReceiveBuffer + + +def test_receivebuffer() -> None: + b = ReceiveBuffer() + assert not b + assert len(b) == 0 + assert bytes(b) == b"" + + b += b"123" + assert b + assert len(b) == 3 + assert bytes(b) == b"123" + + assert bytes(b) == b"123" + + assert b.maybe_extract_at_most(2) == b"12" + assert b + assert len(b) == 1 + assert bytes(b) == b"3" + + assert bytes(b) == b"3" + + assert b.maybe_extract_at_most(10) == b"3" + assert bytes(b) == b"" + + assert b.maybe_extract_at_most(10) is None + assert not b + + ################################################################ + # maybe_extract_until_next + ################################################################ + + b += b"123\n456\r\n789\r\n" + + assert b.maybe_extract_next_line() == b"123\n456\r\n" + assert bytes(b) == b"789\r\n" + + assert b.maybe_extract_next_line() == b"789\r\n" + assert bytes(b) == b"" + + b += b"12\r" + assert b.maybe_extract_next_line() is None + assert bytes(b) == b"12\r" + + b += b"345\n\r" + assert b.maybe_extract_next_line() is None + assert bytes(b) == b"12\r345\n\r" + + # here we stopped at the middle of b"\r\n" delimiter + + b += b"\n6789aaa123\r\n" + assert 
b.maybe_extract_next_line() == b"12\r345\n\r\n" + assert b.maybe_extract_next_line() == b"6789aaa123\r\n" + assert b.maybe_extract_next_line() is None + assert bytes(b) == b"" + + ################################################################ + # maybe_extract_lines + ################################################################ + + b += b"123\r\na: b\r\nfoo:bar\r\n\r\ntrailing" + lines = b.maybe_extract_lines() + assert lines == [b"123", b"a: b", b"foo:bar"] + assert bytes(b) == b"trailing" + + assert b.maybe_extract_lines() is None + + b += b"\r\n\r" + assert b.maybe_extract_lines() is None + + assert b.maybe_extract_at_most(100) == b"trailing\r\n\r" + assert not b + + # Empty body case (as happens at the end of chunked encoding if there are + # no trailing headers, e.g.) + b += b"\r\ntrailing" + assert b.maybe_extract_lines() == [] + assert bytes(b) == b"trailing" + + +@pytest.mark.parametrize( + "data", + [ + pytest.param( + ( + b"HTTP/1.1 200 OK\r\n", + b"Content-type: text/plain\r\n", + b"Connection: close\r\n", + b"\r\n", + b"Some body", + ), + id="with_crlf_delimiter", + ), + pytest.param( + ( + b"HTTP/1.1 200 OK\n", + b"Content-type: text/plain\n", + b"Connection: close\n", + b"\n", + b"Some body", + ), + id="with_lf_only_delimiter", + ), + pytest.param( + ( + b"HTTP/1.1 200 OK\n", + b"Content-type: text/plain\r\n", + b"Connection: close\n", + b"\n", + b"Some body", + ), + id="with_mixed_crlf_and_lf", + ), + ], +) +def test_receivebuffer_for_invalid_delimiter(data: Tuple[bytes]) -> None: + b = ReceiveBuffer() + + for line in data: + b += line + + lines = b.maybe_extract_lines() + + assert lines == [ + b"HTTP/1.1 200 OK", + b"Content-type: text/plain", + b"Connection: close", + ] + assert bytes(b) == b"Some body" diff --git a/env/lib/python3.10/site-packages/h11/tests/test_state.py b/env/lib/python3.10/site-packages/h11/tests/test_state.py new file mode 100644 index 0000000..bc974e6 --- /dev/null +++ 
b/env/lib/python3.10/site-packages/h11/tests/test_state.py @@ -0,0 +1,271 @@ +import pytest + +from .._events import ( + ConnectionClosed, + Data, + EndOfMessage, + Event, + InformationalResponse, + Request, + Response, +) +from .._state import ( + _SWITCH_CONNECT, + _SWITCH_UPGRADE, + CLIENT, + CLOSED, + ConnectionState, + DONE, + IDLE, + MIGHT_SWITCH_PROTOCOL, + MUST_CLOSE, + SEND_BODY, + SEND_RESPONSE, + SERVER, + SWITCHED_PROTOCOL, +) +from .._util import LocalProtocolError + + +def test_ConnectionState() -> None: + cs = ConnectionState() + + # Basic event-triggered transitions + + assert cs.states == {CLIENT: IDLE, SERVER: IDLE} + + cs.process_event(CLIENT, Request) + # The SERVER-Request special case: + assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE} + + # Illegal transitions raise an error and nothing happens + with pytest.raises(LocalProtocolError): + cs.process_event(CLIENT, Request) + assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE} + + cs.process_event(SERVER, InformationalResponse) + assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE} + + cs.process_event(SERVER, Response) + assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_BODY} + + cs.process_event(CLIENT, EndOfMessage) + cs.process_event(SERVER, EndOfMessage) + assert cs.states == {CLIENT: DONE, SERVER: DONE} + + # State-triggered transition + + cs.process_event(SERVER, ConnectionClosed) + assert cs.states == {CLIENT: MUST_CLOSE, SERVER: CLOSED} + + +def test_ConnectionState_keep_alive() -> None: + # keep_alive = False + cs = ConnectionState() + cs.process_event(CLIENT, Request) + cs.process_keep_alive_disabled() + cs.process_event(CLIENT, EndOfMessage) + assert cs.states == {CLIENT: MUST_CLOSE, SERVER: SEND_RESPONSE} + + cs.process_event(SERVER, Response) + cs.process_event(SERVER, EndOfMessage) + assert cs.states == {CLIENT: MUST_CLOSE, SERVER: MUST_CLOSE} + + +def test_ConnectionState_keep_alive_in_DONE() -> None: + # Check that if keep_alive is disabled 
when the CLIENT is already in DONE, + # then this is sufficient to immediately trigger the DONE -> MUST_CLOSE + # transition + cs = ConnectionState() + cs.process_event(CLIENT, Request) + cs.process_event(CLIENT, EndOfMessage) + assert cs.states[CLIENT] is DONE + cs.process_keep_alive_disabled() + assert cs.states[CLIENT] is MUST_CLOSE + + +def test_ConnectionState_switch_denied() -> None: + for switch_type in (_SWITCH_CONNECT, _SWITCH_UPGRADE): + for deny_early in (True, False): + cs = ConnectionState() + cs.process_client_switch_proposal(switch_type) + cs.process_event(CLIENT, Request) + cs.process_event(CLIENT, Data) + assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE} + + assert switch_type in cs.pending_switch_proposals + + if deny_early: + # before client reaches DONE + cs.process_event(SERVER, Response) + assert not cs.pending_switch_proposals + + cs.process_event(CLIENT, EndOfMessage) + + if deny_early: + assert cs.states == {CLIENT: DONE, SERVER: SEND_BODY} + else: + assert cs.states == { + CLIENT: MIGHT_SWITCH_PROTOCOL, + SERVER: SEND_RESPONSE, + } + + cs.process_event(SERVER, InformationalResponse) + assert cs.states == { + CLIENT: MIGHT_SWITCH_PROTOCOL, + SERVER: SEND_RESPONSE, + } + + cs.process_event(SERVER, Response) + assert cs.states == {CLIENT: DONE, SERVER: SEND_BODY} + assert not cs.pending_switch_proposals + + +_response_type_for_switch = { + _SWITCH_UPGRADE: InformationalResponse, + _SWITCH_CONNECT: Response, + None: Response, +} + + +def test_ConnectionState_protocol_switch_accepted() -> None: + for switch_event in [_SWITCH_UPGRADE, _SWITCH_CONNECT]: + cs = ConnectionState() + cs.process_client_switch_proposal(switch_event) + cs.process_event(CLIENT, Request) + cs.process_event(CLIENT, Data) + assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE} + + cs.process_event(CLIENT, EndOfMessage) + assert cs.states == {CLIENT: MIGHT_SWITCH_PROTOCOL, SERVER: SEND_RESPONSE} + + cs.process_event(SERVER, InformationalResponse) + 
assert cs.states == {CLIENT: MIGHT_SWITCH_PROTOCOL, SERVER: SEND_RESPONSE} + + cs.process_event(SERVER, _response_type_for_switch[switch_event], switch_event) + assert cs.states == {CLIENT: SWITCHED_PROTOCOL, SERVER: SWITCHED_PROTOCOL} + + +def test_ConnectionState_double_protocol_switch() -> None: + # CONNECT + Upgrade is legal! Very silly, but legal. So we support + # it. Because sometimes doing the silly thing is easier than not. + for server_switch in [None, _SWITCH_UPGRADE, _SWITCH_CONNECT]: + cs = ConnectionState() + cs.process_client_switch_proposal(_SWITCH_UPGRADE) + cs.process_client_switch_proposal(_SWITCH_CONNECT) + cs.process_event(CLIENT, Request) + cs.process_event(CLIENT, EndOfMessage) + assert cs.states == {CLIENT: MIGHT_SWITCH_PROTOCOL, SERVER: SEND_RESPONSE} + cs.process_event( + SERVER, _response_type_for_switch[server_switch], server_switch + ) + if server_switch is None: + assert cs.states == {CLIENT: DONE, SERVER: SEND_BODY} + else: + assert cs.states == {CLIENT: SWITCHED_PROTOCOL, SERVER: SWITCHED_PROTOCOL} + + +def test_ConnectionState_inconsistent_protocol_switch() -> None: + for client_switches, server_switch in [ + ([], _SWITCH_CONNECT), + ([], _SWITCH_UPGRADE), + ([_SWITCH_UPGRADE], _SWITCH_CONNECT), + ([_SWITCH_CONNECT], _SWITCH_UPGRADE), + ]: + cs = ConnectionState() + for client_switch in client_switches: # type: ignore[attr-defined] + cs.process_client_switch_proposal(client_switch) + cs.process_event(CLIENT, Request) + with pytest.raises(LocalProtocolError): + cs.process_event(SERVER, Response, server_switch) + + +def test_ConnectionState_keepalive_protocol_switch_interaction() -> None: + # keep_alive=False + pending_switch_proposals + cs = ConnectionState() + cs.process_client_switch_proposal(_SWITCH_UPGRADE) + cs.process_event(CLIENT, Request) + cs.process_keep_alive_disabled() + cs.process_event(CLIENT, Data) + assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE} + + # the protocol switch "wins" + 
cs.process_event(CLIENT, EndOfMessage) + assert cs.states == {CLIENT: MIGHT_SWITCH_PROTOCOL, SERVER: SEND_RESPONSE} + + # but when the server denies the request, keep_alive comes back into play + cs.process_event(SERVER, Response) + assert cs.states == {CLIENT: MUST_CLOSE, SERVER: SEND_BODY} + + +def test_ConnectionState_reuse() -> None: + cs = ConnectionState() + + with pytest.raises(LocalProtocolError): + cs.start_next_cycle() + + cs.process_event(CLIENT, Request) + cs.process_event(CLIENT, EndOfMessage) + + with pytest.raises(LocalProtocolError): + cs.start_next_cycle() + + cs.process_event(SERVER, Response) + cs.process_event(SERVER, EndOfMessage) + + cs.start_next_cycle() + assert cs.states == {CLIENT: IDLE, SERVER: IDLE} + + # No keepalive + + cs.process_event(CLIENT, Request) + cs.process_keep_alive_disabled() + cs.process_event(CLIENT, EndOfMessage) + cs.process_event(SERVER, Response) + cs.process_event(SERVER, EndOfMessage) + + with pytest.raises(LocalProtocolError): + cs.start_next_cycle() + + # One side closed + + cs = ConnectionState() + cs.process_event(CLIENT, Request) + cs.process_event(CLIENT, EndOfMessage) + cs.process_event(CLIENT, ConnectionClosed) + cs.process_event(SERVER, Response) + cs.process_event(SERVER, EndOfMessage) + + with pytest.raises(LocalProtocolError): + cs.start_next_cycle() + + # Succesful protocol switch + + cs = ConnectionState() + cs.process_client_switch_proposal(_SWITCH_UPGRADE) + cs.process_event(CLIENT, Request) + cs.process_event(CLIENT, EndOfMessage) + cs.process_event(SERVER, InformationalResponse, _SWITCH_UPGRADE) + + with pytest.raises(LocalProtocolError): + cs.start_next_cycle() + + # Failed protocol switch + + cs = ConnectionState() + cs.process_client_switch_proposal(_SWITCH_UPGRADE) + cs.process_event(CLIENT, Request) + cs.process_event(CLIENT, EndOfMessage) + cs.process_event(SERVER, Response) + cs.process_event(SERVER, EndOfMessage) + + cs.start_next_cycle() + assert cs.states == {CLIENT: IDLE, SERVER: IDLE} + 
+ +def test_server_request_is_illegal() -> None: + # There used to be a bug in how we handled the Request special case that + # made this allowed... + cs = ConnectionState() + with pytest.raises(LocalProtocolError): + cs.process_event(SERVER, Request) diff --git a/env/lib/python3.10/site-packages/h11/tests/test_util.py b/env/lib/python3.10/site-packages/h11/tests/test_util.py new file mode 100644 index 0000000..79bc095 --- /dev/null +++ b/env/lib/python3.10/site-packages/h11/tests/test_util.py @@ -0,0 +1,112 @@ +import re +import sys +import traceback +from typing import NoReturn + +import pytest + +from .._util import ( + bytesify, + LocalProtocolError, + ProtocolError, + RemoteProtocolError, + Sentinel, + validate, +) + + +def test_ProtocolError() -> None: + with pytest.raises(TypeError): + ProtocolError("abstract base class") + + +def test_LocalProtocolError() -> None: + try: + raise LocalProtocolError("foo") + except LocalProtocolError as e: + assert str(e) == "foo" + assert e.error_status_hint == 400 + + try: + raise LocalProtocolError("foo", error_status_hint=418) + except LocalProtocolError as e: + assert str(e) == "foo" + assert e.error_status_hint == 418 + + def thunk() -> NoReturn: + raise LocalProtocolError("a", error_status_hint=420) + + try: + try: + thunk() + except LocalProtocolError as exc1: + orig_traceback = "".join(traceback.format_tb(sys.exc_info()[2])) + exc1._reraise_as_remote_protocol_error() + except RemoteProtocolError as exc2: + assert type(exc2) is RemoteProtocolError + assert exc2.args == ("a",) + assert exc2.error_status_hint == 420 + new_traceback = "".join(traceback.format_tb(sys.exc_info()[2])) + assert new_traceback.endswith(orig_traceback) + + +def test_validate() -> None: + my_re = re.compile(rb"(?P[0-9]+)\.(?P[0-9]+)") + with pytest.raises(LocalProtocolError): + validate(my_re, b"0.") + + groups = validate(my_re, b"0.1") + assert groups == {"group1": b"0", "group2": b"1"} + + # successful partial matches are an error - must match 
whole string + with pytest.raises(LocalProtocolError): + validate(my_re, b"0.1xx") + with pytest.raises(LocalProtocolError): + validate(my_re, b"0.1\n") + + +def test_validate_formatting() -> None: + my_re = re.compile(rb"foo") + + with pytest.raises(LocalProtocolError) as excinfo: + validate(my_re, b"", "oops") + assert "oops" in str(excinfo.value) + + with pytest.raises(LocalProtocolError) as excinfo: + validate(my_re, b"", "oops {}") + assert "oops {}" in str(excinfo.value) + + with pytest.raises(LocalProtocolError) as excinfo: + validate(my_re, b"", "oops {} xx", 10) + assert "oops 10 xx" in str(excinfo.value) + + +def test_make_sentinel() -> None: + class S(Sentinel, metaclass=Sentinel): + pass + + assert repr(S) == "S" + assert S == S + assert type(S).__name__ == "S" + assert S in {S} + assert type(S) is S + + class S2(Sentinel, metaclass=Sentinel): + pass + + assert repr(S2) == "S2" + assert S != S2 + assert S not in {S2} + assert type(S) is not type(S2) + + +def test_bytesify() -> None: + assert bytesify(b"123") == b"123" + assert bytesify(bytearray(b"123")) == b"123" + assert bytesify("123") == b"123" + + with pytest.raises(UnicodeEncodeError): + bytesify("\u1234") + + with pytest.raises(TypeError): + bytesify(10) diff --git a/env/lib/python3.10/site-packages/python_engineio-4.11.2.dist-info/INSTALLER b/env/lib/python3.10/site-packages/python_engineio-4.11.2.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/env/lib/python3.10/site-packages/python_engineio-4.11.2.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/env/lib/python3.10/site-packages/python_engineio-4.11.2.dist-info/LICENSE b/env/lib/python3.10/site-packages/python_engineio-4.11.2.dist-info/LICENSE new file mode 100644 index 0000000..c22109a --- /dev/null +++ b/env/lib/python3.10/site-packages/python_engineio-4.11.2.dist-info/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2015 Miguel Grinberg + +Permission is hereby granted, free of charge, to 
any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/env/lib/python3.10/site-packages/python_engineio-4.11.2.dist-info/METADATA b/env/lib/python3.10/site-packages/python_engineio-4.11.2.dist-info/METADATA new file mode 100644 index 0000000..d6c227b --- /dev/null +++ b/env/lib/python3.10/site-packages/python_engineio-4.11.2.dist-info/METADATA @@ -0,0 +1,48 @@ +Metadata-Version: 2.1 +Name: python-engineio +Version: 4.11.2 +Summary: Engine.IO server and client for Python +Author-email: Miguel Grinberg +Project-URL: Homepage, https://github.com/miguelgrinberg/python-engineio +Project-URL: Bug Tracker, https://github.com/miguelgrinberg/python-engineio/issues +Classifier: Environment :: Web Environment +Classifier: Intended Audience :: Developers +Classifier: Programming Language :: Python :: 3 +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Requires-Python: >=3.6 +Description-Content-Type: text/markdown +License-File: LICENSE +Requires-Dist: simple-websocket>=0.10.0 +Provides-Extra: 
client +Requires-Dist: requests>=2.21.0; extra == "client" +Requires-Dist: websocket-client>=0.54.0; extra == "client" +Provides-Extra: asyncio-client +Requires-Dist: aiohttp>=3.4; extra == "asyncio-client" +Provides-Extra: docs +Requires-Dist: sphinx; extra == "docs" + +python-engineio +=============== + +[![Build status](https://github.com/miguelgrinberg/python-engineio/workflows/build/badge.svg)](https://github.com/miguelgrinberg/python-engineio/actions) [![codecov](https://codecov.io/gh/miguelgrinberg/python-engineio/branch/main/graph/badge.svg)](https://codecov.io/gh/miguelgrinberg/python-engineio) + +Python implementation of the `Engine.IO` realtime client and server. + +Sponsors +-------- + +The following organizations are funding this project: + +![Socket.IO](https://images.opencollective.com/socketio/050e5eb/logo/64.png)
[Socket.IO](https://socket.io) | [Add your company here!](https://github.com/sponsors/miguelgrinberg)| +-|- + +Many individual sponsors also support this project through small ongoing contributions. Why not [join them](https://github.com/sponsors/miguelgrinberg)? + +Resources +--------- + +- [Documentation](https://python-engineio.readthedocs.io/) +- [PyPI](https://pypi.python.org/pypi/python-engineio) +- [Change Log](https://github.com/miguelgrinberg/python-engineio/blob/main/CHANGES.md) +- Questions? See the [questions](https://stackoverflow.com/questions/tagged/python-socketio) others have asked on Stack Overflow, or [ask](https://stackoverflow.com/questions/ask?tags=python+python-socketio) your own question. diff --git a/env/lib/python3.10/site-packages/python_engineio-4.11.2.dist-info/RECORD b/env/lib/python3.10/site-packages/python_engineio-4.11.2.dist-info/RECORD new file mode 100644 index 0000000..aeaaaeb --- /dev/null +++ b/env/lib/python3.10/site-packages/python_engineio-4.11.2.dist-info/RECORD @@ -0,0 +1,58 @@ +engineio/__init__.py,sha256=0R2PY1EXu3sicP7mkA0_QxEVGRlFlgvsxfhByqREE1A,481 +engineio/__pycache__/__init__.cpython-310.pyc,, +engineio/__pycache__/async_client.cpython-310.pyc,, +engineio/__pycache__/async_server.cpython-310.pyc,, +engineio/__pycache__/async_socket.cpython-310.pyc,, +engineio/__pycache__/base_client.cpython-310.pyc,, +engineio/__pycache__/base_server.cpython-310.pyc,, +engineio/__pycache__/base_socket.cpython-310.pyc,, +engineio/__pycache__/client.cpython-310.pyc,, +engineio/__pycache__/exceptions.cpython-310.pyc,, +engineio/__pycache__/json.cpython-310.pyc,, +engineio/__pycache__/middleware.cpython-310.pyc,, +engineio/__pycache__/packet.cpython-310.pyc,, +engineio/__pycache__/payload.cpython-310.pyc,, +engineio/__pycache__/server.cpython-310.pyc,, +engineio/__pycache__/socket.cpython-310.pyc,, +engineio/__pycache__/static_files.cpython-310.pyc,, +engineio/async_client.py,sha256=ZrK9j_sUNKRwjqeT6W26d1TtjoGAp2nULhIbX_1iivs,29446 
+engineio/async_drivers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +engineio/async_drivers/__pycache__/__init__.cpython-310.pyc,, +engineio/async_drivers/__pycache__/_websocket_wsgi.cpython-310.pyc,, +engineio/async_drivers/__pycache__/aiohttp.cpython-310.pyc,, +engineio/async_drivers/__pycache__/asgi.cpython-310.pyc,, +engineio/async_drivers/__pycache__/eventlet.cpython-310.pyc,, +engineio/async_drivers/__pycache__/gevent.cpython-310.pyc,, +engineio/async_drivers/__pycache__/gevent_uwsgi.cpython-310.pyc,, +engineio/async_drivers/__pycache__/sanic.cpython-310.pyc,, +engineio/async_drivers/__pycache__/threading.cpython-310.pyc,, +engineio/async_drivers/__pycache__/tornado.cpython-310.pyc,, +engineio/async_drivers/_websocket_wsgi.py,sha256=LuOEfKhbAw8SplB5PMpYKIUqfCPEadQEpqeiq_leOIA,949 +engineio/async_drivers/aiohttp.py,sha256=OBDGhaNXWHxQkwhzZT2vlTAOqWReGS6Sjk9u3BEh_Mc,3754 +engineio/async_drivers/asgi.py,sha256=mBu109j7R6esH-wI62jvTnfVAEjWvw2YNp0dZ74NyHg,11210 +engineio/async_drivers/eventlet.py,sha256=n1y4OjPdj4J2GIep5N56O29oa5NQgFJVcTBjyO1C-Gs,1735 +engineio/async_drivers/gevent.py,sha256=hnJHeWdDQE2jfoLCP5DnwVPzsQlcTLJUMA5EVf1UL-k,2962 +engineio/async_drivers/gevent_uwsgi.py,sha256=m6ay5dov9FDQl0fbeiKeE-Orh5LiF6zLlYQ64Oa3T5g,5954 +engineio/async_drivers/sanic.py,sha256=GYX8YWR1GbRm-GkMTAQkfkWbY12MOT1IV2DzH0Xx8Ns,4495 +engineio/async_drivers/threading.py,sha256=ywmG59d4H6OHZjKarBN97-9BHEsRxFEz9YN-E9QAu_I,463 +engineio/async_drivers/tornado.py,sha256=mbVHs1mECfzFSNv33uigkpTBtNPT0u49k5zaybewdIo,5893 +engineio/async_server.py,sha256=8Af_uwf8mKOCJGqVKa3QN0kz7_B2K1cdpSmsvkAoBR4,27412 +engineio/async_socket.py,sha256=nHY0DPPk0FtI9djUnQWtzZ3ce2OD184Tu-Dop7JLg9I,10715 +engineio/base_client.py,sha256=uEI6OglvhdIGD1DAZuptbn0KXQMk_o3w0AH4wJvPwZ0,5322 +engineio/base_server.py,sha256=H-aKQ9hMQjiIrjDaqbw_edw9yUVIbCQMzyIy9egLv-g,14458 +engineio/base_socket.py,sha256=sQqbNSfGhMQG3xzwar6IXMal28C7Q5TIAQRGp74Wt2o,399 
+engineio/client.py,sha256=wfgB_MrpEh-v4dBt6W2eW6i0CRNEP0CIJ6d4Ap4CnZE,27227 +engineio/exceptions.py,sha256=FyuMb5qhX9CUYP3fEoe1m-faU96ApdQTSbblaaoo8LA,292 +engineio/json.py,sha256=SG5FTojqd1ix6u0dKXJsZVqqdYioZLO4S2GPL7BKl3U,405 +engineio/middleware.py,sha256=5NKBXz-ftuFErUB_V9IDvRHaSOsjhtW-NnuJtquB1nc,3750 +engineio/packet.py,sha256=Tejm9U5JcYs5LwZ_n_Xh0PIRv-U_JbHwGEivNXQN4eg,3181 +engineio/payload.py,sha256=GIWu0Vnay4WNZlDxHqVgP34tKTBXX58OArJ-mO5zD3E,1539 +engineio/server.py,sha256=4_xdtH0tyusigQurwOeDFrHtodthFTOIbOVZ-_ckh5U,22957 +engineio/socket.py,sha256=Oaw1E7ZDyOCaS7KV151g1u9rqOf1JJHh5gttEU0cSeA,10342 +engineio/static_files.py,sha256=pwez9LQFaSQXMbtI0vLyD6UDiokQ4rNfmRYgVLKOthc,2064 +python_engineio-4.11.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +python_engineio-4.11.2.dist-info/LICENSE,sha256=yel9Pbwfu82094CLKCzWRtuIev9PUxP-a76NTDFAWpw,1082 +python_engineio-4.11.2.dist-info/METADATA,sha256=WfrX2DOOQdxr6FoiFDIuNe8lorFgUdilJYBfWuD2-Kw,2237 +python_engineio-4.11.2.dist-info/RECORD,, +python_engineio-4.11.2.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91 +python_engineio-4.11.2.dist-info/top_level.txt,sha256=u8PmNisCZLwRYcWrNLe9wutQ2tt4zNi8IH362c-HWuA,9 diff --git a/env/lib/python3.10/site-packages/python_engineio-4.11.2.dist-info/WHEEL b/env/lib/python3.10/site-packages/python_engineio-4.11.2.dist-info/WHEEL new file mode 100644 index 0000000..ae527e7 --- /dev/null +++ b/env/lib/python3.10/site-packages/python_engineio-4.11.2.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (75.6.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/env/lib/python3.10/site-packages/python_engineio-4.11.2.dist-info/top_level.txt b/env/lib/python3.10/site-packages/python_engineio-4.11.2.dist-info/top_level.txt new file mode 100755 index 0000000..8f23d7e --- /dev/null +++ b/env/lib/python3.10/site-packages/python_engineio-4.11.2.dist-info/top_level.txt @@ -0,0 +1 @@ +engineio diff --git 
a/env/lib/python3.10/site-packages/python_socketio-5.12.1.dist-info/INSTALLER b/env/lib/python3.10/site-packages/python_socketio-5.12.1.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/env/lib/python3.10/site-packages/python_socketio-5.12.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/env/lib/python3.10/site-packages/python_socketio-5.12.1.dist-info/LICENSE b/env/lib/python3.10/site-packages/python_socketio-5.12.1.dist-info/LICENSE new file mode 100644 index 0000000..c22109a --- /dev/null +++ b/env/lib/python3.10/site-packages/python_socketio-5.12.1.dist-info/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2015 Miguel Grinberg + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/env/lib/python3.10/site-packages/python_socketio-5.12.1.dist-info/METADATA b/env/lib/python3.10/site-packages/python_socketio-5.12.1.dist-info/METADATA new file mode 100644 index 0000000..7135f4f --- /dev/null +++ b/env/lib/python3.10/site-packages/python_socketio-5.12.1.dist-info/METADATA @@ -0,0 +1,71 @@ +Metadata-Version: 2.1 +Name: python-socketio +Version: 5.12.1 +Summary: Socket.IO server and client for Python +Author-email: Miguel Grinberg +Project-URL: Homepage, https://github.com/miguelgrinberg/python-socketio +Project-URL: Bug Tracker, https://github.com/miguelgrinberg/python-socketio/issues +Classifier: Environment :: Web Environment +Classifier: Intended Audience :: Developers +Classifier: Programming Language :: Python :: 3 +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Requires-Python: >=3.8 +Description-Content-Type: text/markdown +License-File: LICENSE +Requires-Dist: bidict>=0.21.0 +Requires-Dist: python-engineio>=4.11.0 +Provides-Extra: client +Requires-Dist: requests>=2.21.0; extra == "client" +Requires-Dist: websocket-client>=0.54.0; extra == "client" +Provides-Extra: asyncio-client +Requires-Dist: aiohttp>=3.4; extra == "asyncio-client" +Provides-Extra: docs +Requires-Dist: sphinx; extra == "docs" + +python-socketio +=============== + +[![Build status](https://github.com/miguelgrinberg/python-socketio/workflows/build/badge.svg)](https://github.com/miguelgrinberg/python-socketio/actions) [![codecov](https://codecov.io/gh/miguelgrinberg/python-socketio/branch/main/graph/badge.svg)](https://codecov.io/gh/miguelgrinberg/python-socketio) + +Python implementation of the `Socket.IO` realtime client and server. + +Sponsors +-------- + +The following organizations are funding this project: + +![Socket.IO](https://images.opencollective.com/socketio/050e5eb/logo/64.png)
[Socket.IO](https://socket.io) | [Add your company here!](https://github.com/sponsors/miguelgrinberg)| +-|- + +Many individual sponsors also support this project through small ongoing contributions. Why not [join them](https://github.com/sponsors/miguelgrinberg)? + +Version compatibility +--------------------- + +The Socket.IO protocol has been through a number of revisions, and some of these +introduced backward incompatible changes, which means that the client and the +server must use compatible versions for everything to work. + +If you are using the Python client and server, the easiest way to ensure compatibility +is to use the same version of this package for the client and the server. If you are +using this package with a different client or server, then you must ensure the +versions are compatible. + +The version compatibility chart below maps versions of this package to versions +of the JavaScript reference implementation and the versions of the Socket.IO and +Engine.IO protocols. + +JavaScript Socket.IO version | Socket.IO protocol revision | Engine.IO protocol revision | python-socketio version +-|-|-|- +0.9.x | 1, 2 | 1, 2 | Not supported +1.x and 2.x | 3, 4 | 3 | 4.x +3.x and 4.x | 5 | 4 | 5.x + +Resources +--------- + +- [Documentation](http://python-socketio.readthedocs.io/) +- [PyPI](https://pypi.python.org/pypi/python-socketio) +- [Change Log](https://github.com/miguelgrinberg/python-socketio/blob/main/CHANGES.md) +- Questions? See the [questions](https://stackoverflow.com/questions/tagged/python-socketio) others have asked on Stack Overflow, or [ask](https://stackoverflow.com/questions/ask?tags=python+python-socketio) your own question. 
diff --git a/env/lib/python3.10/site-packages/python_socketio-5.12.1.dist-info/RECORD b/env/lib/python3.10/site-packages/python_socketio-5.12.1.dist-info/RECORD new file mode 100644 index 0000000..e9ee36e --- /dev/null +++ b/env/lib/python3.10/site-packages/python_socketio-5.12.1.dist-info/RECORD @@ -0,0 +1,68 @@ +python_socketio-5.12.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +python_socketio-5.12.1.dist-info/LICENSE,sha256=yel9Pbwfu82094CLKCzWRtuIev9PUxP-a76NTDFAWpw,1082 +python_socketio-5.12.1.dist-info/METADATA,sha256=Fv_RBJ7M_Ob-O245jQ0Z4TSY7kDEEoww7do3s6BfFqY,3205 +python_socketio-5.12.1.dist-info/RECORD,, +python_socketio-5.12.1.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91 +python_socketio-5.12.1.dist-info/top_level.txt,sha256=xWd-HVUanhys_VzQQTRTRZBX8W448ayFytYf1Zffivs,9 +socketio/__init__.py,sha256=DXxtwPIqHFIqV4BGTgJ86OvCXD6Mth3PxBYhFoJ1_7g,1269 +socketio/__pycache__/__init__.cpython-310.pyc,, +socketio/__pycache__/admin.cpython-310.pyc,, +socketio/__pycache__/asgi.cpython-310.pyc,, +socketio/__pycache__/async_admin.cpython-310.pyc,, +socketio/__pycache__/async_aiopika_manager.cpython-310.pyc,, +socketio/__pycache__/async_client.cpython-310.pyc,, +socketio/__pycache__/async_manager.cpython-310.pyc,, +socketio/__pycache__/async_namespace.cpython-310.pyc,, +socketio/__pycache__/async_pubsub_manager.cpython-310.pyc,, +socketio/__pycache__/async_redis_manager.cpython-310.pyc,, +socketio/__pycache__/async_server.cpython-310.pyc,, +socketio/__pycache__/async_simple_client.cpython-310.pyc,, +socketio/__pycache__/base_client.cpython-310.pyc,, +socketio/__pycache__/base_manager.cpython-310.pyc,, +socketio/__pycache__/base_namespace.cpython-310.pyc,, +socketio/__pycache__/base_server.cpython-310.pyc,, +socketio/__pycache__/client.cpython-310.pyc,, +socketio/__pycache__/exceptions.cpython-310.pyc,, +socketio/__pycache__/kafka_manager.cpython-310.pyc,, +socketio/__pycache__/kombu_manager.cpython-310.pyc,, 
+socketio/__pycache__/manager.cpython-310.pyc,, +socketio/__pycache__/middleware.cpython-310.pyc,, +socketio/__pycache__/msgpack_packet.cpython-310.pyc,, +socketio/__pycache__/namespace.cpython-310.pyc,, +socketio/__pycache__/packet.cpython-310.pyc,, +socketio/__pycache__/pubsub_manager.cpython-310.pyc,, +socketio/__pycache__/redis_manager.cpython-310.pyc,, +socketio/__pycache__/server.cpython-310.pyc,, +socketio/__pycache__/simple_client.cpython-310.pyc,, +socketio/__pycache__/tornado.cpython-310.pyc,, +socketio/__pycache__/zmq_manager.cpython-310.pyc,, +socketio/admin.py,sha256=pfZ7ZtcZ9-aeaFZkOR4mFhsNPcy9WjZs4_5Os6xc9tA,15966 +socketio/asgi.py,sha256=NaJtYhOswVVcwHU0zcMM5H5TrSzXq9K-CAYaeSNTZRY,2192 +socketio/async_admin.py,sha256=opwgGfkREXb_T25FL7At6hkC3hTfY33bDooyNi1Dgvw,16317 +socketio/async_aiopika_manager.py,sha256=DaBUjGRYaNIsOsk2xNjWylUsz2egmTAFFUiQkV6mNmk,5193 +socketio/async_client.py,sha256=iVXDsHiU9aohwE2QkSwUOtU8GYivCZRapolJMCWeCPY,27810 +socketio/async_manager.py,sha256=dSD2XVtWYwKHDWxAXSu4Xgqw6dXyy9P_6C8rwlguybM,4503 +socketio/async_namespace.py,sha256=pSyJjIekWgydsmQHxmJvuc_NdI8SMGjGTAatLUtRvAk,12028 +socketio/async_pubsub_manager.py,sha256=Dzt34zwWgxqGsB_61_hegSlTSZucciHX6aJrEPSuKos,11141 +socketio/async_redis_manager.py,sha256=UZXKunvbSk8neRVhGqigQF5S0WwLYTKV0BKondnV_yY,4299 +socketio/async_server.py,sha256=YrZ69AN1i8hK-TMZGtRiD6UnoQk_zwl2amHYaKk_1uI,36382 +socketio/async_simple_client.py,sha256=Dj2h0iRR1qZ4BhOV6gpzvDM0K5XO4f-vdxmISiREzhQ,8908 +socketio/base_client.py,sha256=AKwZprl7qwgdOaQwV2drBNx9bB3PBCyABm6HKton-w4,11637 +socketio/base_manager.py,sha256=vmHGHlIUDJTCdp9MIFppqFJJuoN2M1MmEWTTyV35FeY,5727 +socketio/base_namespace.py,sha256=mXECdZZ7jPLphU9yH4U4yOayqjMh6OyWgZ71mOJzl5A,970 +socketio/base_server.py,sha256=JtHtmxFjtclcdORg7FIBoMtMxiaCFnuwulXrpLUSjUE,10637 +socketio/client.py,sha256=gE8NH3oZrdwTMQN1j-D3J_opGZlYCxPyMO6m3rjFDC0,26040 +socketio/exceptions.py,sha256=c8yKss_oJl-fkL52X_AagyJecL-9Mxlgb5xDRqSz5tA,975 
+socketio/kafka_manager.py,sha256=BbpNbEus0DCFXaohBAXlKoV2IHU8RhbGzpkL9QcqQNM,2388 +socketio/kombu_manager.py,sha256=MhDhnbZoncW5_Y02Ojhu8qFUFdT7STZDnLPsMUARuik,5748 +socketio/manager.py,sha256=RPYPcVBFAjN-fEtLfcsPlk6SOW_SBATvw0Tkq_PkGZw,3861 +socketio/middleware.py,sha256=P8wOgSzy3YKOcRVI-r3KNKsEejBz_f5p2wdV8ZqW12E,1591 +socketio/msgpack_packet.py,sha256=0K_XXM-OF3SdqOaLN_O5B4a1xHE6N_UhhiaRhQdseNw,514 +socketio/namespace.py,sha256=80y8BN2FFlHK8JKF1TirWvvE4pn9FkGKk14IVFkCLEs,9488 +socketio/packet.py,sha256=nYvjUEIEUMHThZj--xrmRCZX9jN1V9BwFB2GzRpDLWU,7069 +socketio/pubsub_manager.py,sha256=JCB9aaEBbEw8Or6XaosoSpO-f6p5iF_BnNJOCul7ps4,10442 +socketio/redis_manager.py,sha256=DIvqRXjsSsmvXYBwuRvEap70IFyJILLaicj1X2Hssug,4403 +socketio/server.py,sha256=laukqFmlQK24bCmGtMP9KGGGUP8CqebTO4_SJeZrMGY,34788 +socketio/simple_client.py,sha256=tZiX2sAPY66OJTIJPk-PIGQjmnmUxu3RnpgJ0nc1-y8,8326 +socketio/tornado.py,sha256=R82JCqz-E1ibZAQX708h7FX3sguCHQ1OLYpnMag-LY8,295 +socketio/zmq_manager.py,sha256=PVlx175_MqKQ6j0sqGpqbqN2vW5zf4BzviotbBQpdEE,3544 diff --git a/env/lib/python3.10/site-packages/python_socketio-5.12.1.dist-info/WHEEL b/env/lib/python3.10/site-packages/python_socketio-5.12.1.dist-info/WHEEL new file mode 100644 index 0000000..ae527e7 --- /dev/null +++ b/env/lib/python3.10/site-packages/python_socketio-5.12.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (75.6.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/env/lib/python3.10/site-packages/python_socketio-5.12.1.dist-info/top_level.txt b/env/lib/python3.10/site-packages/python_socketio-5.12.1.dist-info/top_level.txt new file mode 100644 index 0000000..b8f5d36 --- /dev/null +++ b/env/lib/python3.10/site-packages/python_socketio-5.12.1.dist-info/top_level.txt @@ -0,0 +1 @@ +socketio diff --git a/env/lib/python3.10/site-packages/simple_websocket-1.1.0.dist-info/INSTALLER b/env/lib/python3.10/site-packages/simple_websocket-1.1.0.dist-info/INSTALLER new file mode 100644 index 
0000000..a1b589e --- /dev/null +++ b/env/lib/python3.10/site-packages/simple_websocket-1.1.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/env/lib/python3.10/site-packages/simple_websocket-1.1.0.dist-info/LICENSE b/env/lib/python3.10/site-packages/simple_websocket-1.1.0.dist-info/LICENSE new file mode 100644 index 0000000..264533f --- /dev/null +++ b/env/lib/python3.10/site-packages/simple_websocket-1.1.0.dist-info/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Miguel Grinberg + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/env/lib/python3.10/site-packages/simple_websocket-1.1.0.dist-info/METADATA b/env/lib/python3.10/site-packages/simple_websocket-1.1.0.dist-info/METADATA new file mode 100644 index 0000000..dfdb05e --- /dev/null +++ b/env/lib/python3.10/site-packages/simple_websocket-1.1.0.dist-info/METADATA @@ -0,0 +1,37 @@ +Metadata-Version: 2.1 +Name: simple-websocket +Version: 1.1.0 +Summary: Simple WebSocket server and client for Python +Author-email: Miguel Grinberg +Project-URL: Homepage, https://github.com/miguelgrinberg/simple-websocket +Project-URL: Bug Tracker, https://github.com/miguelgrinberg/simple-websocket/issues +Classifier: Environment :: Web Environment +Classifier: Intended Audience :: Developers +Classifier: Programming Language :: Python :: 3 +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Requires-Python: >=3.6 +Description-Content-Type: text/markdown +License-File: LICENSE +Requires-Dist: wsproto +Provides-Extra: dev +Requires-Dist: tox ; extra == 'dev' +Requires-Dist: flake8 ; extra == 'dev' +Requires-Dist: pytest ; extra == 'dev' +Requires-Dist: pytest-cov ; extra == 'dev' +Provides-Extra: docs +Requires-Dist: sphinx ; extra == 'docs' + +simple-websocket +================ + +[![Build status](https://github.com/miguelgrinberg/simple-websocket/workflows/build/badge.svg)](https://github.com/miguelgrinberg/simple-websocket/actions) [![codecov](https://codecov.io/gh/miguelgrinberg/simple-websocket/branch/main/graph/badge.svg)](https://codecov.io/gh/miguelgrinberg/simple-websocket) + +Simple WebSocket server and client for Python. 
+ +## Resources + +- [Documentation](http://simple-websocket.readthedocs.io/en/latest/) +- [PyPI](https://pypi.python.org/pypi/simple-websocket) +- [Change Log](https://github.com/miguelgrinberg/simple-websocket/blob/main/CHANGES.md) + diff --git a/env/lib/python3.10/site-packages/simple_websocket-1.1.0.dist-info/RECORD b/env/lib/python3.10/site-packages/simple_websocket-1.1.0.dist-info/RECORD new file mode 100644 index 0000000..3570c31 --- /dev/null +++ b/env/lib/python3.10/site-packages/simple_websocket-1.1.0.dist-info/RECORD @@ -0,0 +1,16 @@ +simple_websocket-1.1.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +simple_websocket-1.1.0.dist-info/LICENSE,sha256=S4q63MXj3SnHGQW4SVKUVpnwp7pB5q-Z6rpG-qvpW7c,1072 +simple_websocket-1.1.0.dist-info/METADATA,sha256=jIZUFRCbg8Ae1BNwEipAlSMvuPJ05_bXpptlk2DHiNQ,1530 +simple_websocket-1.1.0.dist-info/RECORD,, +simple_websocket-1.1.0.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91 +simple_websocket-1.1.0.dist-info/top_level.txt,sha256=gslMtkYd2H3exn9JQxdAgsKBCESZDyTxmukAF9Iz5aA,17 +simple_websocket/__init__.py,sha256=EKakMkVO9vg5WlXjHEJiTwI2emAqs9q22ZxJz9vJ4co,167 +simple_websocket/__pycache__/__init__.cpython-310.pyc,, +simple_websocket/__pycache__/aiows.cpython-310.pyc,, +simple_websocket/__pycache__/asgi.cpython-310.pyc,, +simple_websocket/__pycache__/errors.cpython-310.pyc,, +simple_websocket/__pycache__/ws.cpython-310.pyc,, +simple_websocket/aiows.py,sha256=CHIBIAN2cz004S4tPeTLAcQuT9iBgw6-hA0QD_JZD1A,20978 +simple_websocket/asgi.py,sha256=ic2tmrUI-u9vjMNzjqIORc8g7pAsGwFd9YJIjppHHVU,1823 +simple_websocket/errors.py,sha256=BtR8B4OI-FL2O_VSIi9cmLMobHqcJ2FhvQnRtvvMlSo,652 +simple_websocket/ws.py,sha256=Nj7DSMnUhOXGYI9j5wvJMpm5X_c7iNDg0H7EpJQPb9o,22789 diff --git a/env/lib/python3.10/site-packages/simple_websocket-1.1.0.dist-info/WHEEL b/env/lib/python3.10/site-packages/simple_websocket-1.1.0.dist-info/WHEEL new file mode 100644 index 0000000..dcfdc6e --- /dev/null +++ 
b/env/lib/python3.10/site-packages/simple_websocket-1.1.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (75.1.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/env/lib/python3.10/site-packages/simple_websocket-1.1.0.dist-info/top_level.txt b/env/lib/python3.10/site-packages/simple_websocket-1.1.0.dist-info/top_level.txt new file mode 100644 index 0000000..9959339 --- /dev/null +++ b/env/lib/python3.10/site-packages/simple_websocket-1.1.0.dist-info/top_level.txt @@ -0,0 +1 @@ +simple_websocket diff --git a/env/lib/python3.10/site-packages/simple_websocket/__init__.py b/env/lib/python3.10/site-packages/simple_websocket/__init__.py new file mode 100644 index 0000000..98a5cc6 --- /dev/null +++ b/env/lib/python3.10/site-packages/simple_websocket/__init__.py @@ -0,0 +1,3 @@ +from .ws import Server, Client # noqa: F401 +from .aiows import AioServer, AioClient # noqa: F401 +from .errors import ConnectionError, ConnectionClosed # noqa: F401 diff --git a/env/lib/python3.10/site-packages/simple_websocket/aiows.py b/env/lib/python3.10/site-packages/simple_websocket/aiows.py new file mode 100644 index 0000000..18ce116 --- /dev/null +++ b/env/lib/python3.10/site-packages/simple_websocket/aiows.py @@ -0,0 +1,467 @@ +import asyncio +import ssl +from time import time +from urllib.parse import urlsplit + +from wsproto import ConnectionType, WSConnection +from wsproto.events import ( + AcceptConnection, + RejectConnection, + CloseConnection, + Message, + Request, + Ping, + Pong, + TextMessage, + BytesMessage, +) +from wsproto.extensions import PerMessageDeflate +from wsproto.frame_protocol import CloseReason +from wsproto.utilities import LocalProtocolError +from .errors import ConnectionError, ConnectionClosed + + +class AioBase: + def __init__(self, connection_type=None, receive_bytes=4096, + ping_interval=None, max_message_size=None): + #: The name of the subprotocol chosen for the WebSocket connection. 
+ self.subprotocol = None + + self.connection_type = connection_type + self.receive_bytes = receive_bytes + self.ping_interval = ping_interval + self.max_message_size = max_message_size + self.pong_received = True + self.input_buffer = [] + self.incoming_message = None + self.incoming_message_len = 0 + self.connected = False + self.is_server = (connection_type == ConnectionType.SERVER) + self.close_reason = CloseReason.NO_STATUS_RCVD + self.close_message = None + + self.rsock = None + self.wsock = None + self.event = asyncio.Event() + self.ws = None + self.task = None + + async def connect(self): + self.ws = WSConnection(self.connection_type) + await self.handshake() + + if not self.connected: # pragma: no cover + raise ConnectionError() + self.task = asyncio.create_task(self._task()) + + async def handshake(self): # pragma: no cover + # to be implemented by subclasses + pass + + async def send(self, data): + """Send data over the WebSocket connection. + + :param data: The data to send. If ``data`` is of type ``bytes``, then + a binary message is sent. Else, the message is sent in + text format. + """ + if not self.connected: + raise ConnectionClosed(self.close_reason, self.close_message) + if isinstance(data, bytes): + out_data = self.ws.send(Message(data=data)) + else: + out_data = self.ws.send(TextMessage(data=str(data))) + self.wsock.write(out_data) + + async def receive(self, timeout=None): + """Receive data over the WebSocket connection. + + :param timeout: Amount of time to wait for the data, in seconds. Set + to ``None`` (the default) to wait indefinitely. Set + to 0 to read without blocking. + + The data received is returned, as ``bytes`` or ``str``, depending on + the type of the incoming message. 
+ """ + while self.connected and not self.input_buffer: + try: + await asyncio.wait_for(self.event.wait(), timeout=timeout) + except asyncio.TimeoutError: + return None + self.event.clear() # pragma: no cover + try: + return self.input_buffer.pop(0) + except IndexError: + pass + if not self.connected: # pragma: no cover + raise ConnectionClosed(self.close_reason, self.close_message) + + async def close(self, reason=None, message=None): + """Close the WebSocket connection. + + :param reason: A numeric status code indicating the reason of the + closure, as defined by the WebSocket specification. The + default is 1000 (normal closure). + :param message: A text message to be sent to the other side. + """ + if not self.connected: + raise ConnectionClosed(self.close_reason, self.close_message) + out_data = self.ws.send(CloseConnection( + reason or CloseReason.NORMAL_CLOSURE, message)) + try: + self.wsock.write(out_data) + except BrokenPipeError: # pragma: no cover + pass + self.connected = False + + def choose_subprotocol(self, request): # pragma: no cover + # The method should return the subprotocol to use, or ``None`` if no + # subprotocol is chosen. Can be overridden by subclasses that implement + # the server-side of the WebSocket protocol. 
+ return None + + async def _task(self): + next_ping = None + if self.ping_interval: + next_ping = time() + self.ping_interval + + while self.connected: + try: + in_data = b'' + if next_ping: + now = time() + timed_out = True + if next_ping > now: + timed_out = False + try: + in_data = await asyncio.wait_for( + self.rsock.read(self.receive_bytes), + timeout=next_ping - now) + except asyncio.TimeoutError: + timed_out = True + if timed_out: + # we reached the timeout, we have to send a ping + if not self.pong_received: + await self.close( + reason=CloseReason.POLICY_VIOLATION, + message='Ping/Pong timeout') + break + self.pong_received = False + self.wsock.write(self.ws.send(Ping())) + next_ping = max(now, next_ping) + self.ping_interval + continue + else: + in_data = await self.rsock.read(self.receive_bytes) + if len(in_data) == 0: + raise OSError() + except (OSError, ConnectionResetError): # pragma: no cover + self.connected = False + self.event.set() + break + + self.ws.receive_data(in_data) + self.connected = await self._handle_events() + self.wsock.close() + + async def _handle_events(self): + keep_going = True + out_data = b'' + for event in self.ws.events(): + try: + if isinstance(event, Request): + self.subprotocol = self.choose_subprotocol(event) + out_data += self.ws.send(AcceptConnection( + subprotocol=self.subprotocol, + extensions=[PerMessageDeflate()])) + elif isinstance(event, CloseConnection): + if self.is_server: + out_data += self.ws.send(event.response()) + self.close_reason = event.code + self.close_message = event.reason + self.connected = False + self.event.set() + keep_going = False + elif isinstance(event, Ping): + out_data += self.ws.send(event.response()) + elif isinstance(event, Pong): + self.pong_received = True + elif isinstance(event, (TextMessage, BytesMessage)): + self.incoming_message_len += len(event.data) + if self.max_message_size and \ + self.incoming_message_len > self.max_message_size: + out_data += self.ws.send(CloseConnection( 
+ CloseReason.MESSAGE_TOO_BIG, 'Message is too big')) + self.event.set() + keep_going = False + break + if self.incoming_message is None: + # store message as is first + # if it is the first of a group, the message will be + # converted to bytearray on arrival of the second + # part, since bytearrays are mutable and can be + # concatenated more efficiently + self.incoming_message = event.data + elif isinstance(event, TextMessage): + if not isinstance(self.incoming_message, bytearray): + # convert to bytearray and append + self.incoming_message = bytearray( + (self.incoming_message + event.data).encode()) + else: + # append to bytearray + self.incoming_message += event.data.encode() + else: + if not isinstance(self.incoming_message, bytearray): + # convert to mutable bytearray and append + self.incoming_message = bytearray( + self.incoming_message + event.data) + else: + # append to bytearray + self.incoming_message += event.data + if not event.message_finished: + continue + if isinstance(self.incoming_message, (str, bytes)): + # single part message + self.input_buffer.append(self.incoming_message) + elif isinstance(event, TextMessage): + # convert multi-part message back to text + self.input_buffer.append( + self.incoming_message.decode()) + else: + # convert multi-part message back to bytes + self.input_buffer.append(bytes(self.incoming_message)) + self.incoming_message = None + self.incoming_message_len = 0 + self.event.set() + else: # pragma: no cover + pass + except LocalProtocolError: # pragma: no cover + out_data = b'' + self.event.set() + keep_going = False + if out_data: + self.wsock.write(out_data) + return keep_going + + +class AioServer(AioBase): + """This class implements a WebSocket server. + + Instead of creating an instance of this class directly, use the + ``accept()`` class method to create individual instances of the server, + each bound to a client request. 
+ """ + def __init__(self, request, subprotocols=None, receive_bytes=4096, + ping_interval=None, max_message_size=None): + super().__init__(connection_type=ConnectionType.SERVER, + receive_bytes=receive_bytes, + ping_interval=ping_interval, + max_message_size=max_message_size) + self.request = request + self.headers = {} + self.subprotocols = subprotocols or [] + if isinstance(self.subprotocols, str): + self.subprotocols = [self.subprotocols] + self.mode = 'unknown' + + @classmethod + async def accept(cls, aiohttp=None, asgi=None, sock=None, headers=None, + subprotocols=None, receive_bytes=4096, ping_interval=None, + max_message_size=None): + """Accept a WebSocket connection from a client. + + :param aiohttp: The request object from aiohttp. If this argument is + provided, ``asgi``, ``sock`` and ``headers`` must not + be set. + :param asgi: A (scope, receive, send) tuple from an ASGI request. If + this argument is provided, ``aiohttp``, ``sock`` and + ``headers`` must not be set. + :param sock: A connected socket to use. If this argument is provided, + ``aiohttp`` and ``asgi`` must not be set. The ``headers`` + argument must be set with the incoming request headers. + :param headers: A dictionary with the incoming request headers, when + ``sock`` is used. + :param subprotocols: A list of supported subprotocols, or ``None`` (the + default) to disable subprotocol negotiation. + :param receive_bytes: The size of the receive buffer, in bytes. The + default is 4096. + :param ping_interval: Send ping packets to clients at the requested + interval in seconds. Set to ``None`` (the + default) to disable ping/pong logic. Enable to + prevent disconnections when the line is idle for + a certain amount of time, or to detect + unresponsive clients and disconnect them. A + recommended interval is 25 seconds. + :param max_message_size: The maximum size allowed for a message, in + bytes, or ``None`` for no limit. The default + is ``None``. 
+ """ + if aiohttp and (asgi or sock): + raise ValueError('aiohttp argument cannot be used with asgi or ' + 'sock') + if asgi and (aiohttp or sock): + raise ValueError('asgi argument cannot be used with aiohttp or ' + 'sock') + if asgi: # pragma: no cover + from .asgi import WebSocketASGI + return await WebSocketASGI.accept(asgi[0], asgi[1], asgi[2], + subprotocols=subprotocols) + + ws = cls({'aiohttp': aiohttp, 'sock': sock, 'headers': headers}, + subprotocols=subprotocols, receive_bytes=receive_bytes, + ping_interval=ping_interval, + max_message_size=max_message_size) + await ws._accept() + return ws + + async def _accept(self): + if self.request['sock']: # pragma: no cover + # custom integration, request is a tuple with (socket, headers) + sock = self.request['sock'] + self.headers = self.request['headers'] + self.mode = 'custom' + elif self.request['aiohttp']: + # default implementation, request is an aiohttp request object + sock = self.request['aiohttp'].transport.get_extra_info( + 'socket').dup() + self.headers = self.request['aiohttp'].headers + self.mode = 'aiohttp' + else: # pragma: no cover + raise ValueError('Invalid request') + self.rsock, self.wsock = await asyncio.open_connection(sock=sock) + await super().connect() + + async def handshake(self): + in_data = b'GET / HTTP/1.1\r\n' + for header, value in self.headers.items(): + in_data += f'{header}: {value}\r\n'.encode() + in_data += b'\r\n' + self.ws.receive_data(in_data) + self.connected = await self._handle_events() + + def choose_subprotocol(self, request): + """Choose a subprotocol to use for the WebSocket connection. + + The default implementation selects the first protocol requested by the + client that is accepted by the server. Subclasses can override this + method to implement a different subprotocol negotiation algorithm. + + :param request: A ``Request`` object. + + The method should return the subprotocol to use, or ``None`` if no + subprotocol is chosen. 
+ """ + for subprotocol in request.subprotocols: + if subprotocol in self.subprotocols: + return subprotocol + return None + + +class AioClient(AioBase): + """This class implements a WebSocket client. + + Instead of creating an instance of this class directly, use the + ``connect()`` class method to create an instance that is connected to a + server. + """ + def __init__(self, url, subprotocols=None, headers=None, + receive_bytes=4096, ping_interval=None, max_message_size=None, + ssl_context=None): + super().__init__(connection_type=ConnectionType.CLIENT, + receive_bytes=receive_bytes, + ping_interval=ping_interval, + max_message_size=max_message_size) + self.url = url + self.ssl_context = ssl_context + parsed_url = urlsplit(url) + self.is_secure = parsed_url.scheme in ['https', 'wss'] + self.host = parsed_url.hostname + self.port = parsed_url.port or (443 if self.is_secure else 80) + self.path = parsed_url.path + if parsed_url.query: + self.path += '?' + parsed_url.query + self.subprotocols = subprotocols or [] + if isinstance(self.subprotocols, str): + self.subprotocols = [self.subprotocols] + + self.extra_headeers = [] + if isinstance(headers, dict): + for key, value in headers.items(): + self.extra_headeers.append((key, value)) + elif isinstance(headers, list): + self.extra_headeers = headers + + @classmethod + async def connect(cls, url, subprotocols=None, headers=None, + receive_bytes=4096, ping_interval=None, + max_message_size=None, ssl_context=None, + thread_class=None, event_class=None): + """Returns a WebSocket client connection. + + :param url: The connection URL. Both ``ws://`` and ``wss://`` URLs are + accepted. + :param subprotocols: The name of the subprotocol to use, or a list of + subprotocol names in order of preference. Set to + ``None`` (the default) to not use a subprotocol. + :param headers: A dictionary or list of tuples with additional HTTP + headers to send with the connection request. 
Note that + custom headers are not supported by the WebSocket + protocol, so the use of this parameter is not + recommended. + :param receive_bytes: The size of the receive buffer, in bytes. The + default is 4096. + :param ping_interval: Send ping packets to the server at the requested + interval in seconds. Set to ``None`` (the + default) to disable ping/pong logic. Enable to + prevent disconnections when the line is idle for + a certain amount of time, or to detect an + unresponsive server and disconnect. A recommended + interval is 25 seconds. In general it is + preferred to enable ping/pong on the server, and + let the client respond with pong (which it does + regardless of this setting). + :param max_message_size: The maximum size allowed for a message, in + bytes, or ``None`` for no limit. The default + is ``None``. + :param ssl_context: An ``SSLContext`` instance, if a default SSL + context isn't sufficient. + """ + ws = cls(url, subprotocols=subprotocols, headers=headers, + receive_bytes=receive_bytes, ping_interval=ping_interval, + max_message_size=max_message_size, ssl_context=ssl_context) + await ws._connect() + return ws + + async def _connect(self): + if self.is_secure: # pragma: no cover + if self.ssl_context is None: + self.ssl_context = ssl.create_default_context( + purpose=ssl.Purpose.SERVER_AUTH) + self.rsock, self.wsock = await asyncio.open_connection( + self.host, self.port, ssl=self.ssl_context) + await super().connect() + + async def handshake(self): + out_data = self.ws.send(Request(host=self.host, target=self.path, + subprotocols=self.subprotocols, + extra_headers=self.extra_headeers)) + self.wsock.write(out_data) + + while True: + in_data = await self.rsock.read(self.receive_bytes) + self.ws.receive_data(in_data) + try: + event = next(self.ws.events()) + except StopIteration: # pragma: no cover + pass + else: # pragma: no cover + break + if isinstance(event, RejectConnection): # pragma: no cover + raise ConnectionError(event.status_code) + 
elif not isinstance(event, AcceptConnection): # pragma: no cover + raise ConnectionError(400) + self.subprotocol = event.subprotocol + self.connected = True + + async def close(self, reason=None, message=None): + await super().close(reason=reason, message=message) + self.wsock.close() diff --git a/env/lib/python3.10/site-packages/simple_websocket/asgi.py b/env/lib/python3.10/site-packages/simple_websocket/asgi.py new file mode 100644 index 0000000..33d4d48 --- /dev/null +++ b/env/lib/python3.10/site-packages/simple_websocket/asgi.py @@ -0,0 +1,50 @@ +from .errors import ConnectionClosed # pragma: no cover + + +class WebSocketASGI: # pragma: no cover + def __init__(self, scope, receive, send, subprotocols=None): + self._scope = scope + self._receive = receive + self._send = send + self.subprotocols = subprotocols or [] + self.subprotocol = None + self.connected = False + + @classmethod + async def accept(cls, scope, receive, send, subprotocols=None): + ws = WebSocketASGI(scope, receive, send, subprotocols=subprotocols) + await ws._accept() + return ws + + async def _accept(self): + connect = await self._receive() + if connect['type'] != 'websocket.connect': + raise ValueError('Expected websocket.connect') + for subprotocol in self._scope['subprotocols']: + if subprotocol in self.subprotocols: + self.subprotocol = subprotocol + break + await self._send({'type': 'websocket.accept', + 'subprotocol': self.subprotocol}) + + async def receive(self): + message = await self._receive() + if message['type'] == 'websocket.disconnect': + raise ConnectionClosed() + elif message['type'] != 'websocket.receive': + raise OSError(32, 'Websocket message type not supported') + return message.get('text', message.get('bytes')) + + async def send(self, data): + if isinstance(data, str): + await self._send({'type': 'websocket.send', 'text': data}) + else: + await self._send({'type': 'websocket.send', 'bytes': data}) + + async def close(self): + if not self.connected: + self.conncted = 
False + try: + await self._send({'type': 'websocket.close'}) + except Exception: + pass diff --git a/env/lib/python3.10/site-packages/simple_websocket/errors.py b/env/lib/python3.10/site-packages/simple_websocket/errors.py new file mode 100644 index 0000000..3a2a70b --- /dev/null +++ b/env/lib/python3.10/site-packages/simple_websocket/errors.py @@ -0,0 +1,20 @@ +from wsproto.frame_protocol import CloseReason + + +class SimpleWebsocketError(RuntimeError): + pass + + +class ConnectionError(SimpleWebsocketError): + """Connection error exception class.""" + def __init__(self, status_code=None): # pragma: no cover + self.status_code = status_code + super().__init__(f'Connection error: {status_code}') + + +class ConnectionClosed(SimpleWebsocketError): + """Connection closed exception class.""" + def __init__(self, reason=CloseReason.NO_STATUS_RCVD, message=None): + self.reason = reason + self.message = message + super().__init__(f'Connection closed: {reason} {message or ""}') diff --git a/env/lib/python3.10/site-packages/simple_websocket/ws.py b/env/lib/python3.10/site-packages/simple_websocket/ws.py new file mode 100644 index 0000000..33519e7 --- /dev/null +++ b/env/lib/python3.10/site-packages/simple_websocket/ws.py @@ -0,0 +1,488 @@ +import selectors +import socket +import ssl +from time import time +from urllib.parse import urlsplit + +from wsproto import ConnectionType, WSConnection +from wsproto.events import ( + AcceptConnection, + RejectConnection, + CloseConnection, + Message, + Request, + Ping, + Pong, + TextMessage, + BytesMessage, +) +from wsproto.extensions import PerMessageDeflate +from wsproto.frame_protocol import CloseReason +from wsproto.utilities import LocalProtocolError +from .errors import ConnectionError, ConnectionClosed + + +class Base: + def __init__(self, sock=None, connection_type=None, receive_bytes=4096, + ping_interval=None, max_message_size=None, + thread_class=None, event_class=None, selector_class=None): + #: The name of the subprotocol 
chosen for the WebSocket connection. + self.subprotocol = None + + self.sock = sock + self.receive_bytes = receive_bytes + self.ping_interval = ping_interval + self.max_message_size = max_message_size + self.pong_received = True + self.input_buffer = [] + self.incoming_message = None + self.incoming_message_len = 0 + self.connected = False + self.is_server = (connection_type == ConnectionType.SERVER) + self.close_reason = CloseReason.NO_STATUS_RCVD + self.close_message = None + + if thread_class is None: + import threading + thread_class = threading.Thread + if event_class is None: # pragma: no branch + import threading + event_class = threading.Event + if selector_class is None: + selector_class = selectors.DefaultSelector + self.selector_class = selector_class + self.event = event_class() + + self.ws = WSConnection(connection_type) + self.handshake() + + if not self.connected: # pragma: no cover + raise ConnectionError() + self.thread = thread_class(target=self._thread) + self.thread.name = self.thread.name.replace( + '(_thread)', '(simple_websocket.Base._thread)') + self.thread.start() + + def handshake(self): # pragma: no cover + # to be implemented by subclasses + pass + + def send(self, data): + """Send data over the WebSocket connection. + + :param data: The data to send. If ``data`` is of type ``bytes``, then + a binary message is sent. Else, the message is sent in + text format. + """ + if not self.connected: + raise ConnectionClosed(self.close_reason, self.close_message) + if isinstance(data, bytes): + out_data = self.ws.send(Message(data=data)) + else: + out_data = self.ws.send(TextMessage(data=str(data))) + self.sock.send(out_data) + + def receive(self, timeout=None): + """Receive data over the WebSocket connection. + + :param timeout: Amount of time to wait for the data, in seconds. Set + to ``None`` (the default) to wait indefinitely. Set + to 0 to read without blocking. 
+ + The data received is returned, as ``bytes`` or ``str``, depending on + the type of the incoming message. + """ + while self.connected and not self.input_buffer: + if not self.event.wait(timeout=timeout): + return None + self.event.clear() + try: + return self.input_buffer.pop(0) + except IndexError: + pass + if not self.connected: # pragma: no cover + raise ConnectionClosed(self.close_reason, self.close_message) + + def close(self, reason=None, message=None): + """Close the WebSocket connection. + + :param reason: A numeric status code indicating the reason of the + closure, as defined by the WebSocket specification. The + default is 1000 (normal closure). + :param message: A text message to be sent to the other side. + """ + if not self.connected: + raise ConnectionClosed(self.close_reason, self.close_message) + out_data = self.ws.send(CloseConnection( + reason or CloseReason.NORMAL_CLOSURE, message)) + try: + self.sock.send(out_data) + except BrokenPipeError: # pragma: no cover + pass + self.connected = False + + def choose_subprotocol(self, request): # pragma: no cover + # The method should return the subprotocol to use, or ``None`` if no + # subprotocol is chosen. Can be overridden by subclasses that implement + # the server-side of the WebSocket protocol. 
+ return None + + def _thread(self): + sel = None + if self.ping_interval: + next_ping = time() + self.ping_interval + sel = self.selector_class() + try: + sel.register(self.sock, selectors.EVENT_READ, True) + except ValueError: # pragma: no cover + self.connected = False + + while self.connected: + try: + if sel: + now = time() + if next_ping <= now or not sel.select(next_ping - now): + # we reached the timeout, we have to send a ping + if not self.pong_received: + self.close(reason=CloseReason.POLICY_VIOLATION, + message='Ping/Pong timeout') + self.event.set() + break + self.pong_received = False + self.sock.send(self.ws.send(Ping())) + next_ping = max(now, next_ping) + self.ping_interval + continue + in_data = self.sock.recv(self.receive_bytes) + if len(in_data) == 0: + raise OSError() + self.ws.receive_data(in_data) + self.connected = self._handle_events() + except (OSError, ConnectionResetError, + LocalProtocolError): # pragma: no cover + self.connected = False + self.event.set() + break + sel.close() if sel else None + self.sock.close() + + def _handle_events(self): + keep_going = True + out_data = b'' + for event in self.ws.events(): + try: + if isinstance(event, Request): + self.subprotocol = self.choose_subprotocol(event) + out_data += self.ws.send(AcceptConnection( + subprotocol=self.subprotocol, + extensions=[PerMessageDeflate()])) + elif isinstance(event, CloseConnection): + if self.is_server: + out_data += self.ws.send(event.response()) + self.close_reason = event.code + self.close_message = event.reason + self.connected = False + self.event.set() + keep_going = False + elif isinstance(event, Ping): + out_data += self.ws.send(event.response()) + elif isinstance(event, Pong): + self.pong_received = True + elif isinstance(event, (TextMessage, BytesMessage)): + self.incoming_message_len += len(event.data) + if self.max_message_size and \ + self.incoming_message_len > self.max_message_size: + out_data += self.ws.send(CloseConnection( + 
CloseReason.MESSAGE_TOO_BIG, 'Message is too big')) + self.event.set() + keep_going = False + break + if self.incoming_message is None: + # store message as is first + # if it is the first of a group, the message will be + # converted to bytearray on arrival of the second + # part, since bytearrays are mutable and can be + # concatenated more efficiently + self.incoming_message = event.data + elif isinstance(event, TextMessage): + if not isinstance(self.incoming_message, bytearray): + # convert to bytearray and append + self.incoming_message = bytearray( + (self.incoming_message + event.data).encode()) + else: + # append to bytearray + self.incoming_message += event.data.encode() + else: + if not isinstance(self.incoming_message, bytearray): + # convert to mutable bytearray and append + self.incoming_message = bytearray( + self.incoming_message + event.data) + else: + # append to bytearray + self.incoming_message += event.data + if not event.message_finished: + continue + if isinstance(self.incoming_message, (str, bytes)): + # single part message + self.input_buffer.append(self.incoming_message) + elif isinstance(event, TextMessage): + # convert multi-part message back to text + self.input_buffer.append( + self.incoming_message.decode()) + else: + # convert multi-part message back to bytes + self.input_buffer.append(bytes(self.incoming_message)) + self.incoming_message = None + self.incoming_message_len = 0 + self.event.set() + else: # pragma: no cover + pass + except LocalProtocolError: # pragma: no cover + out_data = b'' + self.event.set() + keep_going = False + if out_data: + self.sock.send(out_data) + return keep_going + + +class Server(Base): + """This class implements a WebSocket server. + + Instead of creating an instance of this class directly, use the + ``accept()`` class method to create individual instances of the server, + each bound to a client request. 
+ """ + def __init__(self, environ, subprotocols=None, receive_bytes=4096, + ping_interval=None, max_message_size=None, thread_class=None, + event_class=None, selector_class=None): + self.environ = environ + self.subprotocols = subprotocols or [] + if isinstance(self.subprotocols, str): + self.subprotocols = [self.subprotocols] + self.mode = 'unknown' + sock = None + if 'werkzeug.socket' in environ: + # extract socket from Werkzeug's WSGI environment + sock = environ.get('werkzeug.socket') + self.mode = 'werkzeug' + elif 'gunicorn.socket' in environ: + # extract socket from Gunicorn WSGI environment + sock = environ.get('gunicorn.socket') + self.mode = 'gunicorn' + elif 'eventlet.input' in environ: # pragma: no cover + # extract socket from Eventlet's WSGI environment + sock = environ.get('eventlet.input').get_socket() + self.mode = 'eventlet' + elif environ.get('SERVER_SOFTWARE', '').startswith( + 'gevent'): # pragma: no cover + # extract socket from Gevent's WSGI environment + wsgi_input = environ['wsgi.input'] + if not hasattr(wsgi_input, 'raw') and hasattr(wsgi_input, 'rfile'): + wsgi_input = wsgi_input.rfile + if hasattr(wsgi_input, 'raw'): + sock = wsgi_input.raw._sock + try: + sock = sock.dup() + except NotImplementedError: + pass + self.mode = 'gevent' + if sock is None: + raise RuntimeError('Cannot obtain socket from WSGI environment.') + super().__init__(sock, connection_type=ConnectionType.SERVER, + receive_bytes=receive_bytes, + ping_interval=ping_interval, + max_message_size=max_message_size, + thread_class=thread_class, event_class=event_class, + selector_class=selector_class) + + @classmethod + def accept(cls, environ, subprotocols=None, receive_bytes=4096, + ping_interval=None, max_message_size=None, thread_class=None, + event_class=None, selector_class=None): + """Accept a WebSocket connection from a client. + + :param environ: A WSGI ``environ`` dictionary with the request details. 
+ Among other things, this class expects to find the + low-level network socket for the connection somewhere + in this dictionary. Since the WSGI specification does + not cover where or how to store this socket, each web + server does this in its own different way. Werkzeug, + Gunicorn, Eventlet and Gevent are the only web servers + that are currently supported. + :param subprotocols: A list of supported subprotocols, or ``None`` (the + default) to disable subprotocol negotiation. + :param receive_bytes: The size of the receive buffer, in bytes. The + default is 4096. + :param ping_interval: Send ping packets to clients at the requested + interval in seconds. Set to ``None`` (the + default) to disable ping/pong logic. Enable to + prevent disconnections when the line is idle for + a certain amount of time, or to detect + unresponsive clients and disconnect them. A + recommended interval is 25 seconds. + :param max_message_size: The maximum size allowed for a message, in + bytes, or ``None`` for no limit. The default + is ``None``. + :param thread_class: The ``Thread`` class to use when creating + background threads. The default is the + ``threading.Thread`` class from the Python + standard library. + :param event_class: The ``Event`` class to use when creating event + objects. The default is the `threading.Event`` + class from the Python standard library. + :param selector_class: The ``Selector`` class to use when creating + selectors. The default is the + ``selectors.DefaultSelector`` class from the + Python standard library. 
+ """ + return cls(environ, subprotocols=subprotocols, + receive_bytes=receive_bytes, ping_interval=ping_interval, + max_message_size=max_message_size, + thread_class=thread_class, event_class=event_class, + selector_class=selector_class) + + def handshake(self): + in_data = b'GET / HTTP/1.1\r\n' + for key, value in self.environ.items(): + if key.startswith('HTTP_'): + header = '-'.join([p.capitalize() for p in key[5:].split('_')]) + in_data += f'{header}: {value}\r\n'.encode() + in_data += b'\r\n' + self.ws.receive_data(in_data) + self.connected = self._handle_events() + + def choose_subprotocol(self, request): + """Choose a subprotocol to use for the WebSocket connection. + + The default implementation selects the first protocol requested by the + client that is accepted by the server. Subclasses can override this + method to implement a different subprotocol negotiation algorithm. + + :param request: A ``Request`` object. + + The method should return the subprotocol to use, or ``None`` if no + subprotocol is chosen. + """ + for subprotocol in request.subprotocols: + if subprotocol in self.subprotocols: + return subprotocol + return None + + +class Client(Base): + """This class implements a WebSocket client. + + Instead of creating an instance of this class directly, use the + ``connect()`` class method to create an instance that is connected to a + server. + """ + def __init__(self, url, subprotocols=None, headers=None, + receive_bytes=4096, ping_interval=None, max_message_size=None, + ssl_context=None, thread_class=None, event_class=None): + parsed_url = urlsplit(url) + is_secure = parsed_url.scheme in ['https', 'wss'] + self.host = parsed_url.hostname + self.port = parsed_url.port or (443 if is_secure else 80) + self.path = parsed_url.path + if parsed_url.query: + self.path += '?' 
+ parsed_url.query + self.subprotocols = subprotocols or [] + if isinstance(self.subprotocols, str): + self.subprotocols = [self.subprotocols] + + self.extra_headeers = [] + if isinstance(headers, dict): + for key, value in headers.items(): + self.extra_headeers.append((key, value)) + elif isinstance(headers, list): + self.extra_headeers = headers + + connection_args = socket.getaddrinfo(self.host, self.port, + type=socket.SOCK_STREAM) + if len(connection_args) == 0: # pragma: no cover + raise ConnectionError() + sock = socket.socket(connection_args[0][0], connection_args[0][1], + connection_args[0][2]) + if is_secure: # pragma: no cover + if ssl_context is None: + ssl_context = ssl.create_default_context( + purpose=ssl.Purpose.SERVER_AUTH) + sock = ssl_context.wrap_socket(sock, server_hostname=self.host) + sock.connect(connection_args[0][4]) + super().__init__(sock, connection_type=ConnectionType.CLIENT, + receive_bytes=receive_bytes, + ping_interval=ping_interval, + max_message_size=max_message_size, + thread_class=thread_class, event_class=event_class) + + @classmethod + def connect(cls, url, subprotocols=None, headers=None, + receive_bytes=4096, ping_interval=None, max_message_size=None, + ssl_context=None, thread_class=None, event_class=None): + """Returns a WebSocket client connection. + + :param url: The connection URL. Both ``ws://`` and ``wss://`` URLs are + accepted. + :param subprotocols: The name of the subprotocol to use, or a list of + subprotocol names in order of preference. Set to + ``None`` (the default) to not use a subprotocol. + :param headers: A dictionary or list of tuples with additional HTTP + headers to send with the connection request. Note that + custom headers are not supported by the WebSocket + protocol, so the use of this parameter is not + recommended. + :param receive_bytes: The size of the receive buffer, in bytes. The + default is 4096. 
+        :param ping_interval: Send ping packets to the server at the requested
+                              interval in seconds. Set to ``None`` (the
+                              default) to disable ping/pong logic. Enable to
+                              prevent disconnections when the line is idle for
+                              a certain amount of time, or to detect an
+                              unresponsive server and disconnect. A recommended
+                              interval is 25 seconds. In general it is
+                              preferred to enable ping/pong on the server, and
+                              let the client respond with pong (which it does
+                              regardless of this setting).
+        :param max_message_size: The maximum size allowed for a message, in
+                                 bytes, or ``None`` for no limit. The default
+                                 is ``None``.
+        :param ssl_context: An ``SSLContext`` instance, if a default SSL
+                            context isn't sufficient.
+        :param thread_class: The ``Thread`` class to use when creating
+                             background threads. The default is the
+                             ``threading.Thread`` class from the Python
+                             standard library.
+        :param event_class: The ``Event`` class to use when creating event
+                            objects. The default is the ``threading.Event``
+                            class from the Python standard library.
+ """ + return cls(url, subprotocols=subprotocols, headers=headers, + receive_bytes=receive_bytes, ping_interval=ping_interval, + max_message_size=max_message_size, ssl_context=ssl_context, + thread_class=thread_class, event_class=event_class) + + def handshake(self): + out_data = self.ws.send(Request(host=self.host, target=self.path, + subprotocols=self.subprotocols, + extra_headers=self.extra_headeers)) + self.sock.send(out_data) + + while True: + in_data = self.sock.recv(self.receive_bytes) + self.ws.receive_data(in_data) + try: + event = next(self.ws.events()) + except StopIteration: # pragma: no cover + pass + else: # pragma: no cover + break + if isinstance(event, RejectConnection): # pragma: no cover + raise ConnectionError(event.status_code) + elif not isinstance(event, AcceptConnection): # pragma: no cover + raise ConnectionError(400) + self.subprotocol = event.subprotocol + self.connected = True + + def close(self, reason=None, message=None): + super().close(reason=reason, message=message) + self.sock.close() diff --git a/env/lib/python3.10/site-packages/socketio/__init__.py b/env/lib/python3.10/site-packages/socketio/__init__.py new file mode 100644 index 0000000..95642f4 --- /dev/null +++ b/env/lib/python3.10/site-packages/socketio/__init__.py @@ -0,0 +1,28 @@ +from .client import Client +from .simple_client import SimpleClient +from .manager import Manager +from .pubsub_manager import PubSubManager +from .kombu_manager import KombuManager +from .redis_manager import RedisManager +from .kafka_manager import KafkaManager +from .zmq_manager import ZmqManager +from .server import Server +from .namespace import Namespace, ClientNamespace +from .middleware import WSGIApp, Middleware +from .tornado import get_tornado_handler +from .async_client import AsyncClient +from .async_simple_client import AsyncSimpleClient +from .async_server import AsyncServer +from .async_manager import AsyncManager +from .async_namespace import AsyncNamespace, AsyncClientNamespace 
+from .async_redis_manager import AsyncRedisManager +from .async_aiopika_manager import AsyncAioPikaManager +from .asgi import ASGIApp + +__all__ = ['SimpleClient', 'Client', 'Server', 'Manager', 'PubSubManager', + 'KombuManager', 'RedisManager', 'ZmqManager', 'KafkaManager', + 'Namespace', 'ClientNamespace', 'WSGIApp', 'Middleware', + 'AsyncSimpleClient', 'AsyncClient', 'AsyncServer', + 'AsyncNamespace', 'AsyncClientNamespace', 'AsyncManager', + 'AsyncRedisManager', 'ASGIApp', 'get_tornado_handler', + 'AsyncAioPikaManager'] diff --git a/env/lib/python3.10/site-packages/socketio/admin.py b/env/lib/python3.10/site-packages/socketio/admin.py new file mode 100644 index 0000000..12b905e --- /dev/null +++ b/env/lib/python3.10/site-packages/socketio/admin.py @@ -0,0 +1,391 @@ +from datetime import datetime, timezone +import functools +import os +import socket +import time +from urllib.parse import parse_qs +from .exceptions import ConnectionRefusedError + +HOSTNAME = socket.gethostname() +PID = os.getpid() + + +class EventBuffer: + def __init__(self): + self.buffer = {} + + def push(self, type, count=1): + timestamp = int(time.time()) * 1000 + key = f'{timestamp};{type}' + if key not in self.buffer: + self.buffer[key] = { + 'timestamp': timestamp, + 'type': type, + 'count': count, + } + else: + self.buffer[key]['count'] += count + + def get_and_clear(self): + buffer = self.buffer + self.buffer = {} + return [value for value in buffer.values()] + + +class InstrumentedServer: + def __init__(self, sio, auth=None, mode='development', read_only=False, + server_id=None, namespace='/admin', server_stats_interval=2): + """Instrument the Socket.IO server for monitoring with the `Socket.IO + Admin UI `_. 
+ """ + if auth is None: + raise ValueError('auth must be specified') + self.sio = sio + self.auth = auth + self.admin_namespace = namespace + self.read_only = read_only + self.server_id = server_id or ( + self.sio.manager.host_id if hasattr(self.sio.manager, 'host_id') + else HOSTNAME + ) + self.mode = mode + self.server_stats_interval = server_stats_interval + self.event_buffer = EventBuffer() + + # task that emits "server_stats" every 2 seconds + self.stop_stats_event = None + self.stats_task = None + + # monkey-patch the server to report metrics to the admin UI + self.instrument() + + def instrument(self): + self.sio.on('connect', self.admin_connect, + namespace=self.admin_namespace) + + if self.mode == 'development': + if not self.read_only: # pragma: no branch + self.sio.on('emit', self.admin_emit, + namespace=self.admin_namespace) + self.sio.on('join', self.admin_enter_room, + namespace=self.admin_namespace) + self.sio.on('leave', self.admin_leave_room, + namespace=self.admin_namespace) + self.sio.on('_disconnect', self.admin_disconnect, + namespace=self.admin_namespace) + + # track socket connection times + self.sio.manager._timestamps = {} + + # report socket.io connections, disconnections and received events + self.sio.__trigger_event = self.sio._trigger_event + self.sio._trigger_event = self._trigger_event + + # report join rooms + self.sio.manager.__basic_enter_room = \ + self.sio.manager.basic_enter_room + self.sio.manager.basic_enter_room = self._basic_enter_room + + # report leave rooms + self.sio.manager.__basic_leave_room = \ + self.sio.manager.basic_leave_room + self.sio.manager.basic_leave_room = self._basic_leave_room + + # report emit events + self.sio.manager.__emit = self.sio.manager.emit + self.sio.manager.emit = self._emit + + # report engine.io connections + self.sio.eio.on('connect', self._handle_eio_connect) + self.sio.eio.on('disconnect', self._handle_eio_disconnect) + + # report polling packets + from engineio.socket import Socket + 
self.sio.eio.__ok = self.sio.eio._ok + self.sio.eio._ok = self._eio_http_response + Socket.__handle_post_request = Socket.handle_post_request + Socket.handle_post_request = functools.partialmethod( + self.__class__._eio_handle_post_request, self) + + # report websocket packets + Socket.__websocket_handler = Socket._websocket_handler + Socket._websocket_handler = functools.partialmethod( + self.__class__._eio_websocket_handler, self) + + # report connected sockets with each ping + if self.mode == 'development': + Socket.__send_ping = Socket._send_ping + Socket._send_ping = functools.partialmethod( + self.__class__._eio_send_ping, self) + + def uninstrument(self): # pragma: no cover + if self.mode == 'development': + self.sio._trigger_event = self.sio.__trigger_event + self.sio.manager.basic_enter_room = \ + self.sio.manager.__basic_enter_room + self.sio.manager.basic_leave_room = \ + self.sio.manager.__basic_leave_room + self.sio.manager.emit = self.sio.manager.__emit + self.sio.eio._ok = self.sio.eio.__ok + + from engineio.socket import Socket + Socket.handle_post_request = Socket.__handle_post_request + Socket._websocket_handler = Socket.__websocket_handler + if self.mode == 'development': + Socket._send_ping = Socket.__send_ping + + def admin_connect(self, sid, environ, client_auth): + if self.auth: + authenticated = False + if isinstance(self.auth, dict): + authenticated = client_auth == self.auth + elif isinstance(self.auth, list): + authenticated = client_auth in self.auth + else: + authenticated = self.auth(client_auth) + if not authenticated: + raise ConnectionRefusedError('authentication failed') + + def config(sid): + self.sio.sleep(0.1) + + # supported features + features = ['AGGREGATED_EVENTS'] + if not self.read_only: + features += ['EMIT', 'JOIN', 'LEAVE', 'DISCONNECT', 'MJOIN', + 'MLEAVE', 'MDISCONNECT'] + if self.mode == 'development': + features.append('ALL_EVENTS') + self.sio.emit('config', {'supportedFeatures': features}, + to=sid, 
namespace=self.admin_namespace) + + # send current sockets + if self.mode == 'development': + all_sockets = [] + for nsp in self.sio.manager.get_namespaces(): + for sid, eio_sid in self.sio.manager.get_participants( + nsp, None): + all_sockets.append( + self.serialize_socket(sid, nsp, eio_sid)) + self.sio.emit('all_sockets', all_sockets, to=sid, + namespace=self.admin_namespace) + + self.sio.start_background_task(config, sid) + + def admin_emit(self, _, namespace, room_filter, event, *data): + self.sio.emit(event, data, to=room_filter, namespace=namespace) + + def admin_enter_room(self, _, namespace, room, room_filter=None): + for sid, _ in self.sio.manager.get_participants( + namespace, room_filter): + self.sio.enter_room(sid, room, namespace=namespace) + + def admin_leave_room(self, _, namespace, room, room_filter=None): + for sid, _ in self.sio.manager.get_participants( + namespace, room_filter): + self.sio.leave_room(sid, room, namespace=namespace) + + def admin_disconnect(self, _, namespace, close, room_filter=None): + for sid, _ in self.sio.manager.get_participants( + namespace, room_filter): + self.sio.disconnect(sid, namespace=namespace) + + def shutdown(self): + if self.stats_task: # pragma: no branch + self.stop_stats_event.set() + self.stats_task.join() + + def _trigger_event(self, event, namespace, *args): + t = time.time() + sid = args[0] + if event == 'connect': + eio_sid = self.sio.manager.eio_sid_from_sid(sid, namespace) + self.sio.manager._timestamps[sid] = t + serialized_socket = self.serialize_socket(sid, namespace, eio_sid) + self.sio.emit('socket_connected', ( + serialized_socket, + datetime.fromtimestamp(t, timezone.utc).isoformat(), + ), namespace=self.admin_namespace) + elif event == 'disconnect': + del self.sio.manager._timestamps[sid] + reason = args[1] + self.sio.emit('socket_disconnected', ( + namespace, + sid, + reason, + datetime.fromtimestamp(t, timezone.utc).isoformat(), + ), namespace=self.admin_namespace) + else: + 
self.sio.emit('event_received', ( + namespace, + sid, + (event, *args[1:]), + datetime.fromtimestamp(t, timezone.utc).isoformat(), + ), namespace=self.admin_namespace) + return self.sio.__trigger_event(event, namespace, *args) + + def _check_for_upgrade(self, eio_sid, sid, namespace): # pragma: no cover + for _ in range(5): + self.sio.sleep(5) + try: + if self.sio.eio._get_socket(eio_sid).upgraded: + self.sio.emit('socket_updated', { + 'id': sid, + 'nsp': namespace, + 'transport': 'websocket', + }, namespace=self.admin_namespace) + break + except KeyError: + pass + + def _basic_enter_room(self, sid, namespace, room, eio_sid=None): + ret = self.sio.manager.__basic_enter_room(sid, namespace, room, + eio_sid) + if room: + self.sio.emit('room_joined', ( + namespace, + room, + sid, + datetime.now(timezone.utc).isoformat(), + ), namespace=self.admin_namespace) + return ret + + def _basic_leave_room(self, sid, namespace, room): + if room: + self.sio.emit('room_left', ( + namespace, + room, + sid, + datetime.now(timezone.utc).isoformat(), + ), namespace=self.admin_namespace) + return self.sio.manager.__basic_leave_room(sid, namespace, room) + + def _emit(self, event, data, namespace, room=None, skip_sid=None, + callback=None, **kwargs): + ret = self.sio.manager.__emit(event, data, namespace, room=room, + skip_sid=skip_sid, callback=callback, + **kwargs) + if namespace != self.admin_namespace: + event_data = [event] + list(data) if isinstance(data, tuple) \ + else [event, data] + if not isinstance(skip_sid, list): # pragma: no branch + skip_sid = [skip_sid] + for sid, _ in self.sio.manager.get_participants(namespace, room): + if sid not in skip_sid: + self.sio.emit('event_sent', ( + namespace, + sid, + event_data, + datetime.now(timezone.utc).isoformat(), + ), namespace=self.admin_namespace) + return ret + + def _handle_eio_connect(self, eio_sid, environ): + if self.stop_stats_event is None: + self.stop_stats_event = self.sio.eio.create_event() + self.stats_task = 
self.sio.start_background_task( + self._emit_server_stats) + + self.event_buffer.push('rawConnection') + return self.sio._handle_eio_connect(eio_sid, environ) + + def _handle_eio_disconnect(self, eio_sid, reason): + self.event_buffer.push('rawDisconnection') + return self.sio._handle_eio_disconnect(eio_sid, reason) + + def _eio_http_response(self, packets=None, headers=None, jsonp_index=None): + ret = self.sio.eio.__ok(packets=packets, headers=headers, + jsonp_index=jsonp_index) + self.event_buffer.push('packetsOut') + self.event_buffer.push('bytesOut', len(ret['response'])) + return ret + + def _eio_handle_post_request(socket, self, environ): + ret = socket.__handle_post_request(environ) + self.event_buffer.push('packetsIn') + self.event_buffer.push( + 'bytesIn', int(environ.get('CONTENT_LENGTH', 0))) + return ret + + def _eio_websocket_handler(socket, self, ws): + def _send(ws, data, *args, **kwargs): + self.event_buffer.push('packetsOut') + self.event_buffer.push('bytesOut', len(data)) + return ws.__send(data, *args, **kwargs) + + def _wait(ws): + ret = ws.__wait() + self.event_buffer.push('packetsIn') + self.event_buffer.push('bytesIn', len(ret or '')) + return ret + + ws.__send = ws.send + ws.send = functools.partial(_send, ws) + ws.__wait = ws.wait + ws.wait = functools.partial(_wait, ws) + return socket.__websocket_handler(ws) + + def _eio_send_ping(socket, self): # pragma: no cover + eio_sid = socket.sid + t = time.time() + for namespace in self.sio.manager.get_namespaces(): + sid = self.sio.manager.sid_from_eio_sid(eio_sid, namespace) + if sid: + serialized_socket = self.serialize_socket(sid, namespace, + eio_sid) + self.sio.emit('socket_connected', ( + serialized_socket, + datetime.fromtimestamp(t, timezone.utc).isoformat(), + ), namespace=self.admin_namespace) + return socket.__send_ping() + + def _emit_server_stats(self): + start_time = time.time() + namespaces = list(self.sio.handlers.keys()) + namespaces.sort() + while not 
self.stop_stats_event.is_set(): + self.sio.sleep(self.server_stats_interval) + self.sio.emit('server_stats', { + 'serverId': self.server_id, + 'hostname': HOSTNAME, + 'pid': PID, + 'uptime': time.time() - start_time, + 'clientsCount': len(self.sio.eio.sockets), + 'pollingClientsCount': len( + [s for s in self.sio.eio.sockets.values() + if not s.upgraded]), + 'aggregatedEvents': self.event_buffer.get_and_clear(), + 'namespaces': [{ + 'name': nsp, + 'socketsCount': len(self.sio.manager.rooms.get( + nsp, {None: []}).get(None, [])) + } for nsp in namespaces], + }, namespace=self.admin_namespace) + + def serialize_socket(self, sid, namespace, eio_sid=None): + if eio_sid is None: # pragma: no cover + eio_sid = self.sio.manager.eio_sid_from_sid(sid) + socket = self.sio.eio._get_socket(eio_sid) + environ = self.sio.environ.get(eio_sid, {}) + tm = self.sio.manager._timestamps[sid] if sid in \ + self.sio.manager._timestamps else 0 + return { + 'id': sid, + 'clientId': eio_sid, + 'transport': 'websocket' if socket.upgraded else 'polling', + 'nsp': namespace, + 'data': {}, + 'handshake': { + 'address': environ.get('REMOTE_ADDR', ''), + 'headers': {k[5:].lower(): v for k, v in environ.items() + if k.startswith('HTTP_')}, + 'query': {k: v[0] if len(v) == 1 else v for k, v in parse_qs( + environ.get('QUERY_STRING', '')).items()}, + 'secure': environ.get('wsgi.url_scheme', '') == 'https', + 'url': environ.get('PATH_INFO', ''), + 'issued': tm * 1000, + 'time': datetime.fromtimestamp(tm, timezone.utc).isoformat() + if tm else '', + }, + 'rooms': self.sio.manager.get_rooms(sid, namespace), + } diff --git a/env/lib/python3.10/site-packages/socketio/asgi.py b/env/lib/python3.10/site-packages/socketio/asgi.py new file mode 100644 index 0000000..23b094d --- /dev/null +++ b/env/lib/python3.10/site-packages/socketio/asgi.py @@ -0,0 +1,47 @@ +import engineio + + +class ASGIApp(engineio.ASGIApp): # pragma: no cover + """ASGI application middleware for Socket.IO. 
+
+    This middleware dispatches traffic to a Socket.IO application. It can
+    also serve a list of static files to the client, or forward unrelated
+    HTTP traffic to another ASGI application.
+
+    :param socketio_server: The Socket.IO server. Must be an instance of the
+                            ``socketio.AsyncServer`` class.
+    :param static_files: A dictionary with static file mapping rules. See the
+                         documentation for details on this argument.
+    :param other_asgi_app: A separate ASGI app that receives all other traffic.
+    :param socketio_path: The endpoint where the Socket.IO application should
+                          be installed. The default value is appropriate for
+                          most cases. With a value of ``None``, all incoming
+                          traffic is directed to the Socket.IO server, with the
+                          assumption that routing, if necessary, is handled by
+                          a different layer. When this option is set to
+                          ``None``, ``static_files`` and ``other_asgi_app`` are
+                          ignored.
+    :param on_startup: function to be called on application startup; can be
+                       coroutine
+    :param on_shutdown: function to be called on application shutdown; can be
+                        coroutine
+
+    Example usage::
+
+        import socketio
+        import uvicorn
+
+        sio = socketio.AsyncServer()
+        app = socketio.ASGIApp(sio, static_files={
+            '/': 'index.html',
+            '/static': './public',
+        })
+        uvicorn.run(app, host='127.0.0.1', port=5000)
+    """
+    def __init__(self, socketio_server, other_asgi_app=None,
+                 static_files=None, socketio_path='socket.io',
+                 on_startup=None, on_shutdown=None):
+        super().__init__(socketio_server, other_asgi_app,
+                         static_files=static_files,
+                         engineio_path=socketio_path, on_startup=on_startup,
+                         on_shutdown=on_shutdown)
diff --git a/env/lib/python3.10/site-packages/socketio/async_admin.py b/env/lib/python3.10/site-packages/socketio/async_admin.py
new file mode 100644
index 0000000..b052d8f
--- /dev/null
+++ b/env/lib/python3.10/site-packages/socketio/async_admin.py
@@ -0,0 +1,384 @@
+import asyncio
+from datetime import datetime, timezone
+import functools
+import os
+import socket
+import time +from urllib.parse import parse_qs +from .admin import EventBuffer +from .exceptions import ConnectionRefusedError + +HOSTNAME = socket.gethostname() +PID = os.getpid() + + +class InstrumentedAsyncServer: + def __init__(self, sio, auth=None, namespace='/admin', read_only=False, + server_id=None, mode='development', server_stats_interval=2): + """Instrument the Socket.IO server for monitoring with the `Socket.IO + Admin UI `_. + """ + if auth is None: + raise ValueError('auth must be specified') + self.sio = sio + self.auth = auth + self.admin_namespace = namespace + self.read_only = read_only + self.server_id = server_id or ( + self.sio.manager.host_id if hasattr(self.sio.manager, 'host_id') + else HOSTNAME + ) + self.mode = mode + self.server_stats_interval = server_stats_interval + self.admin_queue = [] + self.event_buffer = EventBuffer() + + # task that emits "server_stats" every 2 seconds + self.stop_stats_event = None + self.stats_task = None + + # monkey-patch the server to report metrics to the admin UI + self.instrument() + + def instrument(self): + self.sio.on('connect', self.admin_connect, + namespace=self.admin_namespace) + + if self.mode == 'development': + if not self.read_only: # pragma: no branch + self.sio.on('emit', self.admin_emit, + namespace=self.admin_namespace) + self.sio.on('join', self.admin_enter_room, + namespace=self.admin_namespace) + self.sio.on('leave', self.admin_leave_room, + namespace=self.admin_namespace) + self.sio.on('_disconnect', self.admin_disconnect, + namespace=self.admin_namespace) + + # track socket connection times + self.sio.manager._timestamps = {} + + # report socket.io connections, disconnections and received events + self.sio.__trigger_event = self.sio._trigger_event + self.sio._trigger_event = self._trigger_event + + # report join rooms + self.sio.manager.__basic_enter_room = \ + self.sio.manager.basic_enter_room + self.sio.manager.basic_enter_room = self._basic_enter_room + + # report leave rooms + 
self.sio.manager.__basic_leave_room = \ + self.sio.manager.basic_leave_room + self.sio.manager.basic_leave_room = self._basic_leave_room + + # report emit events + self.sio.manager.__emit = self.sio.manager.emit + self.sio.manager.emit = self._emit + + # report engine.io connections + self.sio.eio.on('connect', self._handle_eio_connect) + self.sio.eio.on('disconnect', self._handle_eio_disconnect) + + # report polling packets + from engineio.async_socket import AsyncSocket + self.sio.eio.__ok = self.sio.eio._ok + self.sio.eio._ok = self._eio_http_response + AsyncSocket.__handle_post_request = AsyncSocket.handle_post_request + AsyncSocket.handle_post_request = functools.partialmethod( + self.__class__._eio_handle_post_request, self) + + # report websocket packets + AsyncSocket.__websocket_handler = AsyncSocket._websocket_handler + AsyncSocket._websocket_handler = functools.partialmethod( + self.__class__._eio_websocket_handler, self) + + # report connected sockets with each ping + if self.mode == 'development': + AsyncSocket.__send_ping = AsyncSocket._send_ping + AsyncSocket._send_ping = functools.partialmethod( + self.__class__._eio_send_ping, self) + + def uninstrument(self): # pragma: no cover + if self.mode == 'development': + self.sio._trigger_event = self.sio.__trigger_event + self.sio.manager.basic_enter_room = \ + self.sio.manager.__basic_enter_room + self.sio.manager.basic_leave_room = \ + self.sio.manager.__basic_leave_room + self.sio.manager.emit = self.sio.manager.__emit + self.sio.eio._ok = self.sio.eio.__ok + + from engineio.async_socket import AsyncSocket + AsyncSocket.handle_post_request = AsyncSocket.__handle_post_request + AsyncSocket._websocket_handler = AsyncSocket.__websocket_handler + if self.mode == 'development': + AsyncSocket._send_ping = AsyncSocket.__send_ping + + async def admin_connect(self, sid, environ, client_auth): + authenticated = True + if self.auth: + authenticated = False + if isinstance(self.auth, dict): + authenticated = 
client_auth == self.auth + elif isinstance(self.auth, list): + authenticated = client_auth in self.auth + else: + if asyncio.iscoroutinefunction(self.auth): + authenticated = await self.auth(client_auth) + else: + authenticated = self.auth(client_auth) + if not authenticated: + raise ConnectionRefusedError('authentication failed') + + async def config(sid): + await self.sio.sleep(0.1) + + # supported features + features = ['AGGREGATED_EVENTS'] + if not self.read_only: + features += ['EMIT', 'JOIN', 'LEAVE', 'DISCONNECT', 'MJOIN', + 'MLEAVE', 'MDISCONNECT'] + if self.mode == 'development': + features.append('ALL_EVENTS') + await self.sio.emit('config', {'supportedFeatures': features}, + to=sid, namespace=self.admin_namespace) + + # send current sockets + if self.mode == 'development': + all_sockets = [] + for nsp in self.sio.manager.get_namespaces(): + for sid, eio_sid in self.sio.manager.get_participants( + nsp, None): + all_sockets.append( + self.serialize_socket(sid, nsp, eio_sid)) + await self.sio.emit('all_sockets', all_sockets, to=sid, + namespace=self.admin_namespace) + + self.sio.start_background_task(config, sid) + self.stop_stats_event = self.sio.eio.create_event() + self.stats_task = self.sio.start_background_task( + self._emit_server_stats) + + async def admin_emit(self, _, namespace, room_filter, event, *data): + await self.sio.emit(event, data, to=room_filter, namespace=namespace) + + async def admin_enter_room(self, _, namespace, room, room_filter=None): + for sid, _ in self.sio.manager.get_participants( + namespace, room_filter): + await self.sio.enter_room(sid, room, namespace=namespace) + + async def admin_leave_room(self, _, namespace, room, room_filter=None): + for sid, _ in self.sio.manager.get_participants( + namespace, room_filter): + await self.sio.leave_room(sid, room, namespace=namespace) + + async def admin_disconnect(self, _, namespace, close, room_filter=None): + for sid, _ in self.sio.manager.get_participants( + namespace, room_filter): 
+ await self.sio.disconnect(sid, namespace=namespace) + + async def shutdown(self): + if self.stats_task: # pragma: no branch + self.stop_stats_event.set() + await asyncio.gather(self.stats_task) + + async def _trigger_event(self, event, namespace, *args): + t = time.time() + sid = args[0] + if event == 'connect': + eio_sid = self.sio.manager.eio_sid_from_sid(sid, namespace) + self.sio.manager._timestamps[sid] = t + serialized_socket = self.serialize_socket(sid, namespace, eio_sid) + await self.sio.emit('socket_connected', ( + serialized_socket, + datetime.fromtimestamp(t, timezone.utc).isoformat(), + ), namespace=self.admin_namespace) + elif event == 'disconnect': + del self.sio.manager._timestamps[sid] + reason = args[1] + await self.sio.emit('socket_disconnected', ( + namespace, + sid, + reason, + datetime.fromtimestamp(t, timezone.utc).isoformat(), + ), namespace=self.admin_namespace) + else: + await self.sio.emit('event_received', ( + namespace, + sid, + (event, *args[1:]), + datetime.fromtimestamp(t, timezone.utc).isoformat(), + ), namespace=self.admin_namespace) + return await self.sio.__trigger_event(event, namespace, *args) + + async def _check_for_upgrade(self, eio_sid, sid, + namespace): # pragma: no cover + for _ in range(5): + await self.sio.sleep(5) + try: + if self.sio.eio._get_socket(eio_sid).upgraded: + await self.sio.emit('socket_updated', { + 'id': sid, + 'nsp': namespace, + 'transport': 'websocket', + }, namespace=self.admin_namespace) + break + except KeyError: + pass + + def _basic_enter_room(self, sid, namespace, room, eio_sid=None): + ret = self.sio.manager.__basic_enter_room(sid, namespace, room, + eio_sid) + if room: + self.admin_queue.append(('room_joined', ( + namespace, + room, + sid, + datetime.now(timezone.utc).isoformat(), + ))) + return ret + + def _basic_leave_room(self, sid, namespace, room): + if room: + self.admin_queue.append(('room_left', ( + namespace, + room, + sid, + datetime.now(timezone.utc).isoformat(), + ))) + return 
self.sio.manager.__basic_leave_room(sid, namespace, room) + + async def _emit(self, event, data, namespace, room=None, skip_sid=None, + callback=None, **kwargs): + ret = await self.sio.manager.__emit( + event, data, namespace, room=room, skip_sid=skip_sid, + callback=callback, **kwargs) + if namespace != self.admin_namespace: + event_data = [event] + list(data) if isinstance(data, tuple) \ + else [event, data] + if not isinstance(skip_sid, list): # pragma: no branch + skip_sid = [skip_sid] + for sid, _ in self.sio.manager.get_participants(namespace, room): + if sid not in skip_sid: + await self.sio.emit('event_sent', ( + namespace, + sid, + event_data, + datetime.now(timezone.utc).isoformat(), + ), namespace=self.admin_namespace) + return ret + + async def _handle_eio_connect(self, eio_sid, environ): + if self.stop_stats_event is None: + self.stop_stats_event = self.sio.eio.create_event() + self.stats_task = self.sio.start_background_task( + self._emit_server_stats) + + self.event_buffer.push('rawConnection') + return await self.sio._handle_eio_connect(eio_sid, environ) + + async def _handle_eio_disconnect(self, eio_sid, reason): + self.event_buffer.push('rawDisconnection') + return await self.sio._handle_eio_disconnect(eio_sid, reason) + + def _eio_http_response(self, packets=None, headers=None, jsonp_index=None): + ret = self.sio.eio.__ok(packets=packets, headers=headers, + jsonp_index=jsonp_index) + self.event_buffer.push('packetsOut') + self.event_buffer.push('bytesOut', len(ret['response'])) + return ret + + async def _eio_handle_post_request(socket, self, environ): + ret = await socket.__handle_post_request(environ) + self.event_buffer.push('packetsIn') + self.event_buffer.push( + 'bytesIn', int(environ.get('CONTENT_LENGTH', 0))) + return ret + + async def _eio_websocket_handler(socket, self, ws): + async def _send(ws, data): + self.event_buffer.push('packetsOut') + self.event_buffer.push('bytesOut', len(data)) + return await ws.__send(data) + + async def 
_wait(ws): + ret = await ws.__wait() + self.event_buffer.push('packetsIn') + self.event_buffer.push('bytesIn', len(ret or '')) + return ret + + ws.__send = ws.send + ws.send = functools.partial(_send, ws) + ws.__wait = ws.wait + ws.wait = functools.partial(_wait, ws) + return await socket.__websocket_handler(ws) + + async def _eio_send_ping(socket, self): # pragma: no cover + eio_sid = socket.sid + t = time.time() + for namespace in self.sio.manager.get_namespaces(): + sid = self.sio.manager.sid_from_eio_sid(eio_sid, namespace) + if sid: + serialized_socket = self.serialize_socket(sid, namespace, + eio_sid) + await self.sio.emit('socket_connected', ( + serialized_socket, + datetime.fromtimestamp(t, timezone.utc).isoformat(), + ), namespace=self.admin_namespace) + return await socket.__send_ping() + + async def _emit_server_stats(self): + start_time = time.time() + namespaces = list(self.sio.handlers.keys()) + namespaces.sort() + while not self.stop_stats_event.is_set(): + await self.sio.sleep(self.server_stats_interval) + await self.sio.emit('server_stats', { + 'serverId': self.server_id, + 'hostname': HOSTNAME, + 'pid': PID, + 'uptime': time.time() - start_time, + 'clientsCount': len(self.sio.eio.sockets), + 'pollingClientsCount': len( + [s for s in self.sio.eio.sockets.values() + if not s.upgraded]), + 'aggregatedEvents': self.event_buffer.get_and_clear(), + 'namespaces': [{ + 'name': nsp, + 'socketsCount': len(self.sio.manager.rooms.get( + nsp, {None: []}).get(None, [])) + } for nsp in namespaces], + }, namespace=self.admin_namespace) + while self.admin_queue: + event, args = self.admin_queue.pop(0) + await self.sio.emit(event, args, + namespace=self.admin_namespace) + + def serialize_socket(self, sid, namespace, eio_sid=None): + if eio_sid is None: # pragma: no cover + eio_sid = self.sio.manager.eio_sid_from_sid(sid) + socket = self.sio.eio._get_socket(eio_sid) + environ = self.sio.environ.get(eio_sid, {}) + tm = self.sio.manager._timestamps[sid] if sid in \ + 
self.sio.manager._timestamps else 0 + return { + 'id': sid, + 'clientId': eio_sid, + 'transport': 'websocket' if socket.upgraded else 'polling', + 'nsp': namespace, + 'data': {}, + 'handshake': { + 'address': environ.get('REMOTE_ADDR', ''), + 'headers': {k[5:].lower(): v for k, v in environ.items() + if k.startswith('HTTP_')}, + 'query': {k: v[0] if len(v) == 1 else v for k, v in parse_qs( + environ.get('QUERY_STRING', '')).items()}, + 'secure': environ.get('wsgi.url_scheme', '') == 'https', + 'url': environ.get('PATH_INFO', ''), + 'issued': tm * 1000, + 'time': datetime.fromtimestamp(tm, timezone.utc).isoformat() + if tm else '', + }, + 'rooms': self.sio.manager.get_rooms(sid, namespace), + } diff --git a/env/lib/python3.10/site-packages/socketio/async_aiopika_manager.py b/env/lib/python3.10/site-packages/socketio/async_aiopika_manager.py new file mode 100644 index 0000000..b6f09b8 --- /dev/null +++ b/env/lib/python3.10/site-packages/socketio/async_aiopika_manager.py @@ -0,0 +1,126 @@ +import asyncio +import pickle + +from .async_pubsub_manager import AsyncPubSubManager + +try: + import aio_pika +except ImportError: + aio_pika = None + + +class AsyncAioPikaManager(AsyncPubSubManager): # pragma: no cover + """Client manager that uses aio_pika for inter-process messaging under + asyncio. + + This class implements a client manager backend for event sharing across + multiple processes, using RabbitMQ + + To use a aio_pika backend, initialize the :class:`Server` instance as + follows:: + + url = 'amqp://user:password@hostname:port//' + server = socketio.Server(client_manager=socketio.AsyncAioPikaManager( + url)) + + :param url: The connection URL for the backend messaging queue. Example + connection URLs are ``'amqp://guest:guest@localhost:5672//'`` + for RabbitMQ. + :param channel: The channel name on which the server sends and receives + notifications. Must be the same in all the servers. 
+ With this manager, the channel name is the exchange name + in rabbitmq + :param write_only: If set to ``True``, only initialize to emit events. The + default of ``False`` initializes the class for emitting + and receiving. + """ + + name = 'asyncaiopika' + + def __init__(self, url='amqp://guest:guest@localhost:5672//', + channel='socketio', write_only=False, logger=None): + if aio_pika is None: + raise RuntimeError('aio_pika package is not installed ' + '(Run "pip install aio_pika" in your ' + 'virtualenv).') + self.url = url + self._lock = asyncio.Lock() + self.publisher_connection = None + self.publisher_channel = None + self.publisher_exchange = None + super().__init__(channel=channel, write_only=write_only, logger=logger) + + async def _connection(self): + return await aio_pika.connect_robust(self.url) + + async def _channel(self, connection): + return await connection.channel() + + async def _exchange(self, channel): + return await channel.declare_exchange(self.channel, + aio_pika.ExchangeType.FANOUT) + + async def _queue(self, channel, exchange): + queue = await channel.declare_queue(durable=False, + arguments={'x-expires': 300000}) + await queue.bind(exchange) + return queue + + async def _publish(self, data): + if self.publisher_connection is None: + async with self._lock: + if self.publisher_connection is None: + self.publisher_connection = await self._connection() + self.publisher_channel = await self._channel( + self.publisher_connection + ) + self.publisher_exchange = await self._exchange( + self.publisher_channel + ) + retry = True + while True: + try: + await self.publisher_exchange.publish( + aio_pika.Message( + body=pickle.dumps(data), + delivery_mode=aio_pika.DeliveryMode.PERSISTENT + ), routing_key='*', + ) + break + except aio_pika.AMQPException: + if retry: + self._get_logger().error('Cannot publish to rabbitmq... ' + 'retrying') + retry = False + else: + self._get_logger().error( + 'Cannot publish to rabbitmq... 
giving up') + break + except aio_pika.exceptions.ChannelInvalidStateError: + # aio_pika raises this exception when the task is cancelled + raise asyncio.CancelledError() + + async def _listen(self): + async with (await self._connection()) as connection: + channel = await self._channel(connection) + await channel.set_qos(prefetch_count=1) + exchange = await self._exchange(channel) + queue = await self._queue(channel, exchange) + + retry_sleep = 1 + while True: + try: + async with queue.iterator() as queue_iter: + async for message in queue_iter: + async with message.process(): + yield pickle.loads(message.body) + retry_sleep = 1 + except aio_pika.AMQPException: + self._get_logger().error( + 'Cannot receive from rabbitmq... ' + 'retrying in {} secs'.format(retry_sleep)) + await asyncio.sleep(retry_sleep) + retry_sleep = min(retry_sleep * 2, 60) + except aio_pika.exceptions.ChannelInvalidStateError: + # aio_pika raises this exception when the task is cancelled + raise asyncio.CancelledError() diff --git a/env/lib/python3.10/site-packages/socketio/async_client.py b/env/lib/python3.10/site-packages/socketio/async_client.py new file mode 100644 index 0000000..463073e --- /dev/null +++ b/env/lib/python3.10/site-packages/socketio/async_client.py @@ -0,0 +1,604 @@ +import asyncio +import logging +import random + +import engineio + +from . import base_client +from . import exceptions +from . import packet + +default_logger = logging.getLogger('socketio.client') + + +class AsyncClient(base_client.BaseClient): + """A Socket.IO client for asyncio. + + This class implements a fully compliant Socket.IO web client with support + for websocket and long-polling transports. + + :param reconnection: ``True`` if the client should automatically attempt to + reconnect to the server after an interruption, or + ``False`` to not reconnect. The default is ``True``. + :param reconnection_attempts: How many reconnection attempts to issue + before giving up, or 0 for infinite attempts. 
+ The default is 0. + :param reconnection_delay: How long to wait in seconds before the first + reconnection attempt. Each successive attempt + doubles this delay. + :param reconnection_delay_max: The maximum delay between reconnection + attempts. + :param randomization_factor: Randomization amount for each delay between + reconnection attempts. The default is 0.5, + which means that each delay is randomly + adjusted by +/- 50%. + :param logger: To enable logging set to ``True`` or pass a logger object to + use. To disable logging set to ``False``. The default is + ``False``. Note that fatal errors are logged even when + ``logger`` is ``False``. + :param json: An alternative json module to use for encoding and decoding + packets. Custom json modules must have ``dumps`` and ``loads`` + functions that are compatible with the standard library + versions. + :param handle_sigint: Set to ``True`` to automatically handle disconnection + when the process is interrupted, or to ``False`` to + leave interrupt handling to the calling application. + Interrupt handling can only be enabled when the + client instance is created in the main thread. + + The Engine.IO configuration supports the following settings: + + :param request_timeout: A timeout in seconds for requests. The default is + 5 seconds. + :param http_session: an initialized ``aiohttp.ClientSession`` object to be + used when sending requests to the server. Use it if + you need to add special client options such as proxy + servers, SSL certificates, custom CA bundle, etc. + :param ssl_verify: ``True`` to verify SSL certificates, or ``False`` to + skip SSL certificate verification, allowing + connections to servers with self signed certificates. + The default is ``True``. + :param websocket_extra_options: Dictionary containing additional keyword + arguments passed to + ``websocket.create_connection()``. + :param engineio_logger: To enable Engine.IO logging set to ``True`` or pass + a logger object to use. 
To disable logging set to + ``False``. The default is ``False``. Note that + fatal errors are logged even when + ``engineio_logger`` is ``False``. + """ + def is_asyncio_based(self): + return True + + async def connect(self, url, headers={}, auth=None, transports=None, + namespaces=None, socketio_path='socket.io', wait=True, + wait_timeout=1, retry=False): + """Connect to a Socket.IO server. + + :param url: The URL of the Socket.IO server. It can include custom + query string parameters if required by the server. If a + function is provided, the client will invoke it to obtain + the URL each time a connection or reconnection is + attempted. + :param headers: A dictionary with custom headers to send with the + connection request. If a function is provided, the + client will invoke it to obtain the headers dictionary + each time a connection or reconnection is attempted. + :param auth: Authentication data passed to the server with the + connection request, normally a dictionary with one or + more string key/value pairs. If a function is provided, + the client will invoke it to obtain the authentication + data each time a connection or reconnection is attempted. + :param transports: The list of allowed transports. Valid transports + are ``'polling'`` and ``'websocket'``. If not + given, the polling transport is connected first, + then an upgrade to websocket is attempted. + :param namespaces: The namespaces to connect as a string or list of + strings. If not given, the namespaces that have + registered event handlers are connected. + :param socketio_path: The endpoint where the Socket.IO server is + installed. The default value is appropriate for + most cases. + :param wait: if set to ``True`` (the default) the call only returns + when all the namespaces are connected. If set to + ``False``, the call returns as soon as the Engine.IO + transport is connected, and the namespaces will connect + in the background. 
+ :param wait_timeout: How long the client should wait for the + connection. The default is 1 second. This + argument is only considered when ``wait`` is set + to ``True``. + :param retry: Apply the reconnection logic if the initial connection + attempt fails. The default is ``False``. + + Note: this method is a coroutine. + + Example usage:: + + sio = socketio.AsyncClient() + await sio.connect('http://localhost:5000') + """ + if self.connected: + raise exceptions.ConnectionError('Already connected') + + self.connection_url = url + self.connection_headers = headers + self.connection_auth = auth + self.connection_transports = transports + self.connection_namespaces = namespaces + self.socketio_path = socketio_path + + if namespaces is None: + namespaces = list(set(self.handlers.keys()).union( + set(self.namespace_handlers.keys()))) + if '*' in namespaces: + namespaces.remove('*') + if len(namespaces) == 0: + namespaces = ['/'] + elif isinstance(namespaces, str): + namespaces = [namespaces] + self.connection_namespaces = namespaces + self.namespaces = {} + if self._connect_event is None: + self._connect_event = self.eio.create_event() + else: + self._connect_event.clear() + real_url = await self._get_real_value(self.connection_url) + real_headers = await self._get_real_value(self.connection_headers) + try: + await self.eio.connect(real_url, headers=real_headers, + transports=transports, + engineio_path=socketio_path) + except engineio.exceptions.ConnectionError as exc: + for n in self.connection_namespaces: + await self._trigger_event( + 'connect_error', n, + exc.args[1] if len(exc.args) > 1 else exc.args[0]) + if retry: # pragma: no cover + await self._handle_reconnect() + if self.eio.state == 'connected': + return + raise exceptions.ConnectionError(exc.args[0]) from None + + if wait: + try: + while True: + await asyncio.wait_for(self._connect_event.wait(), + wait_timeout) + self._connect_event.clear() + if set(self.namespaces) == set(self.connection_namespaces): + 
break + except asyncio.TimeoutError: + pass + if set(self.namespaces) != set(self.connection_namespaces): + await self.disconnect() + raise exceptions.ConnectionError( + 'One or more namespaces failed to connect') + + self.connected = True + + async def wait(self): + """Wait until the connection with the server ends. + + Client applications can use this function to block the main thread + during the life of the connection. + + Note: this method is a coroutine. + """ + while True: + await self.eio.wait() + await self.sleep(1) # give the reconnect task time to start up + if not self._reconnect_task: + if self.eio.state == 'connected': # pragma: no cover + # connected while sleeping above + continue + break + await self._reconnect_task + if self.eio.state != 'connected': + break + + async def emit(self, event, data=None, namespace=None, callback=None): + """Emit a custom event to the server. + + :param event: The event name. It can be any string. The event names + ``'connect'``, ``'message'`` and ``'disconnect'`` are + reserved and should not be used. + :param data: The data to send to the server. Data can be of + type ``str``, ``bytes``, ``list`` or ``dict``. To send + multiple arguments, use a tuple where each element is of + one of the types indicated above. + :param namespace: The Socket.IO namespace for the event. If this + argument is omitted the event is emitted to the + default namespace. + :param callback: If given, this function will be called to acknowledge + the server has received the message. The arguments + that will be passed to the function are those provided + by the server. + + Note: this method is not designed to be used concurrently. If multiple + tasks are emitting at the same time on the same client connection, then + messages composed of multiple packets may end up being sent in an + incorrect sequence. Use standard concurrency solutions (such as a Lock + object) to prevent this situation. + + Note 2: this method is a coroutine. 
+ """ + namespace = namespace or '/' + if namespace not in self.namespaces: + raise exceptions.BadNamespaceError( + namespace + ' is not a connected namespace.') + self.logger.info('Emitting event "%s" [%s]', event, namespace) + if callback is not None: + id = self._generate_ack_id(namespace, callback) + else: + id = None + # tuples are expanded to multiple arguments, everything else is sent + # as a single argument + if isinstance(data, tuple): + data = list(data) + elif data is not None: + data = [data] + else: + data = [] + await self._send_packet(self.packet_class( + packet.EVENT, namespace=namespace, data=[event] + data, id=id)) + + async def send(self, data, namespace=None, callback=None): + """Send a message to the server. + + This function emits an event with the name ``'message'``. Use + :func:`emit` to issue custom event names. + + :param data: The data to send to the server. Data can be of + type ``str``, ``bytes``, ``list`` or ``dict``. To send + multiple arguments, use a tuple where each element is of + one of the types indicated above. + :param namespace: The Socket.IO namespace for the event. If this + argument is omitted the event is emitted to the + default namespace. + :param callback: If given, this function will be called to acknowledge + the server has received the message. The arguments + that will be passed to the function are those provided + by the server. + + Note: this method is a coroutine. + """ + await self.emit('message', data=data, namespace=namespace, + callback=callback) + + async def call(self, event, data=None, namespace=None, timeout=60): + """Emit a custom event to the server and wait for the response. + + This method issues an emit with a callback and waits for the callback + to be invoked before returning. If the callback isn't invoked before + the timeout, then a ``TimeoutError`` exception is raised. If the + Socket.IO connection drops during the wait, this method still waits + until the specified timeout. 
+ + :param event: The event name. It can be any string. The event names + ``'connect'``, ``'message'`` and ``'disconnect'`` are + reserved and should not be used. + :param data: The data to send to the server. Data can be of + type ``str``, ``bytes``, ``list`` or ``dict``. To send + multiple arguments, use a tuple where each element is of + one of the types indicated above. + :param namespace: The Socket.IO namespace for the event. If this + argument is omitted the event is emitted to the + default namespace. + :param timeout: The waiting timeout. If the timeout is reached before + the server acknowledges the event, then a + ``TimeoutError`` exception is raised. + + Note: this method is not designed to be used concurrently. If multiple + tasks are emitting at the same time on the same client connection, then + messages composed of multiple packets may end up being sent in an + incorrect sequence. Use standard concurrency solutions (such as a Lock + object) to prevent this situation. + + Note 2: this method is a coroutine. + """ + callback_event = self.eio.create_event() + callback_args = [] + + def event_callback(*args): + callback_args.append(args) + callback_event.set() + + await self.emit(event, data=data, namespace=namespace, + callback=event_callback) + try: + await asyncio.wait_for(callback_event.wait(), timeout) + except asyncio.TimeoutError: + raise exceptions.TimeoutError() from None + return callback_args[0] if len(callback_args[0]) > 1 \ + else callback_args[0][0] if len(callback_args[0]) == 1 \ + else None + + async def disconnect(self): + """Disconnect from the server. + + Note: this method is a coroutine. + """ + # here we just request the disconnection + # later in _handle_eio_disconnect we invoke the disconnect handler + for n in self.namespaces: + await self._send_packet(self.packet_class(packet.DISCONNECT, + namespace=n)) + await self.eio.disconnect(abort=True) + + async def shutdown(self): + """Stop the client. 
+ + If the client is connected to a server, it is disconnected. If the + client is attempting to reconnect to server, the reconnection attempts + are stopped. If the client is not connected to a server and is not + attempting to reconnect, then this function does nothing. + """ + if self.connected: + await self.disconnect() + elif self._reconnect_task: # pragma: no branch + self._reconnect_abort.set() + await self._reconnect_task + + def start_background_task(self, target, *args, **kwargs): + """Start a background task using the appropriate async model. + + This is a utility function that applications can use to start a + background task using the method that is compatible with the + selected async mode. + + :param target: the target function to execute. + :param args: arguments to pass to the function. + :param kwargs: keyword arguments to pass to the function. + + The return value is a ``asyncio.Task`` object. + """ + return self.eio.start_background_task(target, *args, **kwargs) + + async def sleep(self, seconds=0): + """Sleep for the requested amount of time using the appropriate async + model. + + This is a utility function that applications can use to put a task to + sleep without having to worry about using the correct call for the + selected async mode. + + Note: this method is a coroutine. 
+ """ + return await self.eio.sleep(seconds) + + async def _get_real_value(self, value): + """Return the actual value, for parameters that can also be given as + callables.""" + if not callable(value): + return value + if asyncio.iscoroutinefunction(value): + return await value() + return value() + + async def _send_packet(self, pkt): + """Send a Socket.IO packet to the server.""" + encoded_packet = pkt.encode() + if isinstance(encoded_packet, list): + for ep in encoded_packet: + await self.eio.send(ep) + else: + await self.eio.send(encoded_packet) + + async def _handle_connect(self, namespace, data): + namespace = namespace or '/' + if namespace not in self.namespaces: + self.logger.info(f'Namespace {namespace} is connected') + self.namespaces[namespace] = (data or {}).get('sid', self.sid) + await self._trigger_event('connect', namespace=namespace) + self._connect_event.set() + + async def _handle_disconnect(self, namespace): + if not self.connected: + return + namespace = namespace or '/' + await self._trigger_event('disconnect', namespace, + self.reason.SERVER_DISCONNECT) + await self._trigger_event('__disconnect_final', namespace) + if namespace in self.namespaces: + del self.namespaces[namespace] + if not self.namespaces: + self.connected = False + await self.eio.disconnect(abort=True) + + async def _handle_event(self, namespace, id, data): + namespace = namespace or '/' + self.logger.info('Received event "%s" [%s]', data[0], namespace) + r = await self._trigger_event(data[0], namespace, *data[1:]) + if id is not None: + # send ACK packet with the response returned by the handler + # tuples are expanded as multiple arguments + if r is None: + data = [] + elif isinstance(r, tuple): + data = list(r) + else: + data = [r] + await self._send_packet(self.packet_class( + packet.ACK, namespace=namespace, id=id, data=data)) + + async def _handle_ack(self, namespace, id, data): + namespace = namespace or '/' + self.logger.info('Received ack [%s]', namespace) + callback 
= None + try: + callback = self.callbacks[namespace][id] + except KeyError: + # if we get an unknown callback we just ignore it + self.logger.warning('Unknown callback received, ignoring.') + else: + del self.callbacks[namespace][id] + if callback is not None: + if asyncio.iscoroutinefunction(callback): + await callback(*data) + else: + callback(*data) + + async def _handle_error(self, namespace, data): + namespace = namespace or '/' + self.logger.info('Connection to namespace {} was rejected'.format( + namespace)) + if data is None: + data = tuple() + elif not isinstance(data, (tuple, list)): + data = (data,) + await self._trigger_event('connect_error', namespace, *data) + self._connect_event.set() + if namespace in self.namespaces: + del self.namespaces[namespace] + if namespace == '/': + self.namespaces = {} + self.connected = False + + async def _trigger_event(self, event, namespace, *args): + """Invoke an application event handler.""" + # first see if we have an explicit handler for the event + handler, args = self._get_event_handler(event, namespace, args) + if handler: + if asyncio.iscoroutinefunction(handler): + try: + try: + ret = await handler(*args) + except TypeError: + # the legacy disconnect event does not take a reason + # argument + if event == 'disconnect': + ret = await handler(*args[:-1]) + else: # pragma: no cover + raise + except asyncio.CancelledError: # pragma: no cover + ret = None + else: + try: + ret = handler(*args) + except TypeError: + # the legacy disconnect event does not take a reason + # argument + if event == 'disconnect': + ret = handler(*args[:-1]) + else: # pragma: no cover + raise + return ret + + # or else, forward the event to a namepsace handler if one exists + handler, args = self._get_namespace_handler(namespace, args) + if handler: + return await handler.trigger_event(event, *args) + + async def _handle_reconnect(self): + if self._reconnect_abort is None: # pragma: no cover + self._reconnect_abort = 
self.eio.create_event() + self._reconnect_abort.clear() + base_client.reconnecting_clients.append(self) + attempt_count = 0 + current_delay = self.reconnection_delay + while True: + delay = current_delay + current_delay *= 2 + if delay > self.reconnection_delay_max: + delay = self.reconnection_delay_max + delay += self.randomization_factor * (2 * random.random() - 1) + self.logger.info( + 'Connection failed, new attempt in {:.02f} seconds'.format( + delay)) + abort = False + try: + await asyncio.wait_for(self._reconnect_abort.wait(), delay) + abort = True + except asyncio.TimeoutError: + pass + except asyncio.CancelledError: # pragma: no cover + abort = True + if abort: + self.logger.info('Reconnect task aborted') + for n in self.connection_namespaces: + await self._trigger_event('__disconnect_final', + namespace=n) + break + attempt_count += 1 + try: + await self.connect(self.connection_url, + headers=self.connection_headers, + auth=self.connection_auth, + transports=self.connection_transports, + namespaces=self.connection_namespaces, + socketio_path=self.socketio_path, + retry=False) + except (exceptions.ConnectionError, ValueError): + pass + else: + self.logger.info('Reconnection successful') + self._reconnect_task = None + break + if self.reconnection_attempts and \ + attempt_count >= self.reconnection_attempts: + self.logger.info( + 'Maximum reconnection attempts reached, giving up') + for n in self.connection_namespaces: + await self._trigger_event('__disconnect_final', + namespace=n) + break + base_client.reconnecting_clients.remove(self) + + async def _handle_eio_connect(self): + """Handle the Engine.IO connection event.""" + self.logger.info('Engine.IO connection established') + self.sid = self.eio.sid + real_auth = await self._get_real_value(self.connection_auth) or {} + for n in self.connection_namespaces: + await self._send_packet(self.packet_class( + packet.CONNECT, data=real_auth, namespace=n)) + + async def _handle_eio_message(self, data): + 
"""Dispatch Engine.IO messages.""" + if self._binary_packet: + pkt = self._binary_packet + if pkt.add_attachment(data): + self._binary_packet = None + if pkt.packet_type == packet.BINARY_EVENT: + await self._handle_event(pkt.namespace, pkt.id, pkt.data) + else: + await self._handle_ack(pkt.namespace, pkt.id, pkt.data) + else: + pkt = self.packet_class(encoded_packet=data) + if pkt.packet_type == packet.CONNECT: + await self._handle_connect(pkt.namespace, pkt.data) + elif pkt.packet_type == packet.DISCONNECT: + await self._handle_disconnect(pkt.namespace) + elif pkt.packet_type == packet.EVENT: + await self._handle_event(pkt.namespace, pkt.id, pkt.data) + elif pkt.packet_type == packet.ACK: + await self._handle_ack(pkt.namespace, pkt.id, pkt.data) + elif pkt.packet_type == packet.BINARY_EVENT or \ + pkt.packet_type == packet.BINARY_ACK: + self._binary_packet = pkt + elif pkt.packet_type == packet.CONNECT_ERROR: + await self._handle_error(pkt.namespace, pkt.data) + else: + raise ValueError('Unknown packet type.') + + async def _handle_eio_disconnect(self, reason): + """Handle the Engine.IO disconnection event.""" + self.logger.info('Engine.IO connection dropped') + will_reconnect = self.reconnection and self.eio.state == 'connected' + if self.connected: + for n in self.namespaces: + await self._trigger_event('disconnect', n, reason) + if not will_reconnect: + await self._trigger_event('__disconnect_final', n) + self.namespaces = {} + self.connected = False + self.callbacks = {} + self._binary_packet = None + self.sid = None + if will_reconnect and not self._reconnect_task: + self._reconnect_task = self.start_background_task( + self._handle_reconnect) + + def _engineio_client_class(self): + return engineio.AsyncClient diff --git a/env/lib/python3.10/site-packages/socketio/async_manager.py b/env/lib/python3.10/site-packages/socketio/async_manager.py new file mode 100644 index 0000000..47e7a79 --- /dev/null +++ 
b/env/lib/python3.10/site-packages/socketio/async_manager.py @@ -0,0 +1,120 @@ +import asyncio + +from engineio import packet as eio_packet +from socketio import packet +from .base_manager import BaseManager + + +class AsyncManager(BaseManager): + """Manage a client list for an asyncio server.""" + async def can_disconnect(self, sid, namespace): + return self.is_connected(sid, namespace) + + async def emit(self, event, data, namespace, room=None, skip_sid=None, + callback=None, to=None, **kwargs): + """Emit a message to a single client, a room, or all the clients + connected to the namespace. + + Note: this method is a coroutine. + """ + room = to or room + if namespace not in self.rooms: + return + if isinstance(data, tuple): + # tuples are expanded to multiple arguments, everything else is + # sent as a single argument + data = list(data) + elif data is not None: + data = [data] + else: + data = [] + if not isinstance(skip_sid, list): + skip_sid = [skip_sid] + tasks = [] + if not callback: + # when callbacks aren't used the packets sent to each recipient are + # identical, so they can be generated once and reused + pkt = self.server.packet_class( + packet.EVENT, namespace=namespace, data=[event] + data) + encoded_packet = pkt.encode() + if not isinstance(encoded_packet, list): + encoded_packet = [encoded_packet] + eio_pkt = [eio_packet.Packet(eio_packet.MESSAGE, p) + for p in encoded_packet] + for sid, eio_sid in self.get_participants(namespace, room): + if sid not in skip_sid: + for p in eio_pkt: + tasks.append(asyncio.create_task( + self.server._send_eio_packet(eio_sid, p))) + else: + # callbacks are used, so each recipient must be sent a packet that + # contains a unique callback id + # note that callbacks when addressing a group of people are + # implemented but not tested or supported + for sid, eio_sid in self.get_participants(namespace, room): + if sid not in skip_sid: # pragma: no branch + id = self._generate_ack_id(sid, callback) + pkt = 
self.server.packet_class( + packet.EVENT, namespace=namespace, data=[event] + data, + id=id) + tasks.append(asyncio.create_task( + self.server._send_packet(eio_sid, pkt))) + if tasks == []: # pragma: no cover + return + await asyncio.wait(tasks) + + async def connect(self, eio_sid, namespace): + """Register a client connection to a namespace. + + Note: this method is a coroutine. + """ + return super().connect(eio_sid, namespace) + + async def disconnect(self, sid, namespace, **kwargs): + """Disconnect a client. + + Note: this method is a coroutine. + """ + return self.basic_disconnect(sid, namespace, **kwargs) + + async def enter_room(self, sid, namespace, room, eio_sid=None): + """Add a client to a room. + + Note: this method is a coroutine. + """ + return self.basic_enter_room(sid, namespace, room, eio_sid=eio_sid) + + async def leave_room(self, sid, namespace, room): + """Remove a client from a room. + + Note: this method is a coroutine. + """ + return self.basic_leave_room(sid, namespace, room) + + async def close_room(self, room, namespace): + """Remove all participants from a room. + + Note: this method is a coroutine. + """ + return self.basic_close_room(room, namespace) + + async def trigger_callback(self, sid, id, data): + """Invoke an application callback. + + Note: this method is a coroutine. 
+ """ + callback = None + try: + callback = self.callbacks[sid][id] + except KeyError: + # if we get an unknown callback we just ignore it + self._get_logger().warning('Unknown callback received, ignoring.') + else: + del self.callbacks[sid][id] + if callback is not None: + ret = callback(*data) + if asyncio.iscoroutine(ret): + try: + await ret + except asyncio.CancelledError: # pragma: no cover + pass diff --git a/env/lib/python3.10/site-packages/socketio/async_namespace.py b/env/lib/python3.10/site-packages/socketio/async_namespace.py new file mode 100644 index 0000000..42d6508 --- /dev/null +++ b/env/lib/python3.10/site-packages/socketio/async_namespace.py @@ -0,0 +1,287 @@ +import asyncio + +from socketio import base_namespace + + +class AsyncNamespace(base_namespace.BaseServerNamespace): + """Base class for asyncio server-side class-based namespaces. + + A class-based namespace is a class that contains all the event handlers + for a Socket.IO namespace. The event handlers are methods of the class + with the prefix ``on_``, such as ``on_connect``, ``on_disconnect``, + ``on_message``, ``on_json``, and so on. These can be regular functions or + coroutines. + + :param namespace: The Socket.IO namespace to be used with all the event + handlers defined in this class. If this argument is + omitted, the default namespace is used. + """ + def is_asyncio_based(self): + return True + + async def trigger_event(self, event, *args): + """Dispatch an event to the proper handler method. + + In the most common usage, this method is not overloaded by subclasses, + as it performs the routing of events to methods. However, this + method can be overridden if special dispatching rules are needed, or if + having a single method that catches all events is desired. + + Note: this method is a coroutine. 
+ """ + handler_name = 'on_' + (event or '') + if hasattr(self, handler_name): + handler = getattr(self, handler_name) + if asyncio.iscoroutinefunction(handler) is True: + try: + try: + ret = await handler(*args) + except TypeError: + # legacy disconnect events do not have a reason + # argument + if event == 'disconnect': + ret = await handler(*args[:-1]) + else: # pragma: no cover + raise + except asyncio.CancelledError: # pragma: no cover + ret = None + else: + try: + ret = handler(*args) + except TypeError: + # legacy disconnect events do not have a reason + # argument + if event == 'disconnect': + ret = handler(*args[:-1]) + else: # pragma: no cover + raise + return ret + + async def emit(self, event, data=None, to=None, room=None, skip_sid=None, + namespace=None, callback=None, ignore_queue=False): + """Emit a custom event to one or more connected clients. + + The only difference with the :func:`socketio.Server.emit` method is + that when the ``namespace`` argument is not given the namespace + associated with the class is used. + + Note: this method is a coroutine. + """ + return await self.server.emit(event, data=data, to=to, room=room, + skip_sid=skip_sid, + namespace=namespace or self.namespace, + callback=callback, + ignore_queue=ignore_queue) + + async def send(self, data, to=None, room=None, skip_sid=None, + namespace=None, callback=None, ignore_queue=False): + """Send a message to one or more connected clients. + + The only difference with the :func:`socketio.Server.send` method is + that when the ``namespace`` argument is not given the namespace + associated with the class is used. + + Note: this method is a coroutine. 
+ """ + return await self.server.send(data, to=to, room=room, + skip_sid=skip_sid, + namespace=namespace or self.namespace, + callback=callback, + ignore_queue=ignore_queue) + + async def call(self, event, data=None, to=None, sid=None, namespace=None, + timeout=None, ignore_queue=False): + """Emit a custom event to a client and wait for the response. + + The only difference with the :func:`socketio.Server.call` method is + that when the ``namespace`` argument is not given the namespace + associated with the class is used. + """ + return await self.server.call(event, data=data, to=to, sid=sid, + namespace=namespace or self.namespace, + timeout=timeout, + ignore_queue=ignore_queue) + + async def enter_room(self, sid, room, namespace=None): + """Enter a room. + + The only difference with the :func:`socketio.Server.enter_room` method + is that when the ``namespace`` argument is not given the namespace + associated with the class is used. + + Note: this method is a coroutine. + """ + return await self.server.enter_room( + sid, room, namespace=namespace or self.namespace) + + async def leave_room(self, sid, room, namespace=None): + """Leave a room. + + The only difference with the :func:`socketio.Server.leave_room` method + is that when the ``namespace`` argument is not given the namespace + associated with the class is used. + + Note: this method is a coroutine. + """ + return await self.server.leave_room( + sid, room, namespace=namespace or self.namespace) + + async def close_room(self, room, namespace=None): + """Close a room. + + The only difference with the :func:`socketio.Server.close_room` method + is that when the ``namespace`` argument is not given the namespace + associated with the class is used. + + Note: this method is a coroutine. + """ + return await self.server.close_room( + room, namespace=namespace or self.namespace) + + async def get_session(self, sid, namespace=None): + """Return the user session for a client. 
+ + The only difference with the :func:`socketio.Server.get_session` + method is that when the ``namespace`` argument is not given the + namespace associated with the class is used. + + Note: this method is a coroutine. + """ + return await self.server.get_session( + sid, namespace=namespace or self.namespace) + + async def save_session(self, sid, session, namespace=None): + """Store the user session for a client. + + The only difference with the :func:`socketio.Server.save_session` + method is that when the ``namespace`` argument is not given the + namespace associated with the class is used. + + Note: this method is a coroutine. + """ + return await self.server.save_session( + sid, session, namespace=namespace or self.namespace) + + def session(self, sid, namespace=None): + """Return the user session for a client with context manager syntax. + + The only difference with the :func:`socketio.Server.session` method is + that when the ``namespace`` argument is not given the namespace + associated with the class is used. + """ + return self.server.session(sid, namespace=namespace or self.namespace) + + async def disconnect(self, sid, namespace=None): + """Disconnect a client. + + The only difference with the :func:`socketio.Server.disconnect` method + is that when the ``namespace`` argument is not given the namespace + associated with the class is used. + + Note: this method is a coroutine. + """ + return await self.server.disconnect( + sid, namespace=namespace or self.namespace) + + +class AsyncClientNamespace(base_namespace.BaseClientNamespace): + """Base class for asyncio client-side class-based namespaces. + + A class-based namespace is a class that contains all the event handlers + for a Socket.IO namespace. The event handlers are methods of the class + with the prefix ``on_``, such as ``on_connect``, ``on_disconnect``, + ``on_message``, ``on_json``, and so on. These can be regular functions or + coroutines. 
+ + :param namespace: The Socket.IO namespace to be used with all the event + handlers defined in this class. If this argument is + omitted, the default namespace is used. + """ + def is_asyncio_based(self): + return True + + async def trigger_event(self, event, *args): + """Dispatch an event to the proper handler method. + + In the most common usage, this method is not overloaded by subclasses, + as it performs the routing of events to methods. However, this + method can be overridden if special dispatching rules are needed, or if + having a single method that catches all events is desired. + + Note: this method is a coroutine. + """ + handler_name = 'on_' + (event or '') + if hasattr(self, handler_name): + handler = getattr(self, handler_name) + if asyncio.iscoroutinefunction(handler) is True: + try: + try: + ret = await handler(*args) + except TypeError: + # legacy disconnect events do not have a reason + # argument + if event == 'disconnect': + ret = await handler(*args[:-1]) + else: # pragma: no cover + raise + except asyncio.CancelledError: # pragma: no cover + ret = None + else: + try: + ret = handler(*args) + except TypeError: + # legacy disconnect events do not have a reason + # argument + if event == 'disconnect': + ret = handler(*args[:-1]) + else: # pragma: no cover + raise + return ret + + async def emit(self, event, data=None, namespace=None, callback=None): + """Emit a custom event to the server. + + The only difference with the :func:`socketio.Client.emit` method is + that when the ``namespace`` argument is not given the namespace + associated with the class is used. + + Note: this method is a coroutine. + """ + return await self.client.emit(event, data=data, + namespace=namespace or self.namespace, + callback=callback) + + async def send(self, data, namespace=None, callback=None): + """Send a message to the server. 
+ + The only difference with the :func:`socketio.Client.send` method is + that when the ``namespace`` argument is not given the namespace + associated with the class is used. + + Note: this method is a coroutine. + """ + return await self.client.send(data, + namespace=namespace or self.namespace, + callback=callback) + + async def call(self, event, data=None, namespace=None, timeout=None): + """Emit a custom event to the server and wait for the response. + + The only difference with the :func:`socketio.Client.call` method is + that when the ``namespace`` argument is not given the namespace + associated with the class is used. + """ + return await self.client.call(event, data=data, + namespace=namespace or self.namespace, + timeout=timeout) + + async def disconnect(self): + """Disconnect a client. + + The only difference with the :func:`socketio.Client.disconnect` method + is that when the ``namespace`` argument is not given the namespace + associated with the class is used. + + Note: this method is a coroutine. + """ + return await self.client.disconnect() diff --git a/env/lib/python3.10/site-packages/socketio/async_pubsub_manager.py b/env/lib/python3.10/site-packages/socketio/async_pubsub_manager.py new file mode 100644 index 0000000..72946eb --- /dev/null +++ b/env/lib/python3.10/site-packages/socketio/async_pubsub_manager.py @@ -0,0 +1,243 @@ +import asyncio +from functools import partial +import uuid + +from engineio import json +import pickle + +from .async_manager import AsyncManager + + +class AsyncPubSubManager(AsyncManager): + """Manage a client list attached to a pub/sub backend under asyncio. + + This is a base class that enables multiple servers to share the list of + clients, with the servers communicating events through a pub/sub backend. + The use of a pub/sub backend also allows any client connected to the + backend to emit events addressed to Socket.IO clients. 
+ + The actual backends must be implemented by subclasses, this class only + provides a pub/sub generic framework for asyncio applications. + + :param channel: The channel name on which the server sends and receives + notifications. + """ + name = 'asyncpubsub' + + def __init__(self, channel='socketio', write_only=False, logger=None): + super().__init__() + self.channel = channel + self.write_only = write_only + self.host_id = uuid.uuid4().hex + self.logger = logger + + def initialize(self): + super().initialize() + if not self.write_only: + self.thread = self.server.start_background_task(self._thread) + self._get_logger().info(self.name + ' backend initialized.') + + async def emit(self, event, data, namespace=None, room=None, skip_sid=None, + callback=None, to=None, **kwargs): + """Emit a message to a single client, a room, or all the clients + connected to the namespace. + + This method takes care or propagating the message to all the servers + that are connected through the message queue. + + The parameters are the same as in :meth:`.Server.emit`. + + Note: this method is a coroutine. 
+ """ + room = to or room + if kwargs.get('ignore_queue'): + return await super().emit( + event, data, namespace=namespace, room=room, skip_sid=skip_sid, + callback=callback) + namespace = namespace or '/' + if callback is not None: + if self.server is None: + raise RuntimeError('Callbacks can only be issued from the ' + 'context of a server.') + if room is None: + raise ValueError('Cannot use callback without a room set.') + id = self._generate_ack_id(room, callback) + callback = (room, namespace, id) + else: + callback = None + message = {'method': 'emit', 'event': event, 'data': data, + 'namespace': namespace, 'room': room, + 'skip_sid': skip_sid, 'callback': callback, + 'host_id': self.host_id} + await self._handle_emit(message) # handle in this host + await self._publish(message) # notify other hosts + + async def can_disconnect(self, sid, namespace): + if self.is_connected(sid, namespace): + # client is in this server, so we can disconnect directly + return await super().can_disconnect(sid, namespace) + else: + # client is in another server, so we post request to the queue + await self._publish({'method': 'disconnect', 'sid': sid, + 'namespace': namespace or '/', + 'host_id': self.host_id}) + + async def disconnect(self, sid, namespace, **kwargs): + if kwargs.get('ignore_queue'): + return await super().disconnect( + sid, namespace=namespace) + message = {'method': 'disconnect', 'sid': sid, + 'namespace': namespace or '/', 'host_id': self.host_id} + await self._handle_disconnect(message) # handle in this host + await self._publish(message) # notify other hosts + + async def enter_room(self, sid, namespace, room, eio_sid=None): + if self.is_connected(sid, namespace): + # client is in this server, so we can disconnect directly + return await super().enter_room(sid, namespace, room, + eio_sid=eio_sid) + else: + message = {'method': 'enter_room', 'sid': sid, 'room': room, + 'namespace': namespace or '/', 'host_id': self.host_id} + await self._publish(message) # 
notify other hosts + + async def leave_room(self, sid, namespace, room): + if self.is_connected(sid, namespace): + # client is in this server, so we can disconnect directly + return await super().leave_room(sid, namespace, room) + else: + message = {'method': 'leave_room', 'sid': sid, 'room': room, + 'namespace': namespace or '/', 'host_id': self.host_id} + await self._publish(message) # notify other hosts + + async def close_room(self, room, namespace=None): + message = {'method': 'close_room', 'room': room, + 'namespace': namespace or '/', 'host_id': self.host_id} + await self._handle_close_room(message) # handle in this host + await self._publish(message) # notify other hosts + + async def _publish(self, data): + """Publish a message on the Socket.IO channel. + + This method needs to be implemented by the different subclasses that + support pub/sub backends. + """ + raise NotImplementedError('This method must be implemented in a ' + 'subclass.') # pragma: no cover + + async def _listen(self): + """Return the next message published on the Socket.IO channel, + blocking until a message is available. + + This method needs to be implemented by the different subclasses that + support pub/sub backends. 
+ """ + raise NotImplementedError('This method must be implemented in a ' + 'subclass.') # pragma: no cover + + async def _handle_emit(self, message): + # Events with callbacks are very tricky to handle across hosts + # Here in the receiving end we set up a local callback that preserves + # the callback host and id from the sender + remote_callback = message.get('callback') + remote_host_id = message.get('host_id') + if remote_callback is not None and len(remote_callback) == 3: + callback = partial(self._return_callback, remote_host_id, + *remote_callback) + else: + callback = None + await super().emit(message['event'], message['data'], + namespace=message.get('namespace'), + room=message.get('room'), + skip_sid=message.get('skip_sid'), + callback=callback) + + async def _handle_callback(self, message): + if self.host_id == message.get('host_id'): + try: + sid = message['sid'] + id = message['id'] + args = message['args'] + except KeyError: + return + await self.trigger_callback(sid, id, args) + + async def _return_callback(self, host_id, sid, namespace, callback_id, + *args): + # When an event callback is received, the callback is returned back + # the sender, which is identified by the host_id + if host_id == self.host_id: + await self.trigger_callback(sid, callback_id, args) + else: + await self._publish({'method': 'callback', 'host_id': host_id, + 'sid': sid, 'namespace': namespace, + 'id': callback_id, 'args': args}) + + async def _handle_disconnect(self, message): + await self.server.disconnect(sid=message.get('sid'), + namespace=message.get('namespace'), + ignore_queue=True) + + async def _handle_enter_room(self, message): + sid = message.get('sid') + namespace = message.get('namespace') + if self.is_connected(sid, namespace): + await super().enter_room(sid, namespace, message.get('room')) + + async def _handle_leave_room(self, message): + sid = message.get('sid') + namespace = message.get('namespace') + if self.is_connected(sid, namespace): + await 
super().leave_room(sid, namespace, message.get('room')) + + async def _handle_close_room(self, message): + await super().close_room(room=message.get('room'), + namespace=message.get('namespace')) + + async def _thread(self): + while True: + try: + async for message in self._listen(): # pragma: no branch + data = None + if isinstance(message, dict): + data = message + else: + if isinstance(message, bytes): # pragma: no cover + try: + data = pickle.loads(message) + except: + pass + if data is None: + try: + data = json.loads(message) + except: + pass + if data and 'method' in data: + self._get_logger().debug('pubsub message: {}'.format( + data['method'])) + try: + if data['method'] == 'callback': + await self._handle_callback(data) + elif data.get('host_id') != self.host_id: + if data['method'] == 'emit': + await self._handle_emit(data) + elif data['method'] == 'disconnect': + await self._handle_disconnect(data) + elif data['method'] == 'enter_room': + await self._handle_enter_room(data) + elif data['method'] == 'leave_room': + await self._handle_leave_room(data) + elif data['method'] == 'close_room': + await self._handle_close_room(data) + except asyncio.CancelledError: + raise # let the outer try/except handle it + except Exception: + self.server.logger.exception( + 'Handler error in pubsub listening thread') + self.server.logger.error('pubsub listen() exited unexpectedly') + break # loop should never exit except in unit tests! 
+ except asyncio.CancelledError: # pragma: no cover + break + except Exception: # pragma: no cover + self.server.logger.exception('Unexpected Error in pubsub ' + 'listening thread') diff --git a/env/lib/python3.10/site-packages/socketio/async_redis_manager.py b/env/lib/python3.10/site-packages/socketio/async_redis_manager.py new file mode 100644 index 0000000..e039c6e --- /dev/null +++ b/env/lib/python3.10/site-packages/socketio/async_redis_manager.py @@ -0,0 +1,107 @@ +import asyncio +import pickle + +try: # pragma: no cover + from redis import asyncio as aioredis + from redis.exceptions import RedisError +except ImportError: # pragma: no cover + try: + import aioredis + from aioredis.exceptions import RedisError + except ImportError: + aioredis = None + RedisError = None + +from .async_pubsub_manager import AsyncPubSubManager + + +class AsyncRedisManager(AsyncPubSubManager): # pragma: no cover + """Redis based client manager for asyncio servers. + + This class implements a Redis backend for event sharing across multiple + processes. + + To use a Redis backend, initialize the :class:`AsyncServer` instance as + follows:: + + url = 'redis://hostname:port/0' + server = socketio.AsyncServer( + client_manager=socketio.AsyncRedisManager(url)) + + :param url: The connection URL for the Redis server. For a default Redis + store running on the same host, use ``redis://``. To use an + SSL connection, use ``rediss://``. + :param channel: The channel name on which the server sends and receives + notifications. Must be the same in all the servers. + :param write_only: If set to ``True``, only initialize to emit events. The + default of ``False`` initializes the class for emitting + and receiving. + :param redis_options: additional keyword arguments to be passed to + ``aioredis.from_url()``. 
+ """ + name = 'aioredis' + + def __init__(self, url='redis://localhost:6379/0', channel='socketio', + write_only=False, logger=None, redis_options=None): + if aioredis is None: + raise RuntimeError('Redis package is not installed ' + '(Run "pip install redis" in your virtualenv).') + if not hasattr(aioredis.Redis, 'from_url'): + raise RuntimeError('Version 2 of aioredis package is required.') + self.redis_url = url + self.redis_options = redis_options or {} + self._redis_connect() + super().__init__(channel=channel, write_only=write_only, logger=logger) + + def _redis_connect(self): + self.redis = aioredis.Redis.from_url(self.redis_url, + **self.redis_options) + self.pubsub = self.redis.pubsub(ignore_subscribe_messages=True) + + async def _publish(self, data): + retry = True + while True: + try: + if not retry: + self._redis_connect() + return await self.redis.publish( + self.channel, pickle.dumps(data)) + except RedisError: + if retry: + self._get_logger().error('Cannot publish to redis... ' + 'retrying') + retry = False + else: + self._get_logger().error('Cannot publish to redis... ' + 'giving up') + break + + async def _redis_listen_with_retries(self): + retry_sleep = 1 + connect = False + while True: + try: + if connect: + self._redis_connect() + await self.pubsub.subscribe(self.channel) + retry_sleep = 1 + async for message in self.pubsub.listen(): + yield message + except RedisError: + self._get_logger().error('Cannot receive from redis... 
' + 'retrying in ' + '{} secs'.format(retry_sleep)) + connect = True + await asyncio.sleep(retry_sleep) + retry_sleep *= 2 + if retry_sleep > 60: + retry_sleep = 60 + + async def _listen(self): + channel = self.channel.encode('utf-8') + await self.pubsub.subscribe(self.channel) + async for message in self._redis_listen_with_retries(): + if message['channel'] == channel and \ + message['type'] == 'message' and 'data' in message: + yield message['data'] + await self.pubsub.unsubscribe(self.channel) diff --git a/env/lib/python3.10/site-packages/socketio/async_server.py b/env/lib/python3.10/site-packages/socketio/async_server.py new file mode 100644 index 0000000..f10fb8a --- /dev/null +++ b/env/lib/python3.10/site-packages/socketio/async_server.py @@ -0,0 +1,714 @@ +import asyncio + +import engineio + +from . import async_manager +from . import base_server +from . import exceptions +from . import packet + +# this set is used to keep references to background tasks to prevent them from +# being garbage collected mid-execution. Solution taken from +# https://docs.python.org/3/library/asyncio-task.html#asyncio.create_task +task_reference_holder = set() + + +class AsyncServer(base_server.BaseServer): + """A Socket.IO server for asyncio. + + This class implements a fully compliant Socket.IO web server with support + for websocket and long-polling transports, compatible with the asyncio + framework. + + :param client_manager: The client manager instance that will manage the + client list. When this is omitted, the client list + is stored in an in-memory structure, so the use of + multiple connected servers is not possible. + :param logger: To enable logging set to ``True`` or pass a logger object to + use. To disable logging set to ``False``. Note that fatal + errors are logged even when ``logger`` is ``False``. + :param json: An alternative json module to use for encoding and decoding + packets. 
Custom json modules must have ``dumps`` and ``loads`` + functions that are compatible with the standard library + versions. + :param async_handlers: If set to ``True``, event handlers for a client are + executed in separate threads. To run handlers for a + client synchronously, set to ``False``. The default + is ``True``. + :param always_connect: When set to ``False``, new connections are + provisory until the connect handler returns + something other than ``False``, at which point they + are accepted. When set to ``True``, connections are + immediately accepted, and then if the connect + handler returns ``False`` a disconnect is issued. + Set to ``True`` if you need to emit events from the + connect handler and your client is confused when it + receives events before the connection acceptance. + In any other case use the default of ``False``. + :param namespaces: a list of namespaces that are accepted, in addition to + any namespaces for which handlers have been defined. The + default is `['/']`, which always accepts connections to + the default namespace. Set to `'*'` to accept all + namespaces. + :param kwargs: Connection parameters for the underlying Engine.IO server. + + The Engine.IO configuration supports the following settings: + + :param async_mode: The asynchronous model to use. See the Deployment + section in the documentation for a description of the + available options. Valid async modes are "aiohttp", + "sanic", "tornado" and "asgi". If this argument is not + given, "aiohttp" is tried first, followed by "sanic", + "tornado", and finally "asgi". The first async mode that + has all its dependencies installed is the one that is + chosen. + :param ping_interval: The interval in seconds at which the server pings + the client. The default is 25 seconds. For advanced + control, a two element tuple can be given, where + the first number is the ping interval and the second + is a grace period added by the server. 
+ :param ping_timeout: The time in seconds that the client waits for the + server to respond before disconnecting. The default + is 20 seconds. + :param max_http_buffer_size: The maximum size that is accepted for incoming + messages. The default is 1,000,000 bytes. In + spite of its name, the value set in this + argument is enforced for HTTP long-polling and + WebSocket connections. + :param allow_upgrades: Whether to allow transport upgrades or not. The + default is ``True``. + :param http_compression: Whether to compress packages when using the + polling transport. The default is ``True``. + :param compression_threshold: Only compress messages when their byte size + is greater than this value. The default is + 1024 bytes. + :param cookie: If set to a string, it is the name of the HTTP cookie the + server sends back to the client containing the client + session id. If set to a dictionary, the ``'name'`` key + contains the cookie name and other keys define cookie + attributes, where the value of each attribute can be a + string, a callable with no arguments, or a boolean. If set + to ``None`` (the default), a cookie is not sent to the + client. + :param cors_allowed_origins: Origin or list of origins that are allowed to + connect to this server. Only the same origin + is allowed by default. Set this argument to + ``'*'`` to allow all origins, or to ``[]`` to + disable CORS handling. + :param cors_credentials: Whether credentials (cookies, authentication) are + allowed in requests to this server. The default is + ``True``. + :param monitor_clients: If set to ``True``, a background task will ensure + inactive clients are closed. Set to ``False`` to + disable the monitoring task (not recommended). The + default is ``True``. + :param transports: The list of allowed transports. Valid transports + are ``'polling'`` and ``'websocket'``. Defaults to + ``['polling', 'websocket']``. 
+ :param engineio_logger: To enable Engine.IO logging set to ``True`` or pass + a logger object to use. To disable logging set to + ``False``. The default is ``False``. Note that + fatal errors are logged even when + ``engineio_logger`` is ``False``. + """ + def __init__(self, client_manager=None, logger=False, json=None, + async_handlers=True, namespaces=None, **kwargs): + if client_manager is None: + client_manager = async_manager.AsyncManager() + super().__init__(client_manager=client_manager, logger=logger, + json=json, async_handlers=async_handlers, + namespaces=namespaces, **kwargs) + + def is_asyncio_based(self): + return True + + def attach(self, app, socketio_path='socket.io'): + """Attach the Socket.IO server to an application.""" + self.eio.attach(app, socketio_path) + + async def emit(self, event, data=None, to=None, room=None, skip_sid=None, + namespace=None, callback=None, ignore_queue=False): + """Emit a custom event to one or more connected clients. + + :param event: The event name. It can be any string. The event names + ``'connect'``, ``'message'`` and ``'disconnect'`` are + reserved and should not be used. + :param data: The data to send to the client or clients. Data can be of + type ``str``, ``bytes``, ``list`` or ``dict``. To send + multiple arguments, use a tuple where each element is of + one of the types indicated above. + :param to: The recipient of the message. This can be set to the + session ID of a client to address only that client, to any + any custom room created by the application to address all + the clients in that room, or to a list of custom room + names. If this argument is omitted the event is broadcasted + to all connected clients. + :param room: Alias for the ``to`` parameter. + :param skip_sid: The session ID of a client to skip when broadcasting + to a room or to all clients. This can be used to + prevent a message from being sent to the sender. + :param namespace: The Socket.IO namespace for the event. 
If this + argument is omitted the event is emitted to the + default namespace. + :param callback: If given, this function will be called to acknowledge + the client has received the message. The arguments + that will be passed to the function are those provided + by the client. Callback functions can only be used + when addressing an individual client. + :param ignore_queue: Only used when a message queue is configured. If + set to ``True``, the event is emitted to the + clients directly, without going through the queue. + This is more efficient, but only works when a + single server process is used. It is recommended + to always leave this parameter with its default + value of ``False``. + + Note: this method is not designed to be used concurrently. If multiple + tasks are emitting at the same time to the same client connection, then + messages composed of multiple packets may end up being sent in an + incorrect sequence. Use standard concurrency solutions (such as a Lock + object) to prevent this situation. + + Note 2: this method is a coroutine. + """ + namespace = namespace or '/' + room = to or room + self.logger.info('emitting event "%s" to %s [%s]', event, + room or 'all', namespace) + await self.manager.emit(event, data, namespace, room=room, + skip_sid=skip_sid, callback=callback, + ignore_queue=ignore_queue) + + async def send(self, data, to=None, room=None, skip_sid=None, + namespace=None, callback=None, ignore_queue=False): + """Send a message to one or more connected clients. + + This function emits an event with the name ``'message'``. Use + :func:`emit` to issue custom event names. + + :param data: The data to send to the client or clients. Data can be of + type ``str``, ``bytes``, ``list`` or ``dict``. To send + multiple arguments, use a tuple where each element is of + one of the types indicated above. + :param to: The recipient of the message. 
This can be set to the + session ID of a client to address only that client, to any + any custom room created by the application to address all + the clients in that room, or to a list of custom room + names. If this argument is omitted the event is broadcasted + to all connected clients. + :param room: Alias for the ``to`` parameter. + :param skip_sid: The session ID of a client to skip when broadcasting + to a room or to all clients. This can be used to + prevent a message from being sent to the sender. + :param namespace: The Socket.IO namespace for the event. If this + argument is omitted the event is emitted to the + default namespace. + :param callback: If given, this function will be called to acknowledge + the client has received the message. The arguments + that will be passed to the function are those provided + by the client. Callback functions can only be used + when addressing an individual client. + :param ignore_queue: Only used when a message queue is configured. If + set to ``True``, the event is emitted to the + clients directly, without going through the queue. + This is more efficient, but only works when a + single server process is used. It is recommended + to always leave this parameter with its default + value of ``False``. + + Note: this method is a coroutine. + """ + await self.emit('message', data=data, to=to, room=room, + skip_sid=skip_sid, namespace=namespace, + callback=callback, ignore_queue=ignore_queue) + + async def call(self, event, data=None, to=None, sid=None, namespace=None, + timeout=60, ignore_queue=False): + """Emit a custom event to a client and wait for the response. + + This method issues an emit with a callback and waits for the callback + to be invoked before returning. If the callback isn't invoked before + the timeout, then a ``TimeoutError`` exception is raised. If the + Socket.IO connection drops during the wait, this method still waits + until the specified timeout. + + :param event: The event name. 
It can be any string. The event names + ``'connect'``, ``'message'`` and ``'disconnect'`` are + reserved and should not be used. + :param data: The data to send to the client or clients. Data can be of + type ``str``, ``bytes``, ``list`` or ``dict``. To send + multiple arguments, use a tuple where each element is of + one of the types indicated above. + :param to: The session ID of the recipient client. + :param sid: Alias for the ``to`` parameter. + :param namespace: The Socket.IO namespace for the event. If this + argument is omitted the event is emitted to the + default namespace. + :param timeout: The waiting timeout. If the timeout is reached before + the client acknowledges the event, then a + ``TimeoutError`` exception is raised. + :param ignore_queue: Only used when a message queue is configured. If + set to ``True``, the event is emitted to the + client directly, without going through the queue. + This is more efficient, but only works when a + single server process is used. It is recommended + to always leave this parameter with its default + value of ``False``. + + Note: this method is not designed to be used concurrently. If multiple + tasks are emitting at the same time to the same client connection, then + messages composed of multiple packets may end up being sent in an + incorrect sequence. Use standard concurrency solutions (such as a Lock + object) to prevent this situation. + + Note 2: this method is a coroutine. 
+ """ + if to is None and sid is None: + raise ValueError('Cannot use call() to broadcast.') + if not self.async_handlers: + raise RuntimeError( + 'Cannot use call() when async_handlers is False.') + callback_event = self.eio.create_event() + callback_args = [] + + def event_callback(*args): + callback_args.append(args) + callback_event.set() + + await self.emit(event, data=data, room=to or sid, namespace=namespace, + callback=event_callback, ignore_queue=ignore_queue) + try: + await asyncio.wait_for(callback_event.wait(), timeout) + except asyncio.TimeoutError: + raise exceptions.TimeoutError() from None + return callback_args[0] if len(callback_args[0]) > 1 \ + else callback_args[0][0] if len(callback_args[0]) == 1 \ + else None + + async def enter_room(self, sid, room, namespace=None): + """Enter a room. + + This function adds the client to a room. The :func:`emit` and + :func:`send` functions can optionally broadcast events to all the + clients in a room. + + :param sid: Session ID of the client. + :param room: Room name. If the room does not exist it is created. + :param namespace: The Socket.IO namespace for the event. If this + argument is omitted the default namespace is used. + + Note: this method is a coroutine. + """ + namespace = namespace or '/' + self.logger.info('%s is entering room %s [%s]', sid, room, namespace) + await self.manager.enter_room(sid, namespace, room) + + async def leave_room(self, sid, room, namespace=None): + """Leave a room. + + This function removes the client from a room. + + :param sid: Session ID of the client. + :param room: Room name. + :param namespace: The Socket.IO namespace for the event. If this + argument is omitted the default namespace is used. + + Note: this method is a coroutine. + """ + namespace = namespace or '/' + self.logger.info('%s is leaving room %s [%s]', sid, room, namespace) + await self.manager.leave_room(sid, namespace, room) + + async def close_room(self, room, namespace=None): + """Close a room. 
+ + This function removes all the clients from the given room. + + :param room: Room name. + :param namespace: The Socket.IO namespace for the event. If this + argument is omitted the default namespace is used. + + Note: this method is a coroutine. + """ + namespace = namespace or '/' + self.logger.info('room %s is closing [%s]', room, namespace) + await self.manager.close_room(room, namespace) + + async def get_session(self, sid, namespace=None): + """Return the user session for a client. + + :param sid: The session id of the client. + :param namespace: The Socket.IO namespace. If this argument is omitted + the default namespace is used. + + The return value is a dictionary. Modifications made to this + dictionary are not guaranteed to be preserved. If you want to modify + the user session, use the ``session`` context manager instead. + """ + namespace = namespace or '/' + eio_sid = self.manager.eio_sid_from_sid(sid, namespace) + eio_session = await self.eio.get_session(eio_sid) + return eio_session.setdefault(namespace, {}) + + async def save_session(self, sid, session, namespace=None): + """Store the user session for a client. + + :param sid: The session id of the client. + :param session: The session dictionary. + :param namespace: The Socket.IO namespace. If this argument is omitted + the default namespace is used. + """ + namespace = namespace or '/' + eio_sid = self.manager.eio_sid_from_sid(sid, namespace) + eio_session = await self.eio.get_session(eio_sid) + eio_session[namespace] = session + + def session(self, sid, namespace=None): + """Return the user session for a client with context manager syntax. + + :param sid: The session id of the client. + + This is a context manager that returns the user session dictionary for + the client. Any changes that are made to this dictionary inside the + context manager block are saved back to the session. 
Example usage:: + + @eio.on('connect') + def on_connect(sid, environ): + username = authenticate_user(environ) + if not username: + return False + with eio.session(sid) as session: + session['username'] = username + + @eio.on('message') + def on_message(sid, msg): + async with eio.session(sid) as session: + print('received message from ', session['username']) + """ + class _session_context_manager: + def __init__(self, server, sid, namespace): + self.server = server + self.sid = sid + self.namespace = namespace + self.session = None + + async def __aenter__(self): + self.session = await self.server.get_session( + sid, namespace=self.namespace) + return self.session + + async def __aexit__(self, *args): + await self.server.save_session(sid, self.session, + namespace=self.namespace) + + return _session_context_manager(self, sid, namespace) + + async def disconnect(self, sid, namespace=None, ignore_queue=False): + """Disconnect a client. + + :param sid: Session ID of the client. + :param namespace: The Socket.IO namespace to disconnect. If this + argument is omitted the default namespace is used. + :param ignore_queue: Only used when a message queue is configured. If + set to ``True``, the disconnect is processed + locally, without broadcasting on the queue. It is + recommended to always leave this parameter with + its default value of ``False``. + + Note: this method is a coroutine. 
+ """ + namespace = namespace or '/' + if ignore_queue: + delete_it = self.manager.is_connected(sid, namespace) + else: + delete_it = await self.manager.can_disconnect(sid, namespace) + if delete_it: + self.logger.info('Disconnecting %s [%s]', sid, namespace) + eio_sid = self.manager.pre_disconnect(sid, namespace=namespace) + await self._send_packet(eio_sid, self.packet_class( + packet.DISCONNECT, namespace=namespace)) + await self._trigger_event('disconnect', namespace, sid, + self.reason.SERVER_DISCONNECT) + await self.manager.disconnect(sid, namespace=namespace, + ignore_queue=True) + + async def shutdown(self): + """Stop Socket.IO background tasks. + + This method stops all background activity initiated by the Socket.IO + server. It must be called before shutting down the web server. + """ + self.logger.info('Socket.IO is shutting down') + await self.eio.shutdown() + + async def handle_request(self, *args, **kwargs): + """Handle an HTTP request from the client. + + This is the entry point of the Socket.IO application. This function + returns the HTTP response body to deliver to the client. + + Note: this method is a coroutine. + """ + return await self.eio.handle_request(*args, **kwargs) + + def start_background_task(self, target, *args, **kwargs): + """Start a background task using the appropriate async model. + + This is a utility function that applications can use to start a + background task using the method that is compatible with the + selected async mode. + + :param target: the target function to execute. Must be a coroutine. + :param args: arguments to pass to the function. + :param kwargs: keyword arguments to pass to the function. + + The return value is a ``asyncio.Task`` object. + """ + return self.eio.start_background_task(target, *args, **kwargs) + + async def sleep(self, seconds=0): + """Sleep for the requested amount of time using the appropriate async + model. 
+ + This is a utility function that applications can use to put a task to + sleep without having to worry about using the correct call for the + selected async mode. + + Note: this method is a coroutine. + """ + return await self.eio.sleep(seconds) + + def instrument(self, auth=None, mode='development', read_only=False, + server_id=None, namespace='/admin', + server_stats_interval=2): + """Instrument the Socket.IO server for monitoring with the `Socket.IO + Admin UI `_. + + :param auth: Authentication credentials for Admin UI access. Set to a + dictionary with the expected login (usually ``username`` + and ``password``) or a list of dictionaries if more than + one set of credentials need to be available. For more + complex authentication methods, set to a callable that + receives the authentication dictionary as an argument and + returns ``True`` if the user is allowed or ``False`` + otherwise. To disable authentication, set this argument to + ``False`` (not recommended, never do this on a production + server). + :param mode: The reporting mode. The default is ``'development'``, + which is best used while debugging, as it may have a + significant performance effect. Set to ``'production'`` to + reduce the amount of information that is reported to the + admin UI. + :param read_only: If set to ``True``, the admin interface will be + read-only, with no option to modify room assignments + or disconnect clients. The default is ``False``. + :param server_id: The server name to use for this server. If this + argument is omitted, the server generates its own + name. + :param namespace: The Socket.IO namespace to use for the admin + interface. The default is ``/admin``. + :param server_stats_interval: The interval in seconds at which the + server emits a summary of it stats to all + connected admins. 
+ """ + from .async_admin import InstrumentedAsyncServer + return InstrumentedAsyncServer( + self, auth=auth, mode=mode, read_only=read_only, + server_id=server_id, namespace=namespace, + server_stats_interval=server_stats_interval) + + async def _send_packet(self, eio_sid, pkt): + """Send a Socket.IO packet to a client.""" + encoded_packet = pkt.encode() + if isinstance(encoded_packet, list): + for ep in encoded_packet: + await self.eio.send(eio_sid, ep) + else: + await self.eio.send(eio_sid, encoded_packet) + + async def _send_eio_packet(self, eio_sid, eio_pkt): + """Send a raw Engine.IO packet to a client.""" + await self.eio.send_packet(eio_sid, eio_pkt) + + async def _handle_connect(self, eio_sid, namespace, data): + """Handle a client connection request.""" + namespace = namespace or '/' + sid = None + if namespace in self.handlers or namespace in self.namespace_handlers \ + or self.namespaces == '*' or namespace in self.namespaces: + sid = await self.manager.connect(eio_sid, namespace) + if sid is None: + await self._send_packet(eio_sid, self.packet_class( + packet.CONNECT_ERROR, data='Unable to connect', + namespace=namespace)) + return + + if self.always_connect: + await self._send_packet(eio_sid, self.packet_class( + packet.CONNECT, {'sid': sid}, namespace=namespace)) + fail_reason = exceptions.ConnectionRefusedError().error_args + try: + if data: + success = await self._trigger_event( + 'connect', namespace, sid, self.environ[eio_sid], data) + else: + try: + success = await self._trigger_event( + 'connect', namespace, sid, self.environ[eio_sid]) + except TypeError: + success = await self._trigger_event( + 'connect', namespace, sid, self.environ[eio_sid], None) + except exceptions.ConnectionRefusedError as exc: + fail_reason = exc.error_args + success = False + + if success is False: + if self.always_connect: + self.manager.pre_disconnect(sid, namespace) + await self._send_packet(eio_sid, self.packet_class( + packet.DISCONNECT, data=fail_reason, 
namespace=namespace)) + else: + await self._send_packet(eio_sid, self.packet_class( + packet.CONNECT_ERROR, data=fail_reason, + namespace=namespace)) + await self.manager.disconnect(sid, namespace, ignore_queue=True) + elif not self.always_connect: + await self._send_packet(eio_sid, self.packet_class( + packet.CONNECT, {'sid': sid}, namespace=namespace)) + + async def _handle_disconnect(self, eio_sid, namespace, reason=None): + """Handle a client disconnect.""" + namespace = namespace or '/' + sid = self.manager.sid_from_eio_sid(eio_sid, namespace) + if not self.manager.is_connected(sid, namespace): # pragma: no cover + return + self.manager.pre_disconnect(sid, namespace=namespace) + await self._trigger_event('disconnect', namespace, sid, + reason or self.reason.CLIENT_DISCONNECT) + await self.manager.disconnect(sid, namespace, ignore_queue=True) + + async def _handle_event(self, eio_sid, namespace, id, data): + """Handle an incoming client event.""" + namespace = namespace or '/' + sid = self.manager.sid_from_eio_sid(eio_sid, namespace) + self.logger.info('received event "%s" from %s [%s]', data[0], sid, + namespace) + if not self.manager.is_connected(sid, namespace): + self.logger.warning('%s is not connected to namespace %s', + sid, namespace) + return + if self.async_handlers: + task = self.start_background_task( + self._handle_event_internal, self, sid, eio_sid, data, + namespace, id) + task_reference_holder.add(task) + task.add_done_callback(task_reference_holder.discard) + else: + await self._handle_event_internal(self, sid, eio_sid, data, + namespace, id) + + async def _handle_event_internal(self, server, sid, eio_sid, data, + namespace, id): + r = await server._trigger_event(data[0], namespace, sid, *data[1:]) + if r != self.not_handled and id is not None: + # send ACK packet with the response returned by the handler + # tuples are expanded as multiple arguments + if r is None: + data = [] + elif isinstance(r, tuple): + data = list(r) + else: + data = [r] 
+ await server._send_packet(eio_sid, self.packet_class( + packet.ACK, namespace=namespace, id=id, data=data)) + + async def _handle_ack(self, eio_sid, namespace, id, data): + """Handle ACK packets from the client.""" + namespace = namespace or '/' + sid = self.manager.sid_from_eio_sid(eio_sid, namespace) + self.logger.info('received ack from %s [%s]', sid, namespace) + await self.manager.trigger_callback(sid, id, data) + + async def _trigger_event(self, event, namespace, *args): + """Invoke an application event handler.""" + # first see if we have an explicit handler for the event + handler, args = self._get_event_handler(event, namespace, args) + if handler: + if asyncio.iscoroutinefunction(handler): + try: + try: + ret = await handler(*args) + except TypeError: + # legacy disconnect events use only one argument + if event == 'disconnect': + ret = await handler(*args[:-1]) + else: # pragma: no cover + raise + except asyncio.CancelledError: # pragma: no cover + ret = None + else: + try: + ret = handler(*args) + except TypeError: + # legacy disconnect events use only one argument + if event == 'disconnect': + ret = handler(*args[:-1]) + else: # pragma: no cover + raise + return ret + # or else, forward the event to a namespace handler if one exists + handler, args = self._get_namespace_handler(namespace, args) + if handler: + return await handler.trigger_event(event, *args) + else: + return self.not_handled + + async def _handle_eio_connect(self, eio_sid, environ): + """Handle the Engine.IO connection event.""" + if not self.manager_initialized: + self.manager_initialized = True + self.manager.initialize() + self.environ[eio_sid] = environ + + async def _handle_eio_message(self, eio_sid, data): + """Dispatch Engine.IO messages.""" + if eio_sid in self._binary_packet: + pkt = self._binary_packet[eio_sid] + if pkt.add_attachment(data): + del self._binary_packet[eio_sid] + if pkt.packet_type == packet.BINARY_EVENT: + await self._handle_event(eio_sid, pkt.namespace, 
pkt.id, + pkt.data) + else: + await self._handle_ack(eio_sid, pkt.namespace, pkt.id, + pkt.data) + else: + pkt = self.packet_class(encoded_packet=data) + if pkt.packet_type == packet.CONNECT: + await self._handle_connect(eio_sid, pkt.namespace, pkt.data) + elif pkt.packet_type == packet.DISCONNECT: + await self._handle_disconnect(eio_sid, pkt.namespace, + self.reason.CLIENT_DISCONNECT) + elif pkt.packet_type == packet.EVENT: + await self._handle_event(eio_sid, pkt.namespace, pkt.id, + pkt.data) + elif pkt.packet_type == packet.ACK: + await self._handle_ack(eio_sid, pkt.namespace, pkt.id, + pkt.data) + elif pkt.packet_type == packet.BINARY_EVENT or \ + pkt.packet_type == packet.BINARY_ACK: + self._binary_packet[eio_sid] = pkt + elif pkt.packet_type == packet.CONNECT_ERROR: + raise ValueError('Unexpected CONNECT_ERROR packet.') + else: + raise ValueError('Unknown packet type.') + + async def _handle_eio_disconnect(self, eio_sid, reason): + """Handle Engine.IO disconnect event.""" + for n in list(self.manager.get_namespaces()).copy(): + await self._handle_disconnect(eio_sid, n, reason) + if eio_sid in self.environ: + del self.environ[eio_sid] + + def _engineio_server_class(self): + return engineio.AsyncServer diff --git a/env/lib/python3.10/site-packages/socketio/async_simple_client.py b/env/lib/python3.10/site-packages/socketio/async_simple_client.py new file mode 100644 index 0000000..c6cd4fc --- /dev/null +++ b/env/lib/python3.10/site-packages/socketio/async_simple_client.py @@ -0,0 +1,209 @@ +import asyncio +from socketio import AsyncClient +from socketio.exceptions import SocketIOError, TimeoutError, DisconnectedError + + +class AsyncSimpleClient: + """A Socket.IO client. + + This class implements a simple, yet fully compliant Socket.IO web client + with support for websocket and long-polling transports. + + The positional and keyword arguments given in the constructor are passed + to the underlying :func:`socketio.AsyncClient` object. 
+ """ + def __init__(self, *args, **kwargs): + self.client_args = args + self.client_kwargs = kwargs + self.client = None + self.namespace = '/' + self.connected_event = asyncio.Event() + self.connected = False + self.input_event = asyncio.Event() + self.input_buffer = [] + + async def connect(self, url, headers={}, auth=None, transports=None, + namespace='/', socketio_path='socket.io', + wait_timeout=5): + """Connect to a Socket.IO server. + + :param url: The URL of the Socket.IO server. It can include custom + query string parameters if required by the server. If a + function is provided, the client will invoke it to obtain + the URL each time a connection or reconnection is + attempted. + :param headers: A dictionary with custom headers to send with the + connection request. If a function is provided, the + client will invoke it to obtain the headers dictionary + each time a connection or reconnection is attempted. + :param auth: Authentication data passed to the server with the + connection request, normally a dictionary with one or + more string key/value pairs. If a function is provided, + the client will invoke it to obtain the authentication + data each time a connection or reconnection is attempted. + :param transports: The list of allowed transports. Valid transports + are ``'polling'`` and ``'websocket'``. If not + given, the polling transport is connected first, + then an upgrade to websocket is attempted. + :param namespace: The namespace to connect to as a string. If not + given, the default namespace ``/`` is used. + :param socketio_path: The endpoint where the Socket.IO server is + installed. The default value is appropriate for + most cases. + :param wait_timeout: How long the client should wait for the + connection. The default is 5 seconds. + + Note: this method is a coroutine. 
+ """ + if self.connected: + raise RuntimeError('Already connected') + self.namespace = namespace + self.input_buffer = [] + self.input_event.clear() + self.client = AsyncClient(*self.client_args, **self.client_kwargs) + + @self.client.event(namespace=self.namespace) + def connect(): # pragma: no cover + self.connected = True + self.connected_event.set() + + @self.client.event(namespace=self.namespace) + def disconnect(): # pragma: no cover + self.connected_event.clear() + + @self.client.event(namespace=self.namespace) + def __disconnect_final(): # pragma: no cover + self.connected = False + self.connected_event.set() + + @self.client.on('*', namespace=self.namespace) + def on_event(event, *args): # pragma: no cover + self.input_buffer.append([event, *args]) + self.input_event.set() + + await self.client.connect( + url, headers=headers, auth=auth, transports=transports, + namespaces=[namespace], socketio_path=socketio_path, + wait_timeout=wait_timeout) + + @property + def sid(self): + """The session ID received from the server. + + The session ID is not guaranteed to remain constant throughout the life + of the connection, as reconnections can cause it to change. + """ + return self.client.get_sid(self.namespace) if self.client else None + + @property + def transport(self): + """The name of the transport currently in use. + + The transport is returned as a string and can be one of ``polling`` + and ``websocket``. + """ + return self.client.transport if self.client else '' + + async def emit(self, event, data=None): + """Emit an event to the server. + + :param event: The event name. It can be any string. The event names + ``'connect'``, ``'message'`` and ``'disconnect'`` are + reserved and should not be used. + :param data: The data to send to the server. Data can be of + type ``str``, ``bytes``, ``list`` or ``dict``. To send + multiple arguments, use a tuple where each element is of + one of the types indicated above. + + Note: this method is a coroutine. 
+ + This method schedules the event to be sent out and returns, without + actually waiting for its delivery. In cases where the client needs to + ensure that the event was received, :func:`socketio.SimpleClient.call` + should be used instead. + """ + while True: + await self.connected_event.wait() + if not self.connected: + raise DisconnectedError() + try: + return await self.client.emit(event, data, + namespace=self.namespace) + except SocketIOError: + pass + + async def call(self, event, data=None, timeout=60): + """Emit an event to the server and wait for a response. + + This method issues an emit and waits for the server to provide a + response or acknowledgement. If the response does not arrive before the + timeout, then a ``TimeoutError`` exception is raised. + + :param event: The event name. It can be any string. The event names + ``'connect'``, ``'message'`` and ``'disconnect'`` are + reserved and should not be used. + :param data: The data to send to the server. Data can be of + type ``str``, ``bytes``, ``list`` or ``dict``. To send + multiple arguments, use a tuple where each element is of + one of the types indicated above. + :param timeout: The waiting timeout. If the timeout is reached before + the server acknowledges the event, then a + ``TimeoutError`` exception is raised. + + Note: this method is a coroutine. + """ + while True: + await self.connected_event.wait() + if not self.connected: + raise DisconnectedError() + try: + return await self.client.call(event, data, + namespace=self.namespace, + timeout=timeout) + except SocketIOError: + pass + + async def receive(self, timeout=None): + """Wait for an event from the server. + + :param timeout: The waiting timeout. If the timeout is reached before + the server acknowledges the event, then a + ``TimeoutError`` exception is raised. + + Note: this method is a coroutine. + + The return value is a list with the event name as the first element. 
If + the server included arguments with the event, they are returned as + additional list elements. + """ + while not self.input_buffer: + try: + await asyncio.wait_for(self.connected_event.wait(), + timeout=timeout) + except asyncio.TimeoutError: # pragma: no cover + raise TimeoutError() + if not self.connected: + raise DisconnectedError() + try: + await asyncio.wait_for(self.input_event.wait(), + timeout=timeout) + except asyncio.TimeoutError: + raise TimeoutError() + self.input_event.clear() + return self.input_buffer.pop(0) + + async def disconnect(self): + """Disconnect from the server. + + Note: this method is a coroutine. + """ + if self.connected: + await self.client.disconnect() + self.client = None + self.connected = False + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + await self.disconnect() diff --git a/env/lib/python3.10/site-packages/socketio/base_client.py b/env/lib/python3.10/site-packages/socketio/base_client.py new file mode 100644 index 0000000..7bf4420 --- /dev/null +++ b/env/lib/python3.10/site-packages/socketio/base_client.py @@ -0,0 +1,295 @@ +import itertools +import logging +import signal +import threading + +import engineio + +from . import base_namespace +from . import packet + +default_logger = logging.getLogger('socketio.client') +reconnecting_clients = [] + + +def signal_handler(sig, frame): # pragma: no cover + """SIGINT handler. + + Notify any clients that are in a reconnect loop to abort. Other + disconnection tasks are handled at the engine.io level. + """ + for client in reconnecting_clients[:]: + client._reconnect_abort.set() + if callable(original_signal_handler): + return original_signal_handler(sig, frame) + else: # pragma: no cover + # Handle case where no original SIGINT handler was present. 
+ return signal.default_int_handler(sig, frame) + + +original_signal_handler = None + + +class BaseClient: + reserved_events = ['connect', 'connect_error', 'disconnect', + '__disconnect_final'] + reason = engineio.Client.reason + + def __init__(self, reconnection=True, reconnection_attempts=0, + reconnection_delay=1, reconnection_delay_max=5, + randomization_factor=0.5, logger=False, serializer='default', + json=None, handle_sigint=True, **kwargs): + global original_signal_handler + if handle_sigint and original_signal_handler is None and \ + threading.current_thread() == threading.main_thread(): + original_signal_handler = signal.signal(signal.SIGINT, + signal_handler) + self.reconnection = reconnection + self.reconnection_attempts = reconnection_attempts + self.reconnection_delay = reconnection_delay + self.reconnection_delay_max = reconnection_delay_max + self.randomization_factor = randomization_factor + self.handle_sigint = handle_sigint + + engineio_options = kwargs + engineio_options['handle_sigint'] = handle_sigint + engineio_logger = engineio_options.pop('engineio_logger', None) + if engineio_logger is not None: + engineio_options['logger'] = engineio_logger + if serializer == 'default': + self.packet_class = packet.Packet + elif serializer == 'msgpack': + from . 
import msgpack_packet + self.packet_class = msgpack_packet.MsgPackPacket + else: + self.packet_class = serializer + if json is not None: + self.packet_class.json = json + engineio_options['json'] = json + + self.eio = self._engineio_client_class()(**engineio_options) + self.eio.on('connect', self._handle_eio_connect) + self.eio.on('message', self._handle_eio_message) + self.eio.on('disconnect', self._handle_eio_disconnect) + + if not isinstance(logger, bool): + self.logger = logger + else: + self.logger = default_logger + if self.logger.level == logging.NOTSET: + if logger: + self.logger.setLevel(logging.INFO) + else: + self.logger.setLevel(logging.ERROR) + self.logger.addHandler(logging.StreamHandler()) + + self.connection_url = None + self.connection_headers = None + self.connection_auth = None + self.connection_transports = None + self.connection_namespaces = [] + self.socketio_path = None + self.sid = None + + self.connected = False #: Indicates if the client is connected or not. + self.namespaces = {} #: set of connected namespaces. + self.handlers = {} + self.namespace_handlers = {} + self.callbacks = {} + self._binary_packet = None + self._connect_event = None + self._reconnect_task = None + self._reconnect_abort = None + + def is_asyncio_based(self): + return False + + def on(self, event, handler=None, namespace=None): + """Register an event handler. + + :param event: The event name. It can be any string. The event names + ``'connect'``, ``'message'`` and ``'disconnect'`` are + reserved and should not be used. The ``'*'`` event name + can be used to define a catch-all event handler. + :param handler: The function that should be invoked to handle the + event. When this parameter is not given, the method + acts as a decorator for the handler function. + :param namespace: The Socket.IO namespace for the event. If this + argument is omitted the handler is associated with + the default namespace. 
A catch-all namespace can be + defined by passing ``'*'`` as the namespace. + + Example usage:: + + # as a decorator: + @sio.on('connect') + def connect_handler(): + print('Connected!') + + # as a method: + def message_handler(msg): + print('Received message: ', msg) + sio.send( 'response') + sio.on('message', message_handler) + + The arguments passed to the handler function depend on the event type: + + - The ``'connect'`` event handler does not take arguments. + - The ``'disconnect'`` event handler does not take arguments. + - The ``'message'`` handler and handlers for custom event names receive + the message payload as only argument. Any values returned from a + message handler will be passed to the client's acknowledgement + callback function if it exists. + - A catch-all event handler receives the event name as first argument, + followed by any arguments specific to the event. + - A catch-all namespace event handler receives the namespace as first + argument, followed by any arguments specific to the event. + - A combined catch-all namespace and catch-all event handler receives + the event name as first argument and the namespace as second + argument, followed by any arguments specific to the event. + """ + namespace = namespace or '/' + + def set_handler(handler): + if namespace not in self.handlers: + self.handlers[namespace] = {} + self.handlers[namespace][event] = handler + return handler + + if handler is None: + return set_handler + set_handler(handler) + + def event(self, *args, **kwargs): + """Decorator to register an event handler. + + This is a simplified version of the ``on()`` method that takes the + event name from the decorated function. 
+ + Example usage:: + + @sio.event + def my_event(data): + print('Received data: ', data) + + The above example is equivalent to:: + + @sio.on('my_event') + def my_event(data): + print('Received data: ', data) + + A custom namespace can be given as an argument to the decorator:: + + @sio.event(namespace='/test') + def my_event(data): + print('Received data: ', data) + """ + if len(args) == 1 and len(kwargs) == 0 and callable(args[0]): + # the decorator was invoked without arguments + # args[0] is the decorated function + return self.on(args[0].__name__)(args[0]) + else: + # the decorator was invoked with arguments + def set_handler(handler): + return self.on(handler.__name__, *args, **kwargs)(handler) + + return set_handler + + def register_namespace(self, namespace_handler): + """Register a namespace handler object. + + :param namespace_handler: An instance of a :class:`Namespace` + subclass that handles all the event traffic + for a namespace. + """ + if not isinstance(namespace_handler, + base_namespace.BaseClientNamespace): + raise ValueError('Not a namespace instance') + if self.is_asyncio_based() != namespace_handler.is_asyncio_based(): + raise ValueError('Not a valid namespace class for this client') + namespace_handler._set_client(self) + self.namespace_handlers[namespace_handler.namespace] = \ + namespace_handler + + def get_sid(self, namespace=None): + """Return the ``sid`` associated with a connection. + + :param namespace: The Socket.IO namespace. If this argument is omitted + the handler is associated with the default + namespace. Note that unlike previous versions, the + current version of the Socket.IO protocol uses + different ``sid`` values per namespace. + + This method returns the ``sid`` for the requested namespace as a + string. + """ + return self.namespaces.get(namespace or '/') + + def transport(self): + """Return the name of the transport used by the client. 
+ + The two possible values returned by this function are ``'polling'`` + and ``'websocket'``. + """ + return self.eio.transport() + + def _get_event_handler(self, event, namespace, args): + # return the appropriate application event handler + # + # Resolution priority: + # - self.handlers[namespace][event] + # - self.handlers[namespace]["*"] + # - self.handlers["*"][event] + # - self.handlers["*"]["*"] + handler = None + if namespace in self.handlers: + if event in self.handlers[namespace]: + handler = self.handlers[namespace][event] + elif event not in self.reserved_events and \ + '*' in self.handlers[namespace]: + handler = self.handlers[namespace]['*'] + args = (event, *args) + elif '*' in self.handlers: + if event in self.handlers['*']: + handler = self.handlers['*'][event] + args = (namespace, *args) + elif event not in self.reserved_events and \ + '*' in self.handlers['*']: + handler = self.handlers['*']['*'] + args = (event, namespace, *args) + return handler, args + + def _get_namespace_handler(self, namespace, args): + # Return the appropriate application event handler. 
+ # + # Resolution priority: + # - self.namespace_handlers[namespace] + # - self.namespace_handlers["*"] + handler = None + if namespace in self.namespace_handlers: + handler = self.namespace_handlers[namespace] + elif '*' in self.namespace_handlers: + handler = self.namespace_handlers['*'] + args = (namespace, *args) + return handler, args + + def _generate_ack_id(self, namespace, callback): + """Generate a unique identifier for an ACK packet.""" + namespace = namespace or '/' + if namespace not in self.callbacks: + self.callbacks[namespace] = {0: itertools.count(1)} + id = next(self.callbacks[namespace][0]) + self.callbacks[namespace][id] = callback + return id + + def _handle_eio_connect(self): # pragma: no cover + raise NotImplementedError() + + def _handle_eio_message(self, data): # pragma: no cover + raise NotImplementedError() + + def _handle_eio_disconnect(self, reason): # pragma: no cover + raise NotImplementedError() + + def _engineio_client_class(self): # pragma: no cover + raise NotImplementedError() diff --git a/env/lib/python3.10/site-packages/socketio/base_manager.py b/env/lib/python3.10/site-packages/socketio/base_manager.py new file mode 100644 index 0000000..dafa60a --- /dev/null +++ b/env/lib/python3.10/site-packages/socketio/base_manager.py @@ -0,0 +1,161 @@ +import itertools +import logging + +from bidict import bidict, ValueDuplicationError + +default_logger = logging.getLogger('socketio') + + +class BaseManager: + def __init__(self): + self.logger = None + self.server = None + self.rooms = {} # self.rooms[namespace][room][sio_sid] = eio_sid + self.eio_to_sid = {} + self.callbacks = {} + self.pending_disconnect = {} + + def set_server(self, server): + self.server = server + + def initialize(self): + """Invoked before the first request is received. Subclasses can add + their initialization code here. 
+ """ + pass + + def get_namespaces(self): + """Return an iterable with the active namespace names.""" + return self.rooms.keys() + + def get_participants(self, namespace, room): + """Return an iterable with the active participants in a room.""" + ns = self.rooms.get(namespace, {}) + if hasattr(room, '__len__') and not isinstance(room, str): + participants = ns[room[0]]._fwdm.copy() if room[0] in ns else {} + for r in room[1:]: + participants.update(ns[r]._fwdm if r in ns else {}) + else: + participants = ns[room]._fwdm.copy() if room in ns else {} + yield from participants.items() + + def connect(self, eio_sid, namespace): + """Register a client connection to a namespace.""" + sid = self.server.eio.generate_id() + try: + self.basic_enter_room(sid, namespace, None, eio_sid=eio_sid) + except ValueDuplicationError: + # already connected + return None + self.basic_enter_room(sid, namespace, sid, eio_sid=eio_sid) + return sid + + def is_connected(self, sid, namespace): + if namespace in self.pending_disconnect and \ + sid in self.pending_disconnect[namespace]: + # the client is in the process of being disconnected + return False + try: + return self.rooms[namespace][None][sid] is not None + except KeyError: + pass + return False + + def sid_from_eio_sid(self, eio_sid, namespace): + try: + return self.rooms[namespace][None]._invm[eio_sid] + except KeyError: + pass + + def eio_sid_from_sid(self, sid, namespace): + if namespace in self.rooms: + return self.rooms[namespace][None].get(sid) + + def pre_disconnect(self, sid, namespace): + """Put the client in the to-be-disconnected list. + + This allows the client data structures to be present while the + disconnect handler is invoked, but still recognize the fact that the + client is soon going away. 
+ """ + if namespace not in self.pending_disconnect: + self.pending_disconnect[namespace] = [] + self.pending_disconnect[namespace].append(sid) + return self.rooms[namespace][None].get(sid) + + def basic_disconnect(self, sid, namespace, **kwargs): + if namespace not in self.rooms: + return + rooms = [] + for room_name, room in self.rooms[namespace].copy().items(): + if sid in room: + rooms.append(room_name) + for room in rooms: + self.basic_leave_room(sid, namespace, room) + if sid in self.callbacks: + del self.callbacks[sid] + if namespace in self.pending_disconnect and \ + sid in self.pending_disconnect[namespace]: + self.pending_disconnect[namespace].remove(sid) + if len(self.pending_disconnect[namespace]) == 0: + del self.pending_disconnect[namespace] + + def basic_enter_room(self, sid, namespace, room, eio_sid=None): + if eio_sid is None and namespace not in self.rooms: + raise ValueError('sid is not connected to requested namespace') + if namespace not in self.rooms: + self.rooms[namespace] = {} + if room not in self.rooms[namespace]: + self.rooms[namespace][room] = bidict() + if eio_sid is None: + eio_sid = self.rooms[namespace][None][sid] + self.rooms[namespace][room][sid] = eio_sid + + def basic_leave_room(self, sid, namespace, room): + try: + del self.rooms[namespace][room][sid] + if len(self.rooms[namespace][room]) == 0: + del self.rooms[namespace][room] + if len(self.rooms[namespace]) == 0: + del self.rooms[namespace] + except KeyError: + pass + + def basic_close_room(self, room, namespace): + try: + for sid, _ in self.get_participants(namespace, room): + self.basic_leave_room(sid, namespace, room) + except KeyError: # pragma: no cover + pass + + def get_rooms(self, sid, namespace): + """Return the rooms a client is in.""" + r = [] + try: + for room_name, room in self.rooms[namespace].items(): + if room_name is not None and sid in room: + r.append(room_name) + except KeyError: + pass + return r + + def _generate_ack_id(self, sid, callback): + 
"""Generate a unique identifier for an ACK packet.""" + if sid not in self.callbacks: + self.callbacks[sid] = {0: itertools.count(1)} + id = next(self.callbacks[sid][0]) + self.callbacks[sid][id] = callback + return id + + def _get_logger(self): + """Get the appropriate logger + + Prevents uninitialized servers in write-only mode from failing. + """ + + if self.logger: + return self.logger + elif self.server: + return self.server.logger + else: + return default_logger diff --git a/env/lib/python3.10/site-packages/socketio/base_namespace.py b/env/lib/python3.10/site-packages/socketio/base_namespace.py new file mode 100644 index 0000000..14b5d8f --- /dev/null +++ b/env/lib/python3.10/site-packages/socketio/base_namespace.py @@ -0,0 +1,33 @@ +class BaseNamespace: + def __init__(self, namespace=None): + self.namespace = namespace or '/' + + def is_asyncio_based(self): + return False + + +class BaseServerNamespace(BaseNamespace): + def __init__(self, namespace=None): + super().__init__(namespace=namespace) + self.server = None + + def _set_server(self, server): + self.server = server + + def rooms(self, sid, namespace=None): + """Return the rooms a client is in. + + The only difference with the :func:`socketio.Server.rooms` method is + that when the ``namespace`` argument is not given the namespace + associated with the class is used. + """ + return self.server.rooms(sid, namespace=namespace or self.namespace) + + +class BaseClientNamespace(BaseNamespace): + def __init__(self, namespace=None): + super().__init__(namespace=namespace) + self.client = None + + def _set_client(self, client): + self.client = client diff --git a/env/lib/python3.10/site-packages/socketio/base_server.py b/env/lib/python3.10/site-packages/socketio/base_server.py new file mode 100644 index 0000000..d134eba --- /dev/null +++ b/env/lib/python3.10/site-packages/socketio/base_server.py @@ -0,0 +1,266 @@ +import logging + +import engineio + +from . import manager +from . import base_namespace +from . 
import packet + +default_logger = logging.getLogger('socketio.server') + + +class BaseServer: + reserved_events = ['connect', 'disconnect'] + reason = engineio.Server.reason + + def __init__(self, client_manager=None, logger=False, serializer='default', + json=None, async_handlers=True, always_connect=False, + namespaces=None, **kwargs): + engineio_options = kwargs + engineio_logger = engineio_options.pop('engineio_logger', None) + if engineio_logger is not None: + engineio_options['logger'] = engineio_logger + if serializer == 'default': + self.packet_class = packet.Packet + elif serializer == 'msgpack': + from . import msgpack_packet + self.packet_class = msgpack_packet.MsgPackPacket + else: + self.packet_class = serializer + if json is not None: + self.packet_class.json = json + engineio_options['json'] = json + engineio_options['async_handlers'] = False + self.eio = self._engineio_server_class()(**engineio_options) + self.eio.on('connect', self._handle_eio_connect) + self.eio.on('message', self._handle_eio_message) + self.eio.on('disconnect', self._handle_eio_disconnect) + + self.environ = {} + self.handlers = {} + self.namespace_handlers = {} + self.not_handled = object() + + self._binary_packet = {} + + if not isinstance(logger, bool): + self.logger = logger + else: + self.logger = default_logger + if self.logger.level == logging.NOTSET: + if logger: + self.logger.setLevel(logging.INFO) + else: + self.logger.setLevel(logging.ERROR) + self.logger.addHandler(logging.StreamHandler()) + + if client_manager is None: + client_manager = manager.Manager() + self.manager = client_manager + self.manager.set_server(self) + self.manager_initialized = False + + self.async_handlers = async_handlers + self.always_connect = always_connect + self.namespaces = namespaces or ['/'] + + self.async_mode = self.eio.async_mode + + def is_asyncio_based(self): + return False + + def on(self, event, handler=None, namespace=None): + """Register an event handler. 
+ + :param event: The event name. It can be any string. The event names + ``'connect'``, ``'message'`` and ``'disconnect'`` are + reserved and should not be used. The ``'*'`` event name + can be used to define a catch-all event handler. + :param handler: The function that should be invoked to handle the + event. When this parameter is not given, the method + acts as a decorator for the handler function. + :param namespace: The Socket.IO namespace for the event. If this + argument is omitted the handler is associated with + the default namespace. A catch-all namespace can be + defined by passing ``'*'`` as the namespace. + + Example usage:: + + # as a decorator: + @sio.on('connect', namespace='/chat') + def connect_handler(sid, environ): + print('Connection request') + if environ['REMOTE_ADDR'] in blacklisted: + return False # reject + + # as a method: + def message_handler(sid, msg): + print('Received message: ', msg) + sio.send(sid, 'response') + socket_io.on('message', namespace='/chat', handler=message_handler) + + The arguments passed to the handler function depend on the event type: + + - The ``'connect'`` event handler receives the ``sid`` (session ID) for + the client and the WSGI environment dictionary as arguments. + - The ``'disconnect'`` handler receives the ``sid`` for the client as + only argument. + - The ``'message'`` handler and handlers for custom event names receive + the ``sid`` for the client and the message payload as arguments. Any + values returned from a message handler will be passed to the client's + acknowledgement callback function if it exists. + - A catch-all event handler receives the event name as first argument, + followed by any arguments specific to the event. + - A catch-all namespace event handler receives the namespace as first + argument, followed by any arguments specific to the event. 
+ - A combined catch-all namespace and catch-all event handler receives + the event name as first argument and the namespace as second + argument, followed by any arguments specific to the event. + """ + namespace = namespace or '/' + + def set_handler(handler): + if namespace not in self.handlers: + self.handlers[namespace] = {} + self.handlers[namespace][event] = handler + return handler + + if handler is None: + return set_handler + set_handler(handler) + + def event(self, *args, **kwargs): + """Decorator to register an event handler. + + This is a simplified version of the ``on()`` method that takes the + event name from the decorated function. + + Example usage:: + + @sio.event + def my_event(data): + print('Received data: ', data) + + The above example is equivalent to:: + + @sio.on('my_event') + def my_event(data): + print('Received data: ', data) + + A custom namespace can be given as an argument to the decorator:: + + @sio.event(namespace='/test') + def my_event(data): + print('Received data: ', data) + """ + if len(args) == 1 and len(kwargs) == 0 and callable(args[0]): + # the decorator was invoked without arguments + # args[0] is the decorated function + return self.on(args[0].__name__)(args[0]) + else: + # the decorator was invoked with arguments + def set_handler(handler): + return self.on(handler.__name__, *args, **kwargs)(handler) + + return set_handler + + def register_namespace(self, namespace_handler): + """Register a namespace handler object. + + :param namespace_handler: An instance of a :class:`Namespace` + subclass that handles all the event traffic + for a namespace. 
+ """ + if not isinstance(namespace_handler, + base_namespace.BaseServerNamespace): + raise ValueError('Not a namespace instance') + if self.is_asyncio_based() != namespace_handler.is_asyncio_based(): + raise ValueError('Not a valid namespace class for this server') + namespace_handler._set_server(self) + self.namespace_handlers[namespace_handler.namespace] = \ + namespace_handler + + def rooms(self, sid, namespace=None): + """Return the rooms a client is in. + + :param sid: Session ID of the client. + :param namespace: The Socket.IO namespace for the event. If this + argument is omitted the default namespace is used. + """ + namespace = namespace or '/' + return self.manager.get_rooms(sid, namespace) + + def transport(self, sid, namespace=None): + """Return the name of the transport used by the client. + + The two possible values returned by this function are ``'polling'`` + and ``'websocket'``. + + :param sid: The session of the client. + :param namespace: The Socket.IO namespace. If this argument is omitted + the default namespace is used. + """ + eio_sid = self.manager.eio_sid_from_sid(sid, namespace or '/') + return self.eio.transport(eio_sid) + + def get_environ(self, sid, namespace=None): + """Return the WSGI environ dictionary for a client. + + :param sid: The session of the client. + :param namespace: The Socket.IO namespace. If this argument is omitted + the default namespace is used. 
+ """ + eio_sid = self.manager.eio_sid_from_sid(sid, namespace or '/') + return self.environ.get(eio_sid) + + def _get_event_handler(self, event, namespace, args): + # Return the appropriate application event handler + # + # Resolution priority: + # - self.handlers[namespace][event] + # - self.handlers[namespace]["*"] + # - self.handlers["*"][event] + # - self.handlers["*"]["*"] + handler = None + if namespace in self.handlers: + if event in self.handlers[namespace]: + handler = self.handlers[namespace][event] + elif event not in self.reserved_events and \ + '*' in self.handlers[namespace]: + handler = self.handlers[namespace]['*'] + args = (event, *args) + if handler is None and '*' in self.handlers: + if event in self.handlers['*']: + handler = self.handlers['*'][event] + args = (namespace, *args) + elif event not in self.reserved_events and \ + '*' in self.handlers['*']: + handler = self.handlers['*']['*'] + args = (event, namespace, *args) + return handler, args + + def _get_namespace_handler(self, namespace, args): + # Return the appropriate application event handler. 
+ # + # Resolution priority: + # - self.namespace_handlers[namespace] + # - self.namespace_handlers["*"] + handler = None + if namespace in self.namespace_handlers: + handler = self.namespace_handlers[namespace] + if handler is None and '*' in self.namespace_handlers: + handler = self.namespace_handlers['*'] + args = (namespace, *args) + return handler, args + + def _handle_eio_connect(self): # pragma: no cover + raise NotImplementedError() + + def _handle_eio_message(self, data): # pragma: no cover + raise NotImplementedError() + + def _handle_eio_disconnect(self): # pragma: no cover + raise NotImplementedError() + + def _engineio_server_class(self): # pragma: no cover + raise NotImplementedError('Must be implemented in subclasses') diff --git a/env/lib/python3.10/site-packages/socketio/client.py b/env/lib/python3.10/site-packages/socketio/client.py new file mode 100644 index 0000000..ade2dd6 --- /dev/null +++ b/env/lib/python3.10/site-packages/socketio/client.py @@ -0,0 +1,555 @@ +import random + +import engineio + +from . import base_client +from . import exceptions +from . import packet + + +class Client(base_client.BaseClient): + """A Socket.IO client. + + This class implements a fully compliant Socket.IO web client with support + for websocket and long-polling transports. + + :param reconnection: ``True`` if the client should automatically attempt to + reconnect to the server after an interruption, or + ``False`` to not reconnect. The default is ``True``. + :param reconnection_attempts: How many reconnection attempts to issue + before giving up, or 0 for infinite attempts. + The default is 0. + :param reconnection_delay: How long to wait in seconds before the first + reconnection attempt. Each successive attempt + doubles this delay. + :param reconnection_delay_max: The maximum delay between reconnection + attempts. + :param randomization_factor: Randomization amount for each delay between + reconnection attempts. 
The default is 0.5, + which means that each delay is randomly + adjusted by +/- 50%. + :param logger: To enable logging set to ``True`` or pass a logger object to + use. To disable logging set to ``False``. The default is + ``False``. Note that fatal errors are logged even when + ``logger`` is ``False``. + :param serializer: The serialization method to use when transmitting + packets. Valid values are ``'default'``, ``'pickle'``, + ``'msgpack'`` and ``'cbor'``. Alternatively, a subclass + of the :class:`Packet` class with custom implementations + of the ``encode()`` and ``decode()`` methods can be + provided. Client and server must use compatible + serializers. + :param json: An alternative json module to use for encoding and decoding + packets. Custom json modules must have ``dumps`` and ``loads`` + functions that are compatible with the standard library + versions. + :param handle_sigint: Set to ``True`` to automatically handle disconnection + when the process is interrupted, or to ``False`` to + leave interrupt handling to the calling application. + Interrupt handling can only be enabled when the + client instance is created in the main thread. + + The Engine.IO configuration supports the following settings: + + :param request_timeout: A timeout in seconds for requests. The default is + 5 seconds. + :param http_session: an initialized ``requests.Session`` object to be used + when sending requests to the server. Use it if you + need to add special client options such as proxy + servers, SSL certificates, custom CA bundle, etc. + :param ssl_verify: ``True`` to verify SSL certificates, or ``False`` to + skip SSL certificate verification, allowing + connections to servers with self signed certificates. + The default is ``True``. + :param websocket_extra_options: Dictionary containing additional keyword + arguments passed to + ``websocket.create_connection()``. + :param engineio_logger: To enable Engine.IO logging set to ``True`` or pass + a logger object to use. 
To disable logging set to + ``False``. The default is ``False``. Note that + fatal errors are logged even when + ``engineio_logger`` is ``False``. + """ + def connect(self, url, headers={}, auth=None, transports=None, + namespaces=None, socketio_path='socket.io', wait=True, + wait_timeout=1, retry=False): + """Connect to a Socket.IO server. + + :param url: The URL of the Socket.IO server. It can include custom + query string parameters if required by the server. If a + function is provided, the client will invoke it to obtain + the URL each time a connection or reconnection is + attempted. + :param headers: A dictionary with custom headers to send with the + connection request. If a function is provided, the + client will invoke it to obtain the headers dictionary + each time a connection or reconnection is attempted. + :param auth: Authentication data passed to the server with the + connection request, normally a dictionary with one or + more string key/value pairs. If a function is provided, + the client will invoke it to obtain the authentication + data each time a connection or reconnection is attempted. + :param transports: The list of allowed transports. Valid transports + are ``'polling'`` and ``'websocket'``. If not + given, the polling transport is connected first, + then an upgrade to websocket is attempted. + :param namespaces: The namespaces to connect as a string or list of + strings. If not given, the namespaces that have + registered event handlers are connected. + :param socketio_path: The endpoint where the Socket.IO server is + installed. The default value is appropriate for + most cases. + :param wait: if set to ``True`` (the default) the call only returns + when all the namespaces are connected. If set to + ``False``, the call returns as soon as the Engine.IO + transport is connected, and the namespaces will connect + in the background. + :param wait_timeout: How long the client should wait for the + connection. The default is 1 second. 
This + argument is only considered when ``wait`` is set + to ``True``. + :param retry: Apply the reconnection logic if the initial connection + attempt fails. The default is ``False``. + + Example usage:: + + sio = socketio.Client() + sio.connect('http://localhost:5000') + """ + if self.connected: + raise exceptions.ConnectionError('Already connected') + + self.connection_url = url + self.connection_headers = headers + self.connection_auth = auth + self.connection_transports = transports + self.connection_namespaces = namespaces + self.socketio_path = socketio_path + + if namespaces is None: + namespaces = list(set(self.handlers.keys()).union( + set(self.namespace_handlers.keys()))) + if '*' in namespaces: + namespaces.remove('*') + if len(namespaces) == 0: + namespaces = ['/'] + elif isinstance(namespaces, str): + namespaces = [namespaces] + self.connection_namespaces = namespaces + self.namespaces = {} + if self._connect_event is None: + self._connect_event = self.eio.create_event() + else: + self._connect_event.clear() + real_url = self._get_real_value(self.connection_url) + real_headers = self._get_real_value(self.connection_headers) + try: + self.eio.connect(real_url, headers=real_headers, + transports=transports, + engineio_path=socketio_path) + except engineio.exceptions.ConnectionError as exc: + for n in self.connection_namespaces: + self._trigger_event( + 'connect_error', n, + exc.args[1] if len(exc.args) > 1 else exc.args[0]) + if retry: # pragma: no cover + self._handle_reconnect() + if self.eio.state == 'connected': + return + raise exceptions.ConnectionError(exc.args[0]) from None + + if wait: + while self._connect_event.wait(timeout=wait_timeout): + self._connect_event.clear() + if set(self.namespaces) == set(self.connection_namespaces): + break + if set(self.namespaces) != set(self.connection_namespaces): + self.disconnect() + raise exceptions.ConnectionError( + 'One or more namespaces failed to connect') + + self.connected = True + + def wait(self): 
+ """Wait until the connection with the server ends. + + Client applications can use this function to block the main thread + during the life of the connection. + """ + while True: + self.eio.wait() + self.sleep(1) # give the reconnect task time to start up + if not self._reconnect_task: + if self.eio.state == 'connected': # pragma: no cover + # connected while sleeping above + continue + else: + # the reconnect task gave up + break + self._reconnect_task.join() + if self.eio.state != 'connected': + break + + def emit(self, event, data=None, namespace=None, callback=None): + """Emit a custom event to the server. + + :param event: The event name. It can be any string. The event names + ``'connect'``, ``'message'`` and ``'disconnect'`` are + reserved and should not be used. + :param data: The data to send to the server. Data can be of + type ``str``, ``bytes``, ``list`` or ``dict``. To send + multiple arguments, use a tuple where each element is of + one of the types indicated above. + :param namespace: The Socket.IO namespace for the event. If this + argument is omitted the event is emitted to the + default namespace. + :param callback: If given, this function will be called to acknowledge + the server has received the message. The arguments + that will be passed to the function are those provided + by the server. + + Note: this method is not thread safe. If multiple threads are emitting + at the same time on the same client connection, messages composed of + multiple packets may end up being sent in an incorrect sequence. Use + standard concurrency solutions (such as a Lock object) to prevent this + situation. 
+ """ + namespace = namespace or '/' + if namespace not in self.namespaces: + raise exceptions.BadNamespaceError( + namespace + ' is not a connected namespace.') + self.logger.info('Emitting event "%s" [%s]', event, namespace) + if callback is not None: + id = self._generate_ack_id(namespace, callback) + else: + id = None + # tuples are expanded to multiple arguments, everything else is sent + # as a single argument + if isinstance(data, tuple): + data = list(data) + elif data is not None: + data = [data] + else: + data = [] + self._send_packet(self.packet_class(packet.EVENT, namespace=namespace, + data=[event] + data, id=id)) + + def send(self, data, namespace=None, callback=None): + """Send a message to the server. + + This function emits an event with the name ``'message'``. Use + :func:`emit` to issue custom event names. + + :param data: The data to send to the server. Data can be of + type ``str``, ``bytes``, ``list`` or ``dict``. To send + multiple arguments, use a tuple where each element is of + one of the types indicated above. + :param namespace: The Socket.IO namespace for the event. If this + argument is omitted the event is emitted to the + default namespace. + :param callback: If given, this function will be called to acknowledge + the server has received the message. The arguments + that will be passed to the function are those provided + by the server. + """ + self.emit('message', data=data, namespace=namespace, + callback=callback) + + def call(self, event, data=None, namespace=None, timeout=60): + """Emit a custom event to the server and wait for the response. + + This method issues an emit with a callback and waits for the callback + to be invoked before returning. If the callback isn't invoked before + the timeout, then a ``TimeoutError`` exception is raised. If the + Socket.IO connection drops during the wait, this method still waits + until the specified timeout. + + :param event: The event name. It can be any string. 
The event names + ``'connect'``, ``'message'`` and ``'disconnect'`` are + reserved and should not be used. + :param data: The data to send to the server. Data can be of + type ``str``, ``bytes``, ``list`` or ``dict``. To send + multiple arguments, use a tuple where each element is of + one of the types indicated above. + :param namespace: The Socket.IO namespace for the event. If this + argument is omitted the event is emitted to the + default namespace. + :param timeout: The waiting timeout. If the timeout is reached before + the server acknowledges the event, then a + ``TimeoutError`` exception is raised. + + Note: this method is not thread safe. If multiple threads are emitting + at the same time on the same client connection, messages composed of + multiple packets may end up being sent in an incorrect sequence. Use + standard concurrency solutions (such as a Lock object) to prevent this + situation. + """ + callback_event = self.eio.create_event() + callback_args = [] + + def event_callback(*args): + callback_args.append(args) + callback_event.set() + + self.emit(event, data=data, namespace=namespace, + callback=event_callback) + if not callback_event.wait(timeout=timeout): + raise exceptions.TimeoutError() + return callback_args[0] if len(callback_args[0]) > 1 \ + else callback_args[0][0] if len(callback_args[0]) == 1 \ + else None + + def disconnect(self): + """Disconnect from the server.""" + # here we just request the disconnection + # later in _handle_eio_disconnect we invoke the disconnect handler + for n in self.namespaces: + self._send_packet(self.packet_class( + packet.DISCONNECT, namespace=n)) + self.eio.disconnect(abort=True) + + def shutdown(self): + """Stop the client. + + If the client is connected to a server, it is disconnected. If the + client is attempting to reconnect to server, the reconnection attempts + are stopped. If the client is not connected to a server and is not + attempting to reconnect, then this function does nothing. 
+ """ + if self.connected: + self.disconnect() + elif self._reconnect_task: # pragma: no branch + self._reconnect_abort.set() + self._reconnect_task.join() + + def start_background_task(self, target, *args, **kwargs): + """Start a background task using the appropriate async model. + + This is a utility function that applications can use to start a + background task using the method that is compatible with the + selected async mode. + + :param target: the target function to execute. + :param args: arguments to pass to the function. + :param kwargs: keyword arguments to pass to the function. + + This function returns an object that represents the background task, + on which the ``join()`` methond can be invoked to wait for the task to + complete. + """ + return self.eio.start_background_task(target, *args, **kwargs) + + def sleep(self, seconds=0): + """Sleep for the requested amount of time using the appropriate async + model. + + This is a utility function that applications can use to put a task to + sleep without having to worry about using the correct call for the + selected async mode. 
+ """ + return self.eio.sleep(seconds) + + def _get_real_value(self, value): + """Return the actual value, for parameters that can also be given as + callables.""" + if not callable(value): + return value + return value() + + def _send_packet(self, pkt): + """Send a Socket.IO packet to the server.""" + encoded_packet = pkt.encode() + if isinstance(encoded_packet, list): + for ep in encoded_packet: + self.eio.send(ep) + else: + self.eio.send(encoded_packet) + + def _handle_connect(self, namespace, data): + namespace = namespace or '/' + if namespace not in self.namespaces: + self.logger.info(f'Namespace {namespace} is connected') + self.namespaces[namespace] = (data or {}).get('sid', self.sid) + self._trigger_event('connect', namespace=namespace) + self._connect_event.set() + + def _handle_disconnect(self, namespace): + if not self.connected: + return + namespace = namespace or '/' + self._trigger_event('disconnect', namespace, + self.reason.SERVER_DISCONNECT) + self._trigger_event('__disconnect_final', namespace) + if namespace in self.namespaces: + del self.namespaces[namespace] + if not self.namespaces: + self.connected = False + self.eio.disconnect(abort=True) + + def _handle_event(self, namespace, id, data): + namespace = namespace or '/' + self.logger.info('Received event "%s" [%s]', data[0], namespace) + r = self._trigger_event(data[0], namespace, *data[1:]) + if id is not None: + # send ACK packet with the response returned by the handler + # tuples are expanded as multiple arguments + if r is None: + data = [] + elif isinstance(r, tuple): + data = list(r) + else: + data = [r] + self._send_packet(self.packet_class( + packet.ACK, namespace=namespace, id=id, data=data)) + + def _handle_ack(self, namespace, id, data): + namespace = namespace or '/' + self.logger.info('Received ack [%s]', namespace) + callback = None + try: + callback = self.callbacks[namespace][id] + except KeyError: + # if we get an unknown callback we just ignore it + 
self.logger.warning('Unknown callback received, ignoring.') + else: + del self.callbacks[namespace][id] + if callback is not None: + callback(*data) + + def _handle_error(self, namespace, data): + namespace = namespace or '/' + self.logger.info('Connection to namespace {} was rejected'.format( + namespace)) + if data is None: + data = tuple() + elif not isinstance(data, (tuple, list)): + data = (data,) + self._trigger_event('connect_error', namespace, *data) + self._connect_event.set() + if namespace in self.namespaces: + del self.namespaces[namespace] + if namespace == '/': + self.namespaces = {} + self.connected = False + + def _trigger_event(self, event, namespace, *args): + """Invoke an application event handler.""" + # first see if we have an explicit handler for the event + handler, args = self._get_event_handler(event, namespace, args) + if handler: + try: + return handler(*args) + except TypeError: + # the legacy disconnect event does not take a reason argument + if event == 'disconnect': + return handler(*args[:-1]) + else: # pragma: no cover + raise + + # or else, forward the event to a namespace handler if one exists + handler, args = self._get_namespace_handler(namespace, args) + if handler: + return handler.trigger_event(event, *args) + + def _handle_reconnect(self): + if self._reconnect_abort is None: # pragma: no cover + self._reconnect_abort = self.eio.create_event() + self._reconnect_abort.clear() + base_client.reconnecting_clients.append(self) + attempt_count = 0 + current_delay = self.reconnection_delay + while True: + delay = current_delay + current_delay *= 2 + if delay > self.reconnection_delay_max: + delay = self.reconnection_delay_max + delay += self.randomization_factor * (2 * random.random() - 1) + self.logger.info( + 'Connection failed, new attempt in {:.02f} seconds'.format( + delay)) + if self._reconnect_abort.wait(delay): + self.logger.info('Reconnect task aborted') + for n in self.connection_namespaces: + 
self._trigger_event('__disconnect_final', namespace=n) + break + attempt_count += 1 + try: + self.connect(self.connection_url, + headers=self.connection_headers, + auth=self.connection_auth, + transports=self.connection_transports, + namespaces=self.connection_namespaces, + socketio_path=self.socketio_path, + retry=False) + except (exceptions.ConnectionError, ValueError): + pass + else: + self.logger.info('Reconnection successful') + self._reconnect_task = None + break + if self.reconnection_attempts and \ + attempt_count >= self.reconnection_attempts: + self.logger.info( + 'Maximum reconnection attempts reached, giving up') + for n in self.connection_namespaces: + self._trigger_event('__disconnect_final', namespace=n) + break + base_client.reconnecting_clients.remove(self) + + def _handle_eio_connect(self): + """Handle the Engine.IO connection event.""" + self.logger.info('Engine.IO connection established') + self.sid = self.eio.sid + real_auth = self._get_real_value(self.connection_auth) or {} + for n in self.connection_namespaces: + self._send_packet(self.packet_class( + packet.CONNECT, data=real_auth, namespace=n)) + + def _handle_eio_message(self, data): + """Dispatch Engine.IO messages.""" + if self._binary_packet: + pkt = self._binary_packet + if pkt.add_attachment(data): + self._binary_packet = None + if pkt.packet_type == packet.BINARY_EVENT: + self._handle_event(pkt.namespace, pkt.id, pkt.data) + else: + self._handle_ack(pkt.namespace, pkt.id, pkt.data) + else: + pkt = self.packet_class(encoded_packet=data) + if pkt.packet_type == packet.CONNECT: + self._handle_connect(pkt.namespace, pkt.data) + elif pkt.packet_type == packet.DISCONNECT: + self._handle_disconnect(pkt.namespace) + elif pkt.packet_type == packet.EVENT: + self._handle_event(pkt.namespace, pkt.id, pkt.data) + elif pkt.packet_type == packet.ACK: + self._handle_ack(pkt.namespace, pkt.id, pkt.data) + elif pkt.packet_type == packet.BINARY_EVENT or \ + pkt.packet_type == packet.BINARY_ACK: + 
self._binary_packet = pkt + elif pkt.packet_type == packet.CONNECT_ERROR: + self._handle_error(pkt.namespace, pkt.data) + else: + raise ValueError('Unknown packet type.') + + def _handle_eio_disconnect(self, reason): + """Handle the Engine.IO disconnection event.""" + self.logger.info('Engine.IO connection dropped') + will_reconnect = self.reconnection and self.eio.state == 'connected' + if self.connected: + for n in self.namespaces: + self._trigger_event('disconnect', n, reason) + if not will_reconnect: + self._trigger_event('__disconnect_final', n) + self.namespaces = {} + self.connected = False + self.callbacks = {} + self._binary_packet = None + self.sid = None + if will_reconnect and not self._reconnect_task: + self._reconnect_task = self.start_background_task( + self._handle_reconnect) + + def _engineio_client_class(self): + return engineio.Client diff --git a/env/lib/python3.10/site-packages/socketio/exceptions.py b/env/lib/python3.10/site-packages/socketio/exceptions.py new file mode 100644 index 0000000..19d6e39 --- /dev/null +++ b/env/lib/python3.10/site-packages/socketio/exceptions.py @@ -0,0 +1,38 @@ +class SocketIOError(Exception): + pass + + +class ConnectionError(SocketIOError): + pass + + +class ConnectionRefusedError(ConnectionError): + """Connection refused exception. + + This exception can be raised from a connect handler when the connection + is not accepted. The positional arguments provided with the exception are + returned with the error packet to the client. 
+ """ + def __init__(self, *args): + if len(args) == 0: + self.error_args = {'message': 'Connection rejected by server'} + elif len(args) == 1: + self.error_args = {'message': str(args[0])} + else: + self.error_args = {'message': str(args[0])} + if len(args) == 2: + self.error_args['data'] = args[1] + else: + self.error_args['data'] = args[1:] + + +class TimeoutError(SocketIOError): + pass + + +class BadNamespaceError(SocketIOError): + pass + + +class DisconnectedError(SocketIOError): + pass diff --git a/env/lib/python3.10/site-packages/socketio/kafka_manager.py b/env/lib/python3.10/site-packages/socketio/kafka_manager.py new file mode 100644 index 0000000..11b87ad --- /dev/null +++ b/env/lib/python3.10/site-packages/socketio/kafka_manager.py @@ -0,0 +1,65 @@ +import logging +import pickle + +try: + import kafka +except ImportError: + kafka = None + +from .pubsub_manager import PubSubManager + +logger = logging.getLogger('socketio') + + +class KafkaManager(PubSubManager): # pragma: no cover + """Kafka based client manager. + + This class implements a Kafka backend for event sharing across multiple + processes. + + To use a Kafka backend, initialize the :class:`Server` instance as + follows:: + + url = 'kafka://hostname:port' + server = socketio.Server(client_manager=socketio.KafkaManager(url)) + + :param url: The connection URL for the Kafka server. For a default Kafka + store running on the same host, use ``kafka://``. For a highly + available deployment of Kafka, pass a list with all the + connection URLs available in your cluster. + :param channel: The channel name (topic) on which the server sends and + receives notifications. Must be the same in all the + servers. + :param write_only: If set to ``True``, only initialize to emit events. The + default of ``False`` initializes the class for emitting + and receiving. 
+ """ + name = 'kafka' + + def __init__(self, url='kafka://localhost:9092', channel='socketio', + write_only=False): + if kafka is None: + raise RuntimeError('kafka-python package is not installed ' + '(Run "pip install kafka-python" in your ' + 'virtualenv).') + + super().__init__(channel=channel, write_only=write_only) + + urls = [url] if isinstance(url, str) else url + self.kafka_urls = [url[8:] if url != 'kafka://' else 'localhost:9092' + for url in urls] + self.producer = kafka.KafkaProducer(bootstrap_servers=self.kafka_urls) + self.consumer = kafka.KafkaConsumer(self.channel, + bootstrap_servers=self.kafka_urls) + + def _publish(self, data): + self.producer.send(self.channel, value=pickle.dumps(data)) + self.producer.flush() + + def _kafka_listen(self): + yield from self.consumer + + def _listen(self): + for message in self._kafka_listen(): + if message.topic == self.channel: + yield pickle.loads(message.value) diff --git a/env/lib/python3.10/site-packages/socketio/kombu_manager.py b/env/lib/python3.10/site-packages/socketio/kombu_manager.py new file mode 100644 index 0000000..09e260c --- /dev/null +++ b/env/lib/python3.10/site-packages/socketio/kombu_manager.py @@ -0,0 +1,134 @@ +import pickle +import time +import uuid + +try: + import kombu +except ImportError: + kombu = None + +from .pubsub_manager import PubSubManager + + +class KombuManager(PubSubManager): # pragma: no cover + """Client manager that uses kombu for inter-process messaging. + + This class implements a client manager backend for event sharing across + multiple processes, using RabbitMQ, Redis or any other messaging mechanism + supported by `kombu `_. + + To use a kombu backend, initialize the :class:`Server` instance as + follows:: + + url = 'amqp://user:password@hostname:port//' + server = socketio.Server(client_manager=socketio.KombuManager(url)) + + :param url: The connection URL for the backend messaging queue. 
Example + connection URLs are ``'amqp://guest:guest@localhost:5672//'`` + and ``'redis://localhost:6379/'`` for RabbitMQ and Redis + respectively. Consult the `kombu documentation + `_ for more on how to construct + connection URLs. + :param channel: The channel name on which the server sends and receives + notifications. Must be the same in all the servers. + :param write_only: If set to ``True``, only initialize to emit events. The + default of ``False`` initializes the class for emitting + and receiving. + :param connection_options: additional keyword arguments to be passed to + ``kombu.Connection()``. + :param exchange_options: additional keyword arguments to be passed to + ``kombu.Exchange()``. + :param queue_options: additional keyword arguments to be passed to + ``kombu.Queue()``. + :param producer_options: additional keyword arguments to be passed to + ``kombu.Producer()``. + """ + name = 'kombu' + + def __init__(self, url='amqp://guest:guest@localhost:5672//', + channel='socketio', write_only=False, logger=None, + connection_options=None, exchange_options=None, + queue_options=None, producer_options=None): + if kombu is None: + raise RuntimeError('Kombu package is not installed ' + '(Run "pip install kombu" in your ' + 'virtualenv).') + super().__init__(channel=channel, write_only=write_only, logger=logger) + self.url = url + self.connection_options = connection_options or {} + self.exchange_options = exchange_options or {} + self.queue_options = queue_options or {} + self.producer_options = producer_options or {} + self.publisher_connection = self._connection() + + def initialize(self): + super().initialize() + + monkey_patched = True + if self.server.async_mode == 'eventlet': + from eventlet.patcher import is_monkey_patched + monkey_patched = is_monkey_patched('socket') + elif 'gevent' in self.server.async_mode: + from gevent.monkey import is_module_patched + monkey_patched = is_module_patched('socket') + if not monkey_patched: + raise RuntimeError( + 
'Kombu requires a monkey patched socket library to work ' + 'with ' + self.server.async_mode) + + def _connection(self): + return kombu.Connection(self.url, **self.connection_options) + + def _exchange(self): + options = {'type': 'fanout', 'durable': False} + options.update(self.exchange_options) + return kombu.Exchange(self.channel, **options) + + def _queue(self): + queue_name = 'python-socketio.' + str(uuid.uuid4()) + options = {'durable': False, 'queue_arguments': {'x-expires': 300000}} + options.update(self.queue_options) + return kombu.Queue(queue_name, self._exchange(), **options) + + def _producer_publish(self, connection): + producer = connection.Producer(exchange=self._exchange(), + **self.producer_options) + return connection.ensure(producer, producer.publish) + + def _publish(self, data): + retry = True + while True: + try: + producer_publish = self._producer_publish( + self.publisher_connection) + producer_publish(pickle.dumps(data)) + break + except (OSError, kombu.exceptions.KombuError): + if retry: + self._get_logger().error('Cannot publish to rabbitmq... ' + 'retrying') + retry = False + else: + self._get_logger().error( + 'Cannot publish to rabbitmq... giving up') + break + + def _listen(self): + reader_queue = self._queue() + retry_sleep = 1 + while True: + try: + with self._connection() as connection: + with connection.SimpleQueue(reader_queue) as queue: + while True: + message = queue.get(block=True) + message.ack() + yield message.payload + retry_sleep = 1 + except (OSError, kombu.exceptions.KombuError): + self._get_logger().error( + 'Cannot receive from rabbitmq... 
' + 'retrying in {} secs'.format(retry_sleep)) + time.sleep(retry_sleep) + retry_sleep = min(retry_sleep * 2, 60) diff --git a/env/lib/python3.10/site-packages/socketio/manager.py b/env/lib/python3.10/site-packages/socketio/manager.py new file mode 100644 index 0000000..3ebf676 --- /dev/null +++ b/env/lib/python3.10/site-packages/socketio/manager.py @@ -0,0 +1,93 @@ +import logging + +from engineio import packet as eio_packet +from . import base_manager +from . import packet + +default_logger = logging.getLogger('socketio') + + +class Manager(base_manager.BaseManager): + """Manage client connections. + + This class keeps track of all the clients and the rooms they are in, to + support the broadcasting of messages. The data used by this class is + stored in a memory structure, making it appropriate only for single process + services. More sophisticated storage backends can be implemented by + subclasses. + """ + def can_disconnect(self, sid, namespace): + return self.is_connected(sid, namespace) + + def emit(self, event, data, namespace, room=None, skip_sid=None, + callback=None, to=None, **kwargs): + """Emit a message to a single client, a room, or all the clients + connected to the namespace.""" + room = to or room + if namespace not in self.rooms: + return + if isinstance(data, tuple): + # tuples are expanded to multiple arguments, everything else is + # sent as a single argument + data = list(data) + elif data is not None: + data = [data] + else: + data = [] + if not isinstance(skip_sid, list): + skip_sid = [skip_sid] + if not callback: + # when callbacks aren't used the packets sent to each recipient are + # identical, so they can be generated once and reused + pkt = self.server.packet_class( + packet.EVENT, namespace=namespace, data=[event] + data) + encoded_packet = pkt.encode() + if not isinstance(encoded_packet, list): + encoded_packet = [encoded_packet] + eio_pkt = [eio_packet.Packet(eio_packet.MESSAGE, p) + for p in encoded_packet] + for sid, eio_sid in 
self.get_participants(namespace, room): + if sid not in skip_sid: + for p in eio_pkt: + self.server._send_eio_packet(eio_sid, p) + else: + # callbacks are used, so each recipient must be sent a packet that + # contains a unique callback id + # note that callbacks when addressing a group of people are + # implemented but not tested or supported + for sid, eio_sid in self.get_participants(namespace, room): + if sid not in skip_sid: # pragma: no branch + id = self._generate_ack_id(sid, callback) + pkt = self.server.packet_class( + packet.EVENT, namespace=namespace, data=[event] + data, + id=id) + self.server._send_packet(eio_sid, pkt) + + def disconnect(self, sid, namespace, **kwargs): + """Register a client disconnect from a namespace.""" + return self.basic_disconnect(sid, namespace) + + def enter_room(self, sid, namespace, room, eio_sid=None): + """Add a client to a room.""" + return self.basic_enter_room(sid, namespace, room, eio_sid=eio_sid) + + def leave_room(self, sid, namespace, room): + """Remove a client from a room.""" + return self.basic_leave_room(sid, namespace, room) + + def close_room(self, room, namespace): + """Remove all participants from a room.""" + return self.basic_close_room(room, namespace) + + def trigger_callback(self, sid, id, data): + """Invoke an application callback.""" + callback = None + try: + callback = self.callbacks[sid][id] + except KeyError: + # if we get an unknown callback we just ignore it + self._get_logger().warning('Unknown callback received, ignoring.') + else: + del self.callbacks[sid][id] + if callback is not None: + callback(*data) diff --git a/env/lib/python3.10/site-packages/socketio/middleware.py b/env/lib/python3.10/site-packages/socketio/middleware.py new file mode 100644 index 0000000..acc8ffd --- /dev/null +++ b/env/lib/python3.10/site-packages/socketio/middleware.py @@ -0,0 +1,40 @@ +import engineio + + +class WSGIApp(engineio.WSGIApp): + """WSGI middleware for Socket.IO. 
+ + This middleware dispatches traffic to a Socket.IO application. It can also + serve a list of static files to the client, or forward unrelated HTTP + traffic to another WSGI application. + + :param socketio_app: The Socket.IO server. Must be an instance of the + ``socketio.Server`` class. + :param wsgi_app: The WSGI app that receives all other traffic. + :param static_files: A dictionary with static file mapping rules. See the + documentation for details on this argument. + :param socketio_path: The endpoint where the Socket.IO application should + be installed. The default value is appropriate for + most cases. + + Example usage:: + + import socketio + import eventlet + from . import wsgi_app + + sio = socketio.Server() + app = socketio.WSGIApp(sio, wsgi_app) + eventlet.wsgi.server(eventlet.listen(('', 8000)), app) + """ + def __init__(self, socketio_app, wsgi_app=None, static_files=None, + socketio_path='socket.io'): + super().__init__(socketio_app, wsgi_app, static_files=static_files, + engineio_path=socketio_path) + + +class Middleware(WSGIApp): + """This class has been renamed to WSGIApp and is now deprecated.""" + def __init__(self, socketio_app, wsgi_app=None, + socketio_path='socket.io'): + super().__init__(socketio_app, wsgi_app, socketio_path=socketio_path) diff --git a/env/lib/python3.10/site-packages/socketio/msgpack_packet.py b/env/lib/python3.10/site-packages/socketio/msgpack_packet.py new file mode 100644 index 0000000..2746263 --- /dev/null +++ b/env/lib/python3.10/site-packages/socketio/msgpack_packet.py @@ -0,0 +1,18 @@ +import msgpack +from . 
import packet + + +class MsgPackPacket(packet.Packet): + uses_binary_events = False + + def encode(self): + """Encode the packet for transmission.""" + return msgpack.dumps(self._to_dict()) + + def decode(self, encoded_packet): + """Decode a transmitted package.""" + decoded = msgpack.loads(encoded_packet) + self.packet_type = decoded['type'] + self.data = decoded.get('data') + self.id = decoded.get('id') + self.namespace = decoded['nsp'] diff --git a/env/lib/python3.10/site-packages/socketio/namespace.py b/env/lib/python3.10/site-packages/socketio/namespace.py new file mode 100644 index 0000000..60cab78 --- /dev/null +++ b/env/lib/python3.10/site-packages/socketio/namespace.py @@ -0,0 +1,212 @@ +from . import base_namespace + + +class Namespace(base_namespace.BaseServerNamespace): + """Base class for server-side class-based namespaces. + + A class-based namespace is a class that contains all the event handlers + for a Socket.IO namespace. The event handlers are methods of the class + with the prefix ``on_``, such as ``on_connect``, ``on_disconnect``, + ``on_message``, ``on_json``, and so on. + + :param namespace: The Socket.IO namespace to be used with all the event + handlers defined in this class. If this argument is + omitted, the default namespace is used. + """ + def trigger_event(self, event, *args): + """Dispatch an event to the proper handler method. + + In the most common usage, this method is not overloaded by subclasses, + as it performs the routing of events to methods. However, this + method can be overridden if special dispatching rules are needed, or if + having a single method that catches all events is desired. 
+ """ + handler_name = 'on_' + (event or '') + if hasattr(self, handler_name): + try: + return getattr(self, handler_name)(*args) + except TypeError: + # legacy disconnect events do not have a reason argument + if event == 'disconnect': + return getattr(self, handler_name)(*args[:-1]) + else: # pragma: no cover + raise + + def emit(self, event, data=None, to=None, room=None, skip_sid=None, + namespace=None, callback=None, ignore_queue=False): + """Emit a custom event to one or more connected clients. + + The only difference with the :func:`socketio.Server.emit` method is + that when the ``namespace`` argument is not given the namespace + associated with the class is used. + """ + return self.server.emit(event, data=data, to=to, room=room, + skip_sid=skip_sid, + namespace=namespace or self.namespace, + callback=callback, ignore_queue=ignore_queue) + + def send(self, data, to=None, room=None, skip_sid=None, namespace=None, + callback=None, ignore_queue=False): + """Send a message to one or more connected clients. + + The only difference with the :func:`socketio.Server.send` method is + that when the ``namespace`` argument is not given the namespace + associated with the class is used. + """ + return self.server.send(data, to=to, room=room, skip_sid=skip_sid, + namespace=namespace or self.namespace, + callback=callback, ignore_queue=ignore_queue) + + def call(self, event, data=None, to=None, sid=None, namespace=None, + timeout=None, ignore_queue=False): + """Emit a custom event to a client and wait for the response. + + The only difference with the :func:`socketio.Server.call` method is + that when the ``namespace`` argument is not given the namespace + associated with the class is used. + """ + return self.server.call(event, data=data, to=to, sid=sid, + namespace=namespace or self.namespace, + timeout=timeout, ignore_queue=ignore_queue) + + def enter_room(self, sid, room, namespace=None): + """Enter a room. 
+ + The only difference with the :func:`socketio.Server.enter_room` method + is that when the ``namespace`` argument is not given the namespace + associated with the class is used. + """ + return self.server.enter_room(sid, room, + namespace=namespace or self.namespace) + + def leave_room(self, sid, room, namespace=None): + """Leave a room. + + The only difference with the :func:`socketio.Server.leave_room` method + is that when the ``namespace`` argument is not given the namespace + associated with the class is used. + """ + return self.server.leave_room(sid, room, + namespace=namespace or self.namespace) + + def close_room(self, room, namespace=None): + """Close a room. + + The only difference with the :func:`socketio.Server.close_room` method + is that when the ``namespace`` argument is not given the namespace + associated with the class is used. + """ + return self.server.close_room(room, + namespace=namespace or self.namespace) + + def get_session(self, sid, namespace=None): + """Return the user session for a client. + + The only difference with the :func:`socketio.Server.get_session` + method is that when the ``namespace`` argument is not given the + namespace associated with the class is used. + """ + return self.server.get_session( + sid, namespace=namespace or self.namespace) + + def save_session(self, sid, session, namespace=None): + """Store the user session for a client. + + The only difference with the :func:`socketio.Server.save_session` + method is that when the ``namespace`` argument is not given the + namespace associated with the class is used. + """ + return self.server.save_session( + sid, session, namespace=namespace or self.namespace) + + def session(self, sid, namespace=None): + """Return the user session for a client with context manager syntax. + + The only difference with the :func:`socketio.Server.session` method is + that when the ``namespace`` argument is not given the namespace + associated with the class is used. 
+ """ + return self.server.session(sid, namespace=namespace or self.namespace) + + def disconnect(self, sid, namespace=None): + """Disconnect a client. + + The only difference with the :func:`socketio.Server.disconnect` method + is that when the ``namespace`` argument is not given the namespace + associated with the class is used. + """ + return self.server.disconnect(sid, + namespace=namespace or self.namespace) + + +class ClientNamespace(base_namespace.BaseClientNamespace): + """Base class for client-side class-based namespaces. + + A class-based namespace is a class that contains all the event handlers + for a Socket.IO namespace. The event handlers are methods of the class + with the prefix ``on_``, such as ``on_connect``, ``on_disconnect``, + ``on_message``, ``on_json``, and so on. + + :param namespace: The Socket.IO namespace to be used with all the event + handlers defined in this class. If this argument is + omitted, the default namespace is used. + """ + def trigger_event(self, event, *args): + """Dispatch an event to the proper handler method. + + In the most common usage, this method is not overloaded by subclasses, + as it performs the routing of events to methods. However, this + method can be overridden if special dispatching rules are needed, or if + having a single method that catches all events is desired. + """ + handler_name = 'on_' + (event or '') + if hasattr(self, handler_name): + try: + return getattr(self, handler_name)(*args) + except TypeError: + # legacy disconnect events do not have a reason argument + if event == 'disconnect': + return getattr(self, handler_name)(*args[:-1]) + else: # pragma: no cover + raise + + def emit(self, event, data=None, namespace=None, callback=None): + """Emit a custom event to the server. + + The only difference with the :func:`socketio.Client.emit` method is + that when the ``namespace`` argument is not given the namespace + associated with the class is used. 
+ """ + return self.client.emit(event, data=data, + namespace=namespace or self.namespace, + callback=callback) + + def send(self, data, room=None, namespace=None, callback=None): + """Send a message to the server. + + The only difference with the :func:`socketio.Client.send` method is + that when the ``namespace`` argument is not given the namespace + associated with the class is used. + """ + return self.client.send(data, namespace=namespace or self.namespace, + callback=callback) + + def call(self, event, data=None, namespace=None, timeout=None): + """Emit a custom event to the server and wait for the response. + + The only difference with the :func:`socketio.Client.call` method is + that when the ``namespace`` argument is not given the namespace + associated with the class is used. + """ + return self.client.call(event, data=data, + namespace=namespace or self.namespace, + timeout=timeout) + + def disconnect(self): + """Disconnect from the server. + + The only difference with the :func:`socketio.Client.disconnect` method + is that when the ``namespace`` argument is not given the namespace + associated with the class is used. 
+ """ + return self.client.disconnect() diff --git a/env/lib/python3.10/site-packages/socketio/packet.py b/env/lib/python3.10/site-packages/socketio/packet.py new file mode 100644 index 0000000..f7ad87e --- /dev/null +++ b/env/lib/python3.10/site-packages/socketio/packet.py @@ -0,0 +1,190 @@ +import functools +from engineio import json as _json + +(CONNECT, DISCONNECT, EVENT, ACK, CONNECT_ERROR, BINARY_EVENT, BINARY_ACK) = \ + (0, 1, 2, 3, 4, 5, 6) +packet_names = ['CONNECT', 'DISCONNECT', 'EVENT', 'ACK', 'CONNECT_ERROR', + 'BINARY_EVENT', 'BINARY_ACK'] + + +class Packet: + """Socket.IO packet.""" + + # the format of the Socket.IO packet is as follows: + # + # packet type: 1 byte, values 0-6 + # num_attachments: ASCII encoded, only if num_attachments != 0 + # '-': only if num_attachments != 0 + # namespace, followed by a ',': only if namespace != '/' + # id: ASCII encoded, only if id is not None + # data: JSON dump of data payload + + uses_binary_events = True + json = _json + + def __init__(self, packet_type=EVENT, data=None, namespace=None, id=None, + binary=None, encoded_packet=None): + self.packet_type = packet_type + self.data = data + self.namespace = namespace + self.id = id + if self.uses_binary_events and \ + (binary or (binary is None and self._data_is_binary( + self.data))): + if self.packet_type == EVENT: + self.packet_type = BINARY_EVENT + elif self.packet_type == ACK: + self.packet_type = BINARY_ACK + else: + raise ValueError('Packet does not support binary payload.') + self.attachment_count = 0 + self.attachments = [] + if encoded_packet: + self.attachment_count = self.decode(encoded_packet) or 0 + + def encode(self): + """Encode the packet for transmission. + + If the packet contains binary elements, this function returns a list + of packets where the first is the original packet with placeholders for + the binary components and the remaining ones the binary attachments. 
+ """ + encoded_packet = str(self.packet_type) + if self.packet_type == BINARY_EVENT or self.packet_type == BINARY_ACK: + data, attachments = self._deconstruct_binary(self.data) + encoded_packet += str(len(attachments)) + '-' + else: + data = self.data + attachments = None + if self.namespace is not None and self.namespace != '/': + encoded_packet += self.namespace + ',' + if self.id is not None: + encoded_packet += str(self.id) + if data is not None: + encoded_packet += self.json.dumps(data, separators=(',', ':')) + if attachments is not None: + encoded_packet = [encoded_packet] + attachments + return encoded_packet + + def decode(self, encoded_packet): + """Decode a transmitted package. + + The return value indicates how many binary attachment packets are + necessary to fully decode the packet. + """ + ep = encoded_packet + try: + self.packet_type = int(ep[0:1]) + except TypeError: + self.packet_type = ep + ep = '' + self.namespace = None + self.data = None + ep = ep[1:] + dash = ep.find('-') + attachment_count = 0 + if dash > 0 and ep[0:dash].isdigit(): + if dash > 10: + raise ValueError('too many attachments') + attachment_count = int(ep[0:dash]) + ep = ep[dash + 1:] + if ep and ep[0:1] == '/': + sep = ep.find(',') + if sep == -1: + self.namespace = ep + ep = '' + else: + self.namespace = ep[0:sep] + ep = ep[sep + 1:] + q = self.namespace.find('?') + if q != -1: + self.namespace = self.namespace[0:q] + if ep and ep[0].isdigit(): + i = 1 + end = len(ep) + while i < end: + if not ep[i].isdigit() or i >= 100: + break + i += 1 + self.id = int(ep[:i]) + ep = ep[i:] + if len(ep) > 0 and ep[0].isdigit(): + raise ValueError('id field is too long') + if ep: + self.data = self.json.loads(ep) + return attachment_count + + def add_attachment(self, attachment): + if self.attachment_count <= len(self.attachments): + raise ValueError('Unexpected binary attachment') + self.attachments.append(attachment) + if self.attachment_count == len(self.attachments): + 
self.reconstruct_binary(self.attachments) + return True + return False + + def reconstruct_binary(self, attachments): + """Reconstruct a decoded packet using the given list of binary + attachments. + """ + self.data = self._reconstruct_binary_internal(self.data, + self.attachments) + + def _reconstruct_binary_internal(self, data, attachments): + if isinstance(data, list): + return [self._reconstruct_binary_internal(item, attachments) + for item in data] + elif isinstance(data, dict): + if data.get('_placeholder') and 'num' in data: + return attachments[data['num']] + else: + return {key: self._reconstruct_binary_internal(value, + attachments) + for key, value in data.items()} + else: + return data + + def _deconstruct_binary(self, data): + """Extract binary components in the packet.""" + attachments = [] + data = self._deconstruct_binary_internal(data, attachments) + return data, attachments + + def _deconstruct_binary_internal(self, data, attachments): + if isinstance(data, bytes): + attachments.append(data) + return {'_placeholder': True, 'num': len(attachments) - 1} + elif isinstance(data, list): + return [self._deconstruct_binary_internal(item, attachments) + for item in data] + elif isinstance(data, dict): + return {key: self._deconstruct_binary_internal(value, attachments) + for key, value in data.items()} + else: + return data + + def _data_is_binary(self, data): + """Check if the data contains binary components.""" + if isinstance(data, bytes): + return True + elif isinstance(data, list): + return functools.reduce( + lambda a, b: a or b, [self._data_is_binary(item) + for item in data], False) + elif isinstance(data, dict): + return functools.reduce( + lambda a, b: a or b, [self._data_is_binary(item) + for item in data.values()], + False) + else: + return False + + def _to_dict(self): + d = { + 'type': self.packet_type, + 'data': self.data, + 'nsp': self.namespace, + } + if self.id is not None: + d['id'] = self.id + return d diff --git 
a/env/lib/python3.10/site-packages/socketio/pubsub_manager.py b/env/lib/python3.10/site-packages/socketio/pubsub_manager.py new file mode 100644 index 0000000..3270b4c --- /dev/null +++ b/env/lib/python3.10/site-packages/socketio/pubsub_manager.py @@ -0,0 +1,233 @@ +from functools import partial +import uuid + +from engineio import json +import pickle + +from .manager import Manager + + +class PubSubManager(Manager): + """Manage a client list attached to a pub/sub backend. + + This is a base class that enables multiple servers to share the list of + clients, with the servers communicating events through a pub/sub backend. + The use of a pub/sub backend also allows any client connected to the + backend to emit events addressed to Socket.IO clients. + + The actual backends must be implemented by subclasses, this class only + provides a pub/sub generic framework. + + :param channel: The channel name on which the server sends and receives + notifications. + """ + name = 'pubsub' + + def __init__(self, channel='socketio', write_only=False, logger=None): + super().__init__() + self.channel = channel + self.write_only = write_only + self.host_id = uuid.uuid4().hex + self.logger = logger + + def initialize(self): + super().initialize() + if not self.write_only: + self.thread = self.server.start_background_task(self._thread) + self._get_logger().info(self.name + ' backend initialized.') + + def emit(self, event, data, namespace=None, room=None, skip_sid=None, + callback=None, to=None, **kwargs): + """Emit a message to a single client, a room, or all the clients + connected to the namespace. + + This method takes care of propagating the message to all the servers + that are connected through the message queue. + + The parameters are the same as in :meth:`.Server.emit`. 
+ """ + room = to or room + if kwargs.get('ignore_queue'): + return super().emit( + event, data, namespace=namespace, room=room, skip_sid=skip_sid, + callback=callback) + namespace = namespace or '/' + if callback is not None: + if self.server is None: + raise RuntimeError('Callbacks can only be issued from the ' + 'context of a server.') + if room is None: + raise ValueError('Cannot use callback without a room set.') + id = self._generate_ack_id(room, callback) + callback = (room, namespace, id) + else: + callback = None + message = {'method': 'emit', 'event': event, 'data': data, + 'namespace': namespace, 'room': room, + 'skip_sid': skip_sid, 'callback': callback, + 'host_id': self.host_id} + self._handle_emit(message) # handle in this host + self._publish(message) # notify other hosts + + def can_disconnect(self, sid, namespace): + if self.is_connected(sid, namespace): + # client is in this server, so we can disconnect directly + return super().can_disconnect(sid, namespace) + else: + # client is in another server, so we post request to the queue + message = {'method': 'disconnect', 'sid': sid, + 'namespace': namespace or '/', 'host_id': self.host_id} + self._handle_disconnect(message) # handle in this host + self._publish(message) # notify other hosts + + def disconnect(self, sid, namespace=None, **kwargs): + if kwargs.get('ignore_queue'): + return super().disconnect(sid, namespace=namespace) + message = {'method': 'disconnect', 'sid': sid, + 'namespace': namespace or '/', 'host_id': self.host_id} + self._handle_disconnect(message) # handle in this host + self._publish(message) # notify other hosts + + def enter_room(self, sid, namespace, room, eio_sid=None): + if self.is_connected(sid, namespace): + # client is in this server, so we can add to the room directly + return super().enter_room(sid, namespace, room, eio_sid=eio_sid) + else: + message = {'method': 'enter_room', 'sid': sid, 'room': room, + 'namespace': namespace or '/', 'host_id': self.host_id} + 
self._publish(message) # notify other hosts + + def leave_room(self, sid, namespace, room): + if self.is_connected(sid, namespace): + # client is in this server, so we can remove from the room directly + return super().leave_room(sid, namespace, room) + else: + message = {'method': 'leave_room', 'sid': sid, 'room': room, + 'namespace': namespace or '/', 'host_id': self.host_id} + self._publish(message) # notify other hosts + + def close_room(self, room, namespace=None): + message = {'method': 'close_room', 'room': room, + 'namespace': namespace or '/', 'host_id': self.host_id} + self._handle_close_room(message) # handle in this host + self._publish(message) # notify other hosts + + def _publish(self, data): + """Publish a message on the Socket.IO channel. + + This method needs to be implemented by the different subclasses that + support pub/sub backends. + """ + raise NotImplementedError('This method must be implemented in a ' + 'subclass.') # pragma: no cover + + def _listen(self): + """Return the next message published on the Socket.IO channel, + blocking until a message is available. + + This method needs to be implemented by the different subclasses that + support pub/sub backends. 
+ """ + raise NotImplementedError('This method must be implemented in a ' + 'subclass.') # pragma: no cover + + def _handle_emit(self, message): + # Events with callbacks are very tricky to handle across hosts + # Here in the receiving end we set up a local callback that preserves + # the callback host and id from the sender + remote_callback = message.get('callback') + remote_host_id = message.get('host_id') + if remote_callback is not None and len(remote_callback) == 3: + callback = partial(self._return_callback, remote_host_id, + *remote_callback) + else: + callback = None + super().emit(message['event'], message['data'], + namespace=message.get('namespace'), + room=message.get('room'), + skip_sid=message.get('skip_sid'), callback=callback) + + def _handle_callback(self, message): + if self.host_id == message.get('host_id'): + try: + sid = message['sid'] + id = message['id'] + args = message['args'] + except KeyError: + return + self.trigger_callback(sid, id, args) + + def _return_callback(self, host_id, sid, namespace, callback_id, *args): + # When an event callback is received, the callback is returned back + # to the sender, which is identified by the host_id + if host_id == self.host_id: + self.trigger_callback(sid, callback_id, args) + else: + self._publish({'method': 'callback', 'host_id': host_id, + 'sid': sid, 'namespace': namespace, + 'id': callback_id, 'args': args}) + + def _handle_disconnect(self, message): + self.server.disconnect(sid=message.get('sid'), + namespace=message.get('namespace'), + ignore_queue=True) + + def _handle_enter_room(self, message): + sid = message.get('sid') + namespace = message.get('namespace') + if self.is_connected(sid, namespace): + super().enter_room(sid, namespace, message.get('room')) + + def _handle_leave_room(self, message): + sid = message.get('sid') + namespace = message.get('namespace') + if self.is_connected(sid, namespace): + super().leave_room(sid, namespace, message.get('room')) + + def 
_handle_close_room(self, message): + super().close_room(room=message.get('room'), + namespace=message.get('namespace')) + + def _thread(self): + while True: + try: + for message in self._listen(): + data = None + if isinstance(message, dict): + data = message + else: + if isinstance(message, bytes): # pragma: no cover + try: + data = pickle.loads(message) + except: + pass + if data is None: + try: + data = json.loads(message) + except: + pass + if data and 'method' in data: + self._get_logger().debug('pubsub message: {}'.format( + data['method'])) + try: + if data['method'] == 'callback': + self._handle_callback(data) + elif data.get('host_id') != self.host_id: + if data['method'] == 'emit': + self._handle_emit(data) + elif data['method'] == 'disconnect': + self._handle_disconnect(data) + elif data['method'] == 'enter_room': + self._handle_enter_room(data) + elif data['method'] == 'leave_room': + self._handle_leave_room(data) + elif data['method'] == 'close_room': + self._handle_close_room(data) + except Exception: + self.server.logger.exception( + 'Handler error in pubsub listening thread') + self.server.logger.error('pubsub listen() exited unexpectedly') + break # loop should never exit except in unit tests! + except Exception: # pragma: no cover + self.server.logger.exception('Unexpected Error in pubsub ' + 'listening thread') diff --git a/env/lib/python3.10/site-packages/socketio/redis_manager.py b/env/lib/python3.10/site-packages/socketio/redis_manager.py new file mode 100644 index 0000000..a16fb2c --- /dev/null +++ b/env/lib/python3.10/site-packages/socketio/redis_manager.py @@ -0,0 +1,114 @@ +import logging +import pickle +import time + +try: + import redis +except ImportError: + redis = None + +from .pubsub_manager import PubSubManager + +logger = logging.getLogger('socketio') + + +class RedisManager(PubSubManager): # pragma: no cover + """Redis based client manager. + + This class implements a Redis backend for event sharing across multiple + processes. 
Only kept here as one more example of how to build a custom + backend, since the kombu backend is perfectly adequate to support a Redis + message queue. + + To use a Redis backend, initialize the :class:`Server` instance as + follows:: + + url = 'redis://hostname:port/0' + server = socketio.Server(client_manager=socketio.RedisManager(url)) + + :param url: The connection URL for the Redis server. For a default Redis + store running on the same host, use ``redis://``. To use an + SSL connection, use ``rediss://``. + :param channel: The channel name on which the server sends and receives + notifications. Must be the same in all the servers. + :param write_only: If set to ``True``, only initialize to emit events. The + default of ``False`` initializes the class for emitting + and receiving. + :param redis_options: additional keyword arguments to be passed to + ``Redis.from_url()``. + """ + name = 'redis' + + def __init__(self, url='redis://localhost:6379/0', channel='socketio', + write_only=False, logger=None, redis_options=None): + if redis is None: + raise RuntimeError('Redis package is not installed ' + '(Run "pip install redis" in your ' + 'virtualenv).') + self.redis_url = url + self.redis_options = redis_options or {} + self._redis_connect() + super().__init__(channel=channel, write_only=write_only, logger=logger) + + def initialize(self): + super().initialize() + + monkey_patched = True + if self.server.async_mode == 'eventlet': + from eventlet.patcher import is_monkey_patched + monkey_patched = is_monkey_patched('socket') + elif 'gevent' in self.server.async_mode: + from gevent.monkey import is_module_patched + monkey_patched = is_module_patched('socket') + if not monkey_patched: + raise RuntimeError( + 'Redis requires a monkey patched socket library to work ' + 'with ' + self.server.async_mode) + + def _redis_connect(self): + self.redis = redis.Redis.from_url(self.redis_url, + **self.redis_options) + self.pubsub = 
self.redis.pubsub(ignore_subscribe_messages=True) + + def _publish(self, data): + retry = True + while True: + try: + if not retry: + self._redis_connect() + return self.redis.publish(self.channel, pickle.dumps(data)) + except redis.exceptions.RedisError: + if retry: + logger.error('Cannot publish to redis... retrying') + retry = False + else: + logger.error('Cannot publish to redis... giving up') + break + + def _redis_listen_with_retries(self): + retry_sleep = 1 + connect = False + while True: + try: + if connect: + self._redis_connect() + self.pubsub.subscribe(self.channel) + retry_sleep = 1 + yield from self.pubsub.listen() + except redis.exceptions.RedisError: + logger.error('Cannot receive from redis... ' + 'retrying in {} secs'.format(retry_sleep)) + connect = True + time.sleep(retry_sleep) + retry_sleep *= 2 + if retry_sleep > 60: + retry_sleep = 60 + + def _listen(self): + channel = self.channel.encode('utf-8') + self.pubsub.subscribe(self.channel) + for message in self._redis_listen_with_retries(): + if message['channel'] == channel and \ + message['type'] == 'message' and 'data' in message: + yield message['data'] + self.pubsub.unsubscribe(self.channel) diff --git a/env/lib/python3.10/site-packages/socketio/server.py b/env/lib/python3.10/site-packages/socketio/server.py new file mode 100644 index 0000000..71c702d --- /dev/null +++ b/env/lib/python3.10/site-packages/socketio/server.py @@ -0,0 +1,676 @@ +import logging + +import engineio + +from . import base_server +from . import exceptions +from . import packet + +default_logger = logging.getLogger('socketio.server') + + +class Server(base_server.BaseServer): + """A Socket.IO server. + + This class implements a fully compliant Socket.IO web server with support + for websocket and long-polling transports. + + :param client_manager: The client manager instance that will manage the + client list. 
When this is omitted, the client list + is stored in an in-memory structure, so the use of + multiple connected servers is not possible. + :param logger: To enable logging set to ``True`` or pass a logger object to + use. To disable logging set to ``False``. The default is + ``False``. Note that fatal errors are logged even when + ``logger`` is ``False``. + :param serializer: The serialization method to use when transmitting + packets. Valid values are ``'default'``, ``'pickle'``, + ``'msgpack'`` and ``'cbor'``. Alternatively, a subclass + of the :class:`Packet` class with custom implementations + of the ``encode()`` and ``decode()`` methods can be + provided. Client and server must use compatible + serializers. + :param json: An alternative json module to use for encoding and decoding + packets. Custom json modules must have ``dumps`` and ``loads`` + functions that are compatible with the standard library + versions. + :param async_handlers: If set to ``True``, event handlers for a client are + executed in separate threads. To run handlers for a + client synchronously, set to ``False``. The default + is ``True``. + :param always_connect: When set to ``False``, new connections are + provisory until the connect handler returns + something other than ``False``, at which point they + are accepted. When set to ``True``, connections are + immediately accepted, and then if the connect + handler returns ``False`` a disconnect is issued. + Set to ``True`` if you need to emit events from the + connect handler and your client is confused when it + receives events before the connection acceptance. + In any other case use the default of ``False``. + :param namespaces: a list of namespaces that are accepted, in addition to + any namespaces for which handlers have been defined. The + default is `['/']`, which always accepts connections to + the default namespace. Set to `'*'` to accept all + namespaces. + :param kwargs: Connection parameters for the underlying Engine.IO server. 
+ + The Engine.IO configuration supports the following settings: + + :param async_mode: The asynchronous model to use. See the Deployment + section in the documentation for a description of the + available options. Valid async modes are + ``'threading'``, ``'eventlet'``, ``'gevent'`` and + ``'gevent_uwsgi'``. If this argument is not given, + ``'eventlet'`` is tried first, then ``'gevent_uwsgi'``, + then ``'gevent'``, and finally ``'threading'``. + The first async mode that has all its dependencies + installed is then one that is chosen. + :param ping_interval: The interval in seconds at which the server pings + the client. The default is 25 seconds. For advanced + control, a two element tuple can be given, where + the first number is the ping interval and the second + is a grace period added by the server. + :param ping_timeout: The time in seconds that the client waits for the + server to respond before disconnecting. The default + is 20 seconds. + :param max_http_buffer_size: The maximum size that is accepted for incoming + messages. The default is 1,000,000 bytes. In + spite of its name, the value set in this + argument is enforced for HTTP long-polling and + WebSocket connections. + :param allow_upgrades: Whether to allow transport upgrades or not. The + default is ``True``. + :param http_compression: Whether to compress packages when using the + polling transport. The default is ``True``. + :param compression_threshold: Only compress messages when their byte size + is greater than this value. The default is + 1024 bytes. + :param cookie: If set to a string, it is the name of the HTTP cookie the + server sends back to the client containing the client + session id. If set to a dictionary, the ``'name'`` key + contains the cookie name and other keys define cookie + attributes, where the value of each attribute can be a + string, a callable with no arguments, or a boolean. If set + to ``None`` (the default), a cookie is not sent to the + client. 
+ :param cors_allowed_origins: Origin or list of origins that are allowed to + connect to this server. Only the same origin + is allowed by default. Set this argument to + ``'*'`` to allow all origins, or to ``[]`` to + disable CORS handling. + :param cors_credentials: Whether credentials (cookies, authentication) are + allowed in requests to this server. The default is + ``True``. + :param monitor_clients: If set to ``True``, a background task will ensure + inactive clients are closed. Set to ``False`` to + disable the monitoring task (not recommended). The + default is ``True``. + :param transports: The list of allowed transports. Valid transports + are ``'polling'`` and ``'websocket'``. Defaults to + ``['polling', 'websocket']``. + :param engineio_logger: To enable Engine.IO logging set to ``True`` or pass + a logger object to use. To disable logging set to + ``False``. The default is ``False``. Note that + fatal errors are logged even when + ``engineio_logger`` is ``False``. + """ + def emit(self, event, data=None, to=None, room=None, skip_sid=None, + namespace=None, callback=None, ignore_queue=False): + """Emit a custom event to one or more connected clients. + + :param event: The event name. It can be any string. The event names + ``'connect'``, ``'message'`` and ``'disconnect'`` are + reserved and should not be used. + :param data: The data to send to the client or clients. Data can be of + type ``str``, ``bytes``, ``list`` or ``dict``. To send + multiple arguments, use a tuple where each element is of + one of the types indicated above. + :param to: The recipient of the message. This can be set to the + session ID of a client to address only that client, to any + custom room created by the application to address all + the clients in that room, or to a list of custom room + names. If this argument is omitted the event is broadcasted + to all connected clients. + :param room: Alias for the ``to`` parameter. 
+ :param skip_sid: The session ID of a client to skip when broadcasting + to a room or to all clients. This can be used to + prevent a message from being sent to the sender. To + skip multiple sids, pass a list. + :param namespace: The Socket.IO namespace for the event. If this + argument is omitted the event is emitted to the + default namespace. + :param callback: If given, this function will be called to acknowledge + the client has received the message. The arguments + that will be passed to the function are those provided + by the client. Callback functions can only be used + when addressing an individual client. + :param ignore_queue: Only used when a message queue is configured. If + set to ``True``, the event is emitted to the + clients directly, without going through the queue. + This is more efficient, but only works when a + single server process is used. It is recommended + to always leave this parameter with its default + value of ``False``. + + Note: this method is not thread safe. If multiple threads are emitting + at the same time to the same client, then messages composed of + multiple packets may end up being sent in an incorrect sequence. Use + standard concurrency solutions (such as a Lock object) to prevent this + situation. + """ + namespace = namespace or '/' + room = to or room + self.logger.info('emitting event "%s" to %s [%s]', event, + room or 'all', namespace) + self.manager.emit(event, data, namespace, room=room, + skip_sid=skip_sid, callback=callback, + ignore_queue=ignore_queue) + + def send(self, data, to=None, room=None, skip_sid=None, namespace=None, + callback=None, ignore_queue=False): + """Send a message to one or more connected clients. + + This function emits an event with the name ``'message'``. Use + :func:`emit` to issue custom event names. + + :param data: The data to send to the client or clients. Data can be of + type ``str``, ``bytes``, ``list`` or ``dict``. 
To send + multiple arguments, use a tuple where each element is of + one of the types indicated above. + :param to: The recipient of the message. This can be set to the + session ID of a client to address only that client, to any + custom room created by the application to address all + the clients in that room, or to a list of custom room + names. If this argument is omitted the event is broadcasted + to all connected clients. + :param room: Alias for the ``to`` parameter. + :param skip_sid: The session ID of a client to skip when broadcasting + to a room or to all clients. This can be used to + prevent a message from being sent to the sender. To + skip multiple sids, pass a list. + :param namespace: The Socket.IO namespace for the event. If this + argument is omitted the event is emitted to the + default namespace. + :param callback: If given, this function will be called to acknowledge + the client has received the message. The arguments + that will be passed to the function are those provided + by the client. Callback functions can only be used + when addressing an individual client. + :param ignore_queue: Only used when a message queue is configured. If + set to ``True``, the event is emitted to the + clients directly, without going through the queue. + This is more efficient, but only works when a + single server process is used. It is recommended + to always leave this parameter with its default + value of ``False``. + """ + self.emit('message', data=data, to=to, room=room, skip_sid=skip_sid, + namespace=namespace, callback=callback, + ignore_queue=ignore_queue) + + def call(self, event, data=None, to=None, sid=None, namespace=None, + timeout=60, ignore_queue=False): + """Emit a custom event to a client and wait for the response. + + This method issues an emit with a callback and waits for the callback + to be invoked before returning. If the callback isn't invoked before + the timeout, then a ``TimeoutError`` exception is raised. 
If the + Socket.IO connection drops during the wait, this method still waits + until the specified timeout. + + :param event: The event name. It can be any string. The event names + ``'connect'``, ``'message'`` and ``'disconnect'`` are + reserved and should not be used. + :param data: The data to send to the client or clients. Data can be of + type ``str``, ``bytes``, ``list`` or ``dict``. To send + multiple arguments, use a tuple where each element is of + one of the types indicated above. + :param to: The session ID of the recipient client. + :param sid: Alias for the ``to`` parameter. + :param namespace: The Socket.IO namespace for the event. If this + argument is omitted the event is emitted to the + default namespace. + :param timeout: The waiting timeout. If the timeout is reached before + the client acknowledges the event, then a + ``TimeoutError`` exception is raised. + :param ignore_queue: Only used when a message queue is configured. If + set to ``True``, the event is emitted to the + client directly, without going through the queue. + This is more efficient, but only works when a + single server process is used. It is recommended + to always leave this parameter with its default + value of ``False``. + + Note: this method is not thread safe. If multiple threads are emitting + at the same time to the same client, then messages composed of + multiple packets may end up being sent in an incorrect sequence. Use + standard concurrency solutions (such as a Lock object) to prevent this + situation. 
+ """ + if to is None and sid is None: + raise ValueError('Cannot use call() to broadcast.') + if not self.async_handlers: + raise RuntimeError( + 'Cannot use call() when async_handlers is False.') + callback_event = self.eio.create_event() + callback_args = [] + + def event_callback(*args): + callback_args.append(args) + callback_event.set() + + self.emit(event, data=data, room=to or sid, namespace=namespace, + callback=event_callback, ignore_queue=ignore_queue) + if not callback_event.wait(timeout=timeout): + raise exceptions.TimeoutError() + return callback_args[0] if len(callback_args[0]) > 1 \ + else callback_args[0][0] if len(callback_args[0]) == 1 \ + else None + + def enter_room(self, sid, room, namespace=None): + """Enter a room. + + This function adds the client to a room. The :func:`emit` and + :func:`send` functions can optionally broadcast events to all the + clients in a room. + + :param sid: Session ID of the client. + :param room: Room name. If the room does not exist it is created. + :param namespace: The Socket.IO namespace for the event. If this + argument is omitted the default namespace is used. + """ + namespace = namespace or '/' + self.logger.info('%s is entering room %s [%s]', sid, room, namespace) + self.manager.enter_room(sid, namespace, room) + + def leave_room(self, sid, room, namespace=None): + """Leave a room. + + This function removes the client from a room. + + :param sid: Session ID of the client. + :param room: Room name. + :param namespace: The Socket.IO namespace for the event. If this + argument is omitted the default namespace is used. + """ + namespace = namespace or '/' + self.logger.info('%s is leaving room %s [%s]', sid, room, namespace) + self.manager.leave_room(sid, namespace, room) + + def close_room(self, room, namespace=None): + """Close a room. + + This function removes all the clients from the given room. + + :param room: Room name. + :param namespace: The Socket.IO namespace for the event. 
If this + argument is omitted the default namespace is used. + """ + namespace = namespace or '/' + self.logger.info('room %s is closing [%s]', room, namespace) + self.manager.close_room(room, namespace) + + def get_session(self, sid, namespace=None): + """Return the user session for a client. + + :param sid: The session id of the client. + :param namespace: The Socket.IO namespace. If this argument is omitted + the default namespace is used. + + The return value is a dictionary. Modifications made to this + dictionary are not guaranteed to be preserved unless + ``save_session()`` is called, or when the ``session`` context manager + is used. + """ + namespace = namespace or '/' + eio_sid = self.manager.eio_sid_from_sid(sid, namespace) + eio_session = self.eio.get_session(eio_sid) + return eio_session.setdefault(namespace, {}) + + def save_session(self, sid, session, namespace=None): + """Store the user session for a client. + + :param sid: The session id of the client. + :param session: The session dictionary. + :param namespace: The Socket.IO namespace. If this argument is omitted + the default namespace is used. + """ + namespace = namespace or '/' + eio_sid = self.manager.eio_sid_from_sid(sid, namespace) + eio_session = self.eio.get_session(eio_sid) + eio_session[namespace] = session + + def session(self, sid, namespace=None): + """Return the user session for a client with context manager syntax. + + :param sid: The session id of the client. + + This is a context manager that returns the user session dictionary for + the client. Any changes that are made to this dictionary inside the + context manager block are saved back to the session. 
Example usage:: + + @sio.on('connect') + def on_connect(sid, environ): + username = authenticate_user(environ) + if not username: + return False + with sio.session(sid) as session: + session['username'] = username + + @sio.on('message') + def on_message(sid, msg): + with sio.session(sid) as session: + print('received message from ', session['username']) + """ + class _session_context_manager: + def __init__(self, server, sid, namespace): + self.server = server + self.sid = sid + self.namespace = namespace + self.session = None + + def __enter__(self): + self.session = self.server.get_session(sid, + namespace=namespace) + return self.session + + def __exit__(self, *args): + self.server.save_session(sid, self.session, + namespace=namespace) + + return _session_context_manager(self, sid, namespace) + + def disconnect(self, sid, namespace=None, ignore_queue=False): + """Disconnect a client. + + :param sid: Session ID of the client. + :param namespace: The Socket.IO namespace to disconnect. If this + argument is omitted the default namespace is used. + :param ignore_queue: Only used when a message queue is configured. If + set to ``True``, the disconnect is processed + locally, without broadcasting on the queue. It is + recommended to always leave this parameter with + its default value of ``False``. + """ + namespace = namespace or '/' + if ignore_queue: + delete_it = self.manager.is_connected(sid, namespace) + else: + delete_it = self.manager.can_disconnect(sid, namespace) + if delete_it: + self.logger.info('Disconnecting %s [%s]', sid, namespace) + eio_sid = self.manager.pre_disconnect(sid, namespace=namespace) + self._send_packet(eio_sid, self.packet_class( + packet.DISCONNECT, namespace=namespace)) + self._trigger_event('disconnect', namespace, sid, + self.reason.SERVER_DISCONNECT) + self.manager.disconnect(sid, namespace=namespace, + ignore_queue=True) + + def shutdown(self): + """Stop Socket.IO background tasks. 
+ + This method stops all background activity initiated by the Socket.IO + server. It must be called before shutting down the web server. + """ + self.logger.info('Socket.IO is shutting down') + self.eio.shutdown() + + def handle_request(self, environ, start_response): + """Handle an HTTP request from the client. + + This is the entry point of the Socket.IO application, using the same + interface as a WSGI application. For the typical usage, this function + is invoked by the :class:`Middleware` instance, but it can be invoked + directly when the middleware is not used. + + :param environ: The WSGI environment. + :param start_response: The WSGI ``start_response`` function. + + This function returns the HTTP response body to deliver to the client + as a byte sequence. + """ + return self.eio.handle_request(environ, start_response) + + def start_background_task(self, target, *args, **kwargs): + """Start a background task using the appropriate async model. + + This is a utility function that applications can use to start a + background task using the method that is compatible with the + selected async mode. + + :param target: the target function to execute. + :param args: arguments to pass to the function. + :param kwargs: keyword arguments to pass to the function. + + This function returns an object that represents the background task, + on which the ``join()`` methond can be invoked to wait for the task to + complete. + """ + return self.eio.start_background_task(target, *args, **kwargs) + + def sleep(self, seconds=0): + """Sleep for the requested amount of time using the appropriate async + model. + + This is a utility function that applications can use to put a task to + sleep without having to worry about using the correct call for the + selected async mode. 
+ """ + return self.eio.sleep(seconds) + + def instrument(self, auth=None, mode='development', read_only=False, + server_id=None, namespace='/admin', + server_stats_interval=2): + """Instrument the Socket.IO server for monitoring with the `Socket.IO + Admin UI `_. + + :param auth: Authentication credentials for Admin UI access. Set to a + dictionary with the expected login (usually ``username`` + and ``password``) or a list of dictionaries if more than + one set of credentials need to be available. For more + complex authentication methods, set to a callable that + receives the authentication dictionary as an argument and + returns ``True`` if the user is allowed or ``False`` + otherwise. To disable authentication, set this argument to + ``False`` (not recommended, never do this on a production + server). + :param mode: The reporting mode. The default is ``'development'``, + which is best used while debugging, as it may have a + significant performance effect. Set to ``'production'`` to + reduce the amount of information that is reported to the + admin UI. + :param read_only: If set to ``True``, the admin interface will be + read-only, with no option to modify room assignments + or disconnect clients. The default is ``False``. + :param server_id: The server name to use for this server. If this + argument is omitted, the server generates its own + name. + :param namespace: The Socket.IO namespace to use for the admin + interface. The default is ``/admin``. + :param server_stats_interval: The interval in seconds at which the + server emits a summary of it stats to all + connected admins. 
+ """ + from .admin import InstrumentedServer + return InstrumentedServer( + self, auth=auth, mode=mode, read_only=read_only, + server_id=server_id, namespace=namespace, + server_stats_interval=server_stats_interval) + + def _send_packet(self, eio_sid, pkt): + """Send a Socket.IO packet to a client.""" + encoded_packet = pkt.encode() + if isinstance(encoded_packet, list): + for ep in encoded_packet: + self.eio.send(eio_sid, ep) + else: + self.eio.send(eio_sid, encoded_packet) + + def _send_eio_packet(self, eio_sid, eio_pkt): + """Send a raw Engine.IO packet to a client.""" + self.eio.send_packet(eio_sid, eio_pkt) + + def _handle_connect(self, eio_sid, namespace, data): + """Handle a client connection request.""" + namespace = namespace or '/' + sid = None + if namespace in self.handlers or namespace in self.namespace_handlers \ + or self.namespaces == '*' or namespace in self.namespaces: + sid = self.manager.connect(eio_sid, namespace) + if sid is None: + self._send_packet(eio_sid, self.packet_class( + packet.CONNECT_ERROR, data='Unable to connect', + namespace=namespace)) + return + + if self.always_connect: + self._send_packet(eio_sid, self.packet_class( + packet.CONNECT, {'sid': sid}, namespace=namespace)) + fail_reason = exceptions.ConnectionRefusedError().error_args + try: + if data: + success = self._trigger_event( + 'connect', namespace, sid, self.environ[eio_sid], data) + else: + try: + success = self._trigger_event( + 'connect', namespace, sid, self.environ[eio_sid]) + except TypeError: + success = self._trigger_event( + 'connect', namespace, sid, self.environ[eio_sid], None) + except exceptions.ConnectionRefusedError as exc: + fail_reason = exc.error_args + success = False + + if success is False: + if self.always_connect: + self.manager.pre_disconnect(sid, namespace) + self._send_packet(eio_sid, self.packet_class( + packet.DISCONNECT, data=fail_reason, namespace=namespace)) + else: + self._send_packet(eio_sid, self.packet_class( + packet.CONNECT_ERROR, 
data=fail_reason, + namespace=namespace)) + self.manager.disconnect(sid, namespace, ignore_queue=True) + elif not self.always_connect: + self._send_packet(eio_sid, self.packet_class( + packet.CONNECT, {'sid': sid}, namespace=namespace)) + + def _handle_disconnect(self, eio_sid, namespace, reason=None): + """Handle a client disconnect.""" + namespace = namespace or '/' + sid = self.manager.sid_from_eio_sid(eio_sid, namespace) + if not self.manager.is_connected(sid, namespace): # pragma: no cover + return + self.manager.pre_disconnect(sid, namespace=namespace) + self._trigger_event('disconnect', namespace, sid, + reason or self.reason.CLIENT_DISCONNECT) + self.manager.disconnect(sid, namespace, ignore_queue=True) + + def _handle_event(self, eio_sid, namespace, id, data): + """Handle an incoming client event.""" + namespace = namespace or '/' + sid = self.manager.sid_from_eio_sid(eio_sid, namespace) + self.logger.info('received event "%s" from %s [%s]', data[0], sid, + namespace) + if not self.manager.is_connected(sid, namespace): + self.logger.warning('%s is not connected to namespace %s', + sid, namespace) + return + if self.async_handlers: + self.start_background_task(self._handle_event_internal, self, sid, + eio_sid, data, namespace, id) + else: + self._handle_event_internal(self, sid, eio_sid, data, namespace, + id) + + def _handle_event_internal(self, server, sid, eio_sid, data, namespace, + id): + r = server._trigger_event(data[0], namespace, sid, *data[1:]) + if r != self.not_handled and id is not None: + # send ACK packet with the response returned by the handler + # tuples are expanded as multiple arguments + if r is None: + data = [] + elif isinstance(r, tuple): + data = list(r) + else: + data = [r] + server._send_packet(eio_sid, self.packet_class( + packet.ACK, namespace=namespace, id=id, data=data)) + + def _handle_ack(self, eio_sid, namespace, id, data): + """Handle ACK packets from the client.""" + namespace = namespace or '/' + sid = 
self.manager.sid_from_eio_sid(eio_sid, namespace) + self.logger.info('received ack from %s [%s]', sid, namespace) + self.manager.trigger_callback(sid, id, data) + + def _trigger_event(self, event, namespace, *args): + """Invoke an application event handler.""" + # first see if we have an explicit handler for the event + handler, args = self._get_event_handler(event, namespace, args) + if handler: + try: + return handler(*args) + except TypeError: + # legacy disconnect events use only one argument + if event == 'disconnect': + return handler(*args[:-1]) + else: # pragma: no cover + raise + # or else, forward the event to a namespace handler if one exists + handler, args = self._get_namespace_handler(namespace, args) + if handler: + return handler.trigger_event(event, *args) + else: + return self.not_handled + + def _handle_eio_connect(self, eio_sid, environ): + """Handle the Engine.IO connection event.""" + if not self.manager_initialized: + self.manager_initialized = True + self.manager.initialize() + self.environ[eio_sid] = environ + + def _handle_eio_message(self, eio_sid, data): + """Dispatch Engine.IO messages.""" + if eio_sid in self._binary_packet: + pkt = self._binary_packet[eio_sid] + if pkt.add_attachment(data): + del self._binary_packet[eio_sid] + if pkt.packet_type == packet.BINARY_EVENT: + self._handle_event(eio_sid, pkt.namespace, pkt.id, + pkt.data) + else: + self._handle_ack(eio_sid, pkt.namespace, pkt.id, pkt.data) + else: + pkt = self.packet_class(encoded_packet=data) + if pkt.packet_type == packet.CONNECT: + self._handle_connect(eio_sid, pkt.namespace, pkt.data) + elif pkt.packet_type == packet.DISCONNECT: + self._handle_disconnect(eio_sid, pkt.namespace, + self.reason.CLIENT_DISCONNECT) + elif pkt.packet_type == packet.EVENT: + self._handle_event(eio_sid, pkt.namespace, pkt.id, pkt.data) + elif pkt.packet_type == packet.ACK: + self._handle_ack(eio_sid, pkt.namespace, pkt.id, pkt.data) + elif pkt.packet_type == packet.BINARY_EVENT or \ + 
pkt.packet_type == packet.BINARY_ACK: + self._binary_packet[eio_sid] = pkt + elif pkt.packet_type == packet.CONNECT_ERROR: + raise ValueError('Unexpected CONNECT_ERROR packet.') + else: + raise ValueError('Unknown packet type.') + + def _handle_eio_disconnect(self, eio_sid, reason): + """Handle Engine.IO disconnect event.""" + for n in list(self.manager.get_namespaces()).copy(): + self._handle_disconnect(eio_sid, n, reason) + if eio_sid in self.environ: + del self.environ[eio_sid] + + def _engineio_server_class(self): + return engineio.Server diff --git a/env/lib/python3.10/site-packages/socketio/simple_client.py b/env/lib/python3.10/site-packages/socketio/simple_client.py new file mode 100644 index 0000000..6779147 --- /dev/null +++ b/env/lib/python3.10/site-packages/socketio/simple_client.py @@ -0,0 +1,191 @@ +from threading import Event +from socketio import Client +from socketio.exceptions import SocketIOError, TimeoutError, DisconnectedError + + +class SimpleClient: + """A Socket.IO client. + + This class implements a simple, yet fully compliant Socket.IO web client + with support for websocket and long-polling transports. + + The positional and keyword arguments given in the constructor are passed + to the underlying :func:`socketio.Client` object. + """ + def __init__(self, *args, **kwargs): + self.client_args = args + self.client_kwargs = kwargs + self.client = None + self.namespace = '/' + self.connected_event = Event() + self.connected = False + self.input_event = Event() + self.input_buffer = [] + + def connect(self, url, headers={}, auth=None, transports=None, + namespace='/', socketio_path='socket.io', wait_timeout=5): + """Connect to a Socket.IO server. + + :param url: The URL of the Socket.IO server. It can include custom + query string parameters if required by the server. If a + function is provided, the client will invoke it to obtain + the URL each time a connection or reconnection is + attempted. 
+ :param headers: A dictionary with custom headers to send with the + connection request. If a function is provided, the + client will invoke it to obtain the headers dictionary + each time a connection or reconnection is attempted. + :param auth: Authentication data passed to the server with the + connection request, normally a dictionary with one or + more string key/value pairs. If a function is provided, + the client will invoke it to obtain the authentication + data each time a connection or reconnection is attempted. + :param transports: The list of allowed transports. Valid transports + are ``'polling'`` and ``'websocket'``. If not + given, the polling transport is connected first, + then an upgrade to websocket is attempted. + :param namespace: The namespace to connect to as a string. If not + given, the default namespace ``/`` is used. + :param socketio_path: The endpoint where the Socket.IO server is + installed. The default value is appropriate for + most cases. + :param wait_timeout: How long the client should wait for the + connection to be established. The default is 5 + seconds. 
+ """ + if self.connected: + raise RuntimeError('Already connected') + self.namespace = namespace + self.input_buffer = [] + self.input_event.clear() + self.client = Client(*self.client_args, **self.client_kwargs) + + @self.client.event(namespace=self.namespace) + def connect(): # pragma: no cover + self.connected = True + self.connected_event.set() + + @self.client.event(namespace=self.namespace) + def disconnect(): # pragma: no cover + self.connected_event.clear() + + @self.client.event(namespace=self.namespace) + def __disconnect_final(): # pragma: no cover + self.connected = False + self.connected_event.set() + + @self.client.on('*', namespace=self.namespace) + def on_event(event, *args): # pragma: no cover + self.input_buffer.append([event, *args]) + self.input_event.set() + + self.client.connect(url, headers=headers, auth=auth, + transports=transports, namespaces=[namespace], + socketio_path=socketio_path, + wait_timeout=wait_timeout) + + @property + def sid(self): + """The session ID received from the server. + + The session ID is not guaranteed to remain constant throughout the life + of the connection, as reconnections can cause it to change. + """ + return self.client.get_sid(self.namespace) if self.client else None + + @property + def transport(self): + """The name of the transport currently in use. + + The transport is returned as a string and can be one of ``polling`` + and ``websocket``. + """ + return self.client.transport if self.client else '' + + def emit(self, event, data=None): + """Emit an event to the server. + + :param event: The event name. It can be any string. The event names + ``'connect'``, ``'message'`` and ``'disconnect'`` are + reserved and should not be used. + :param data: The data to send to the server. Data can be of + type ``str``, ``bytes``, ``list`` or ``dict``. To send + multiple arguments, use a tuple where each element is of + one of the types indicated above. 
+ + This method schedules the event to be sent out and returns, without + actually waiting for its delivery. In cases where the client needs to + ensure that the event was received, :func:`socketio.SimpleClient.call` + should be used instead. + """ + while True: + self.connected_event.wait() + if not self.connected: + raise DisconnectedError() + try: + return self.client.emit(event, data, namespace=self.namespace) + except SocketIOError: + pass + + def call(self, event, data=None, timeout=60): + """Emit an event to the server and wait for a response. + + This method issues an emit and waits for the server to provide a + response or acknowledgement. If the response does not arrive before the + timeout, then a ``TimeoutError`` exception is raised. + + :param event: The event name. It can be any string. The event names + ``'connect'``, ``'message'`` and ``'disconnect'`` are + reserved and should not be used. + :param data: The data to send to the server. Data can be of + type ``str``, ``bytes``, ``list`` or ``dict``. To send + multiple arguments, use a tuple where each element is of + one of the types indicated above. + :param timeout: The waiting timeout. If the timeout is reached before + the server acknowledges the event, then a + ``TimeoutError`` exception is raised. + """ + while True: + self.connected_event.wait() + if not self.connected: + raise DisconnectedError() + try: + return self.client.call(event, data, namespace=self.namespace, + timeout=timeout) + except SocketIOError: + pass + + def receive(self, timeout=None): + """Wait for an event from the server. + + :param timeout: The waiting timeout. If the timeout is reached before + the server acknowledges the event, then a + ``TimeoutError`` exception is raised. + + The return value is a list with the event name as the first element. If + the server included arguments with the event, they are returned as + additional list elements. 
+ """ + while not self.input_buffer: + if not self.connected_event.wait( + timeout=timeout): # pragma: no cover + raise TimeoutError() + if not self.connected: + raise DisconnectedError() + if not self.input_event.wait(timeout=timeout): + raise TimeoutError() + self.input_event.clear() + return self.input_buffer.pop(0) + + def disconnect(self): + """Disconnect from the server.""" + if self.connected: + self.client.disconnect() + self.client = None + self.connected = False + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.disconnect() diff --git a/env/lib/python3.10/site-packages/socketio/tornado.py b/env/lib/python3.10/site-packages/socketio/tornado.py new file mode 100644 index 0000000..160bd32 --- /dev/null +++ b/env/lib/python3.10/site-packages/socketio/tornado.py @@ -0,0 +1,9 @@ +try: + from engineio.async_drivers.tornado import get_tornado_handler as \ + get_engineio_handler +except ImportError: # pragma: no cover + get_engineio_handler = None + + +def get_tornado_handler(socketio_server): # pragma: no cover + return get_engineio_handler(socketio_server.eio) diff --git a/env/lib/python3.10/site-packages/socketio/zmq_manager.py b/env/lib/python3.10/site-packages/socketio/zmq_manager.py new file mode 100644 index 0000000..468dc26 --- /dev/null +++ b/env/lib/python3.10/site-packages/socketio/zmq_manager.py @@ -0,0 +1,105 @@ +import pickle +import re + +from .pubsub_manager import PubSubManager + + +class ZmqManager(PubSubManager): # pragma: no cover + """zmq based client manager. + + NOTE: this zmq implementation should be considered experimental at this + time. At this time, eventlet is required to use zmq. + + This class implements a zmq backend for event sharing across multiple + processes. 
To use a zmq backend, initialize the :class:`Server` instance as + follows:: + + url = 'zmq+tcp://hostname:port1+port2' + server = socketio.Server(client_manager=socketio.ZmqManager(url)) + + :param url: The connection URL for the zmq message broker, + which will need to be provided and running. + :param channel: The channel name on which the server sends and receives + notifications. Must be the same in all the servers. + :param write_only: If set to ``True``, only initialize to emit events. The + default of ``False`` initializes the class for emitting + and receiving. + + A zmq message broker must be running for the zmq_manager to work. + you can write your own or adapt one from the following simple broker + below:: + + import zmq + + receiver = zmq.Context().socket(zmq.PULL) + receiver.bind("tcp://*:5555") + + publisher = zmq.Context().socket(zmq.PUB) + publisher.bind("tcp://*:5556") + + while True: + publisher.send(receiver.recv()) + """ + name = 'zmq' + + def __init__(self, url='zmq+tcp://localhost:5555+5556', + channel='socketio', + write_only=False, + logger=None): + try: + from eventlet.green import zmq + except ImportError: + raise RuntimeError('zmq package is not installed ' + '(Run "pip install pyzmq" in your ' + 'virtualenv).') + + r = re.compile(r':\d+\+\d+$') + if not (url.startswith('zmq+tcp://') and r.search(url)): + raise RuntimeError('unexpected connection string: ' + url) + + url = url.replace('zmq+', '') + (sink_url, sub_port) = url.split('+') + sink_port = sink_url.split(':')[-1] + sub_url = sink_url.replace(sink_port, sub_port) + + sink = zmq.Context().socket(zmq.PUSH) + sink.connect(sink_url) + + sub = zmq.Context().socket(zmq.SUB) + sub.setsockopt_string(zmq.SUBSCRIBE, '') + sub.connect(sub_url) + + self.sink = sink + self.sub = sub + self.channel = channel + super().__init__(channel=channel, write_only=write_only, logger=logger) + + def _publish(self, data): + pickled_data = pickle.dumps( + { + 'type': 'message', + 'channel': self.channel, 
+ 'data': data + } + ) + return self.sink.send(pickled_data) + + def zmq_listen(self): + while True: + response = self.sub.recv() + if response is not None: + yield response + + def _listen(self): + for message in self.zmq_listen(): + if isinstance(message, bytes): + try: + message = pickle.loads(message) + except Exception: + pass + if isinstance(message, dict) and \ + message['type'] == 'message' and \ + message['channel'] == self.channel and \ + 'data' in message: + yield message['data'] + return diff --git a/env/lib/python3.10/site-packages/websocket/__init__.py b/env/lib/python3.10/site-packages/websocket/__init__.py new file mode 100644 index 0000000..559b38a --- /dev/null +++ b/env/lib/python3.10/site-packages/websocket/__init__.py @@ -0,0 +1,26 @@ +""" +__init__.py +websocket - WebSocket client library for Python + +Copyright 2024 engn33r + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" +from ._abnf import * +from ._app import WebSocketApp as WebSocketApp, setReconnect as setReconnect +from ._core import * +from ._exceptions import * +from ._logging import * +from ._socket import * + +__version__ = "1.8.0" diff --git a/env/lib/python3.10/site-packages/websocket/_abnf.py b/env/lib/python3.10/site-packages/websocket/_abnf.py new file mode 100644 index 0000000..d7754e0 --- /dev/null +++ b/env/lib/python3.10/site-packages/websocket/_abnf.py @@ -0,0 +1,453 @@ +import array +import os +import struct +import sys +from threading import Lock +from typing import Callable, Optional, Union + +from ._exceptions import WebSocketPayloadException, WebSocketProtocolException +from ._utils import validate_utf8 + +""" +_abnf.py +websocket - WebSocket client library for Python + +Copyright 2024 engn33r + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +try: + # If wsaccel is available, use compiled routines to mask data. + # wsaccel only provides around a 10% speed boost compared + # to the websocket-client _mask() implementation. + # Note that wsaccel is unmaintained. 
+ from wsaccel.xormask import XorMaskerSimple + + def _mask(mask_value: array.array, data_value: array.array) -> bytes: + mask_result: bytes = XorMaskerSimple(mask_value).process(data_value) + return mask_result + +except ImportError: + # wsaccel is not available, use websocket-client _mask() + native_byteorder = sys.byteorder + + def _mask(mask_value: array.array, data_value: array.array) -> bytes: + datalen = len(data_value) + int_data_value = int.from_bytes(data_value, native_byteorder) + int_mask_value = int.from_bytes( + mask_value * (datalen // 4) + mask_value[: datalen % 4], native_byteorder + ) + return (int_data_value ^ int_mask_value).to_bytes(datalen, native_byteorder) + + +__all__ = [ + "ABNF", + "continuous_frame", + "frame_buffer", + "STATUS_NORMAL", + "STATUS_GOING_AWAY", + "STATUS_PROTOCOL_ERROR", + "STATUS_UNSUPPORTED_DATA_TYPE", + "STATUS_STATUS_NOT_AVAILABLE", + "STATUS_ABNORMAL_CLOSED", + "STATUS_INVALID_PAYLOAD", + "STATUS_POLICY_VIOLATION", + "STATUS_MESSAGE_TOO_BIG", + "STATUS_INVALID_EXTENSION", + "STATUS_UNEXPECTED_CONDITION", + "STATUS_BAD_GATEWAY", + "STATUS_TLS_HANDSHAKE_ERROR", +] + +# closing frame status codes. 
+STATUS_NORMAL = 1000 +STATUS_GOING_AWAY = 1001 +STATUS_PROTOCOL_ERROR = 1002 +STATUS_UNSUPPORTED_DATA_TYPE = 1003 +STATUS_STATUS_NOT_AVAILABLE = 1005 +STATUS_ABNORMAL_CLOSED = 1006 +STATUS_INVALID_PAYLOAD = 1007 +STATUS_POLICY_VIOLATION = 1008 +STATUS_MESSAGE_TOO_BIG = 1009 +STATUS_INVALID_EXTENSION = 1010 +STATUS_UNEXPECTED_CONDITION = 1011 +STATUS_SERVICE_RESTART = 1012 +STATUS_TRY_AGAIN_LATER = 1013 +STATUS_BAD_GATEWAY = 1014 +STATUS_TLS_HANDSHAKE_ERROR = 1015 + +VALID_CLOSE_STATUS = ( + STATUS_NORMAL, + STATUS_GOING_AWAY, + STATUS_PROTOCOL_ERROR, + STATUS_UNSUPPORTED_DATA_TYPE, + STATUS_INVALID_PAYLOAD, + STATUS_POLICY_VIOLATION, + STATUS_MESSAGE_TOO_BIG, + STATUS_INVALID_EXTENSION, + STATUS_UNEXPECTED_CONDITION, + STATUS_SERVICE_RESTART, + STATUS_TRY_AGAIN_LATER, + STATUS_BAD_GATEWAY, +) + + +class ABNF: + """ + ABNF frame class. + See http://tools.ietf.org/html/rfc5234 + and http://tools.ietf.org/html/rfc6455#section-5.2 + """ + + # operation code values. + OPCODE_CONT = 0x0 + OPCODE_TEXT = 0x1 + OPCODE_BINARY = 0x2 + OPCODE_CLOSE = 0x8 + OPCODE_PING = 0x9 + OPCODE_PONG = 0xA + + # available operation code value tuple + OPCODES = ( + OPCODE_CONT, + OPCODE_TEXT, + OPCODE_BINARY, + OPCODE_CLOSE, + OPCODE_PING, + OPCODE_PONG, + ) + + # opcode human readable string + OPCODE_MAP = { + OPCODE_CONT: "cont", + OPCODE_TEXT: "text", + OPCODE_BINARY: "binary", + OPCODE_CLOSE: "close", + OPCODE_PING: "ping", + OPCODE_PONG: "pong", + } + + # data length threshold. + LENGTH_7 = 0x7E + LENGTH_16 = 1 << 16 + LENGTH_63 = 1 << 63 + + def __init__( + self, + fin: int = 0, + rsv1: int = 0, + rsv2: int = 0, + rsv3: int = 0, + opcode: int = OPCODE_TEXT, + mask_value: int = 1, + data: Union[str, bytes, None] = "", + ) -> None: + """ + Constructor for ABNF. Please check RFC for arguments. 
+ """ + self.fin = fin + self.rsv1 = rsv1 + self.rsv2 = rsv2 + self.rsv3 = rsv3 + self.opcode = opcode + self.mask_value = mask_value + if data is None: + data = "" + self.data = data + self.get_mask_key = os.urandom + + def validate(self, skip_utf8_validation: bool = False) -> None: + """ + Validate the ABNF frame. + + Parameters + ---------- + skip_utf8_validation: skip utf8 validation. + """ + if self.rsv1 or self.rsv2 or self.rsv3: + raise WebSocketProtocolException("rsv is not implemented, yet") + + if self.opcode not in ABNF.OPCODES: + raise WebSocketProtocolException("Invalid opcode %r", self.opcode) + + if self.opcode == ABNF.OPCODE_PING and not self.fin: + raise WebSocketProtocolException("Invalid ping frame.") + + if self.opcode == ABNF.OPCODE_CLOSE: + l = len(self.data) + if not l: + return + if l == 1 or l >= 126: + raise WebSocketProtocolException("Invalid close frame.") + if l > 2 and not skip_utf8_validation and not validate_utf8(self.data[2:]): + raise WebSocketProtocolException("Invalid close frame.") + + code = 256 * int(self.data[0]) + int(self.data[1]) + if not self._is_valid_close_status(code): + raise WebSocketProtocolException("Invalid close opcode %r", code) + + @staticmethod + def _is_valid_close_status(code: int) -> bool: + return code in VALID_CLOSE_STATUS or (3000 <= code < 5000) + + def __str__(self) -> str: + return f"fin={self.fin} opcode={self.opcode} data={self.data}" + + @staticmethod + def create_frame(data: Union[bytes, str], opcode: int, fin: int = 1) -> "ABNF": + """ + Create frame to send text, binary and other data. + + Parameters + ---------- + data: str + data to send. This is string value(byte array). + If opcode is OPCODE_TEXT and this value is unicode, + data value is converted into unicode string, automatically. + opcode: int + operation code. please see OPCODE_MAP. + fin: int + fin flag. if set to 0, create continue fragmentation. 
+ """ + if opcode == ABNF.OPCODE_TEXT and isinstance(data, str): + data = data.encode("utf-8") + # mask must be set if send data from client + return ABNF(fin, 0, 0, 0, opcode, 1, data) + + def format(self) -> bytes: + """ + Format this object to string(byte array) to send data to server. + """ + if any(x not in (0, 1) for x in [self.fin, self.rsv1, self.rsv2, self.rsv3]): + raise ValueError("not 0 or 1") + if self.opcode not in ABNF.OPCODES: + raise ValueError("Invalid OPCODE") + length = len(self.data) + if length >= ABNF.LENGTH_63: + raise ValueError("data is too long") + + frame_header = chr( + self.fin << 7 + | self.rsv1 << 6 + | self.rsv2 << 5 + | self.rsv3 << 4 + | self.opcode + ).encode("latin-1") + if length < ABNF.LENGTH_7: + frame_header += chr(self.mask_value << 7 | length).encode("latin-1") + elif length < ABNF.LENGTH_16: + frame_header += chr(self.mask_value << 7 | 0x7E).encode("latin-1") + frame_header += struct.pack("!H", length) + else: + frame_header += chr(self.mask_value << 7 | 0x7F).encode("latin-1") + frame_header += struct.pack("!Q", length) + + if not self.mask_value: + if isinstance(self.data, str): + self.data = self.data.encode("utf-8") + return frame_header + self.data + mask_key = self.get_mask_key(4) + return frame_header + self._get_masked(mask_key) + + def _get_masked(self, mask_key: Union[str, bytes]) -> bytes: + s = ABNF.mask(mask_key, self.data) + + if isinstance(mask_key, str): + mask_key = mask_key.encode("utf-8") + + return mask_key + s + + @staticmethod + def mask(mask_key: Union[str, bytes], data: Union[str, bytes]) -> bytes: + """ + Mask or unmask data. Just do xor for each byte + + Parameters + ---------- + mask_key: bytes or str + 4 byte mask. + data: bytes or str + data to mask/unmask. 
+ """ + if data is None: + data = "" + + if isinstance(mask_key, str): + mask_key = mask_key.encode("latin-1") + + if isinstance(data, str): + data = data.encode("latin-1") + + return _mask(array.array("B", mask_key), array.array("B", data)) + + +class frame_buffer: + _HEADER_MASK_INDEX = 5 + _HEADER_LENGTH_INDEX = 6 + + def __init__( + self, recv_fn: Callable[[int], int], skip_utf8_validation: bool + ) -> None: + self.recv = recv_fn + self.skip_utf8_validation = skip_utf8_validation + # Buffers over the packets from the layer beneath until desired amount + # bytes of bytes are received. + self.recv_buffer: list = [] + self.clear() + self.lock = Lock() + + def clear(self) -> None: + self.header: Optional[tuple] = None + self.length: Optional[int] = None + self.mask_value: Union[bytes, str, None] = None + + def has_received_header(self) -> bool: + return self.header is None + + def recv_header(self) -> None: + header = self.recv_strict(2) + b1 = header[0] + fin = b1 >> 7 & 1 + rsv1 = b1 >> 6 & 1 + rsv2 = b1 >> 5 & 1 + rsv3 = b1 >> 4 & 1 + opcode = b1 & 0xF + b2 = header[1] + has_mask = b2 >> 7 & 1 + length_bits = b2 & 0x7F + + self.header = (fin, rsv1, rsv2, rsv3, opcode, has_mask, length_bits) + + def has_mask(self) -> Union[bool, int]: + if not self.header: + return False + header_val: int = self.header[frame_buffer._HEADER_MASK_INDEX] + return header_val + + def has_received_length(self) -> bool: + return self.length is None + + def recv_length(self) -> None: + bits = self.header[frame_buffer._HEADER_LENGTH_INDEX] + length_bits = bits & 0x7F + if length_bits == 0x7E: + v = self.recv_strict(2) + self.length = struct.unpack("!H", v)[0] + elif length_bits == 0x7F: + v = self.recv_strict(8) + self.length = struct.unpack("!Q", v)[0] + else: + self.length = length_bits + + def has_received_mask(self) -> bool: + return self.mask_value is None + + def recv_mask(self) -> None: + self.mask_value = self.recv_strict(4) if self.has_mask() else "" + + def recv_frame(self) -> 
ABNF: + with self.lock: + # Header + if self.has_received_header(): + self.recv_header() + (fin, rsv1, rsv2, rsv3, opcode, has_mask, _) = self.header + + # Frame length + if self.has_received_length(): + self.recv_length() + length = self.length + + # Mask + if self.has_received_mask(): + self.recv_mask() + mask_value = self.mask_value + + # Payload + payload = self.recv_strict(length) + if has_mask: + payload = ABNF.mask(mask_value, payload) + + # Reset for next frame + self.clear() + + frame = ABNF(fin, rsv1, rsv2, rsv3, opcode, has_mask, payload) + frame.validate(self.skip_utf8_validation) + + return frame + + def recv_strict(self, bufsize: int) -> bytes: + shortage = bufsize - sum(map(len, self.recv_buffer)) + while shortage > 0: + # Limit buffer size that we pass to socket.recv() to avoid + # fragmenting the heap -- the number of bytes recv() actually + # reads is limited by socket buffer and is relatively small, + # yet passing large numbers repeatedly causes lots of large + # buffers allocated and then shrunk, which results in + # fragmentation. 
+ bytes_ = self.recv(min(16384, shortage)) + self.recv_buffer.append(bytes_) + shortage -= len(bytes_) + + unified = b"".join(self.recv_buffer) + + if shortage == 0: + self.recv_buffer = [] + return unified + else: + self.recv_buffer = [unified[bufsize:]] + return unified[:bufsize] + + +class continuous_frame: + def __init__(self, fire_cont_frame: bool, skip_utf8_validation: bool) -> None: + self.fire_cont_frame = fire_cont_frame + self.skip_utf8_validation = skip_utf8_validation + self.cont_data: Optional[list] = None + self.recving_frames: Optional[int] = None + + def validate(self, frame: ABNF) -> None: + if not self.recving_frames and frame.opcode == ABNF.OPCODE_CONT: + raise WebSocketProtocolException("Illegal frame") + if self.recving_frames and frame.opcode in ( + ABNF.OPCODE_TEXT, + ABNF.OPCODE_BINARY, + ): + raise WebSocketProtocolException("Illegal frame") + + def add(self, frame: ABNF) -> None: + if self.cont_data: + self.cont_data[1] += frame.data + else: + if frame.opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY): + self.recving_frames = frame.opcode + self.cont_data = [frame.opcode, frame.data] + + if frame.fin: + self.recving_frames = None + + def is_fire(self, frame: ABNF) -> Union[bool, int]: + return frame.fin or self.fire_cont_frame + + def extract(self, frame: ABNF) -> tuple: + data = self.cont_data + self.cont_data = None + frame.data = data[1] + if ( + not self.fire_cont_frame + and data[0] == ABNF.OPCODE_TEXT + and not self.skip_utf8_validation + and not validate_utf8(frame.data) + ): + raise WebSocketPayloadException(f"cannot decode: {repr(frame.data)}") + return data[0], frame diff --git a/env/lib/python3.10/site-packages/websocket/_app.py b/env/lib/python3.10/site-packages/websocket/_app.py new file mode 100644 index 0000000..9fee765 --- /dev/null +++ b/env/lib/python3.10/site-packages/websocket/_app.py @@ -0,0 +1,677 @@ +import inspect +import selectors +import socket +import threading +import time +from typing import Any, Callable, 
Optional, Union + +from . import _logging +from ._abnf import ABNF +from ._core import WebSocket, getdefaulttimeout +from ._exceptions import ( + WebSocketConnectionClosedException, + WebSocketException, + WebSocketTimeoutException, +) +from ._ssl_compat import SSLEOFError +from ._url import parse_url + +""" +_app.py +websocket - WebSocket client library for Python + +Copyright 2024 engn33r + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +__all__ = ["WebSocketApp"] + +RECONNECT = 0 + + +def setReconnect(reconnectInterval: int) -> None: + global RECONNECT + RECONNECT = reconnectInterval + + +class DispatcherBase: + """ + DispatcherBase + """ + + def __init__(self, app: Any, ping_timeout: Union[float, int, None]) -> None: + self.app = app + self.ping_timeout = ping_timeout + + def timeout(self, seconds: Union[float, int, None], callback: Callable) -> None: + time.sleep(seconds) + callback() + + def reconnect(self, seconds: int, reconnector: Callable) -> None: + try: + _logging.info( + f"reconnect() - retrying in {seconds} seconds [{len(inspect.stack())} frames in stack]" + ) + time.sleep(seconds) + reconnector(reconnecting=True) + except KeyboardInterrupt as e: + _logging.info(f"User exited {e}") + raise e + + +class Dispatcher(DispatcherBase): + """ + Dispatcher + """ + + def read( + self, + sock: socket.socket, + read_callback: Callable, + check_callback: Callable, + ) -> None: + sel = selectors.DefaultSelector() + sel.register(self.app.sock.sock, selectors.EVENT_READ) + try: + 
while self.app.keep_running: + if sel.select(self.ping_timeout): + if not read_callback(): + break + check_callback() + finally: + sel.close() + + +class SSLDispatcher(DispatcherBase): + """ + SSLDispatcher + """ + + def read( + self, + sock: socket.socket, + read_callback: Callable, + check_callback: Callable, + ) -> None: + sock = self.app.sock.sock + sel = selectors.DefaultSelector() + sel.register(sock, selectors.EVENT_READ) + try: + while self.app.keep_running: + if self.select(sock, sel): + if not read_callback(): + break + check_callback() + finally: + sel.close() + + def select(self, sock, sel: selectors.DefaultSelector): + sock = self.app.sock.sock + if sock.pending(): + return [ + sock, + ] + + r = sel.select(self.ping_timeout) + + if len(r) > 0: + return r[0][0] + + +class WrappedDispatcher: + """ + WrappedDispatcher + """ + + def __init__(self, app, ping_timeout: Union[float, int, None], dispatcher) -> None: + self.app = app + self.ping_timeout = ping_timeout + self.dispatcher = dispatcher + dispatcher.signal(2, dispatcher.abort) # keyboard interrupt + + def read( + self, + sock: socket.socket, + read_callback: Callable, + check_callback: Callable, + ) -> None: + self.dispatcher.read(sock, read_callback) + self.ping_timeout and self.timeout(self.ping_timeout, check_callback) + + def timeout(self, seconds: float, callback: Callable) -> None: + self.dispatcher.timeout(seconds, callback) + + def reconnect(self, seconds: int, reconnector: Callable) -> None: + self.timeout(seconds, reconnector) + + +class WebSocketApp: + """ + Higher level of APIs are provided. The interface is like JavaScript WebSocket object. 
+ """ + + def __init__( + self, + url: str, + header: Union[list, dict, Callable, None] = None, + on_open: Optional[Callable[[WebSocket], None]] = None, + on_reconnect: Optional[Callable[[WebSocket], None]] = None, + on_message: Optional[Callable[[WebSocket, Any], None]] = None, + on_error: Optional[Callable[[WebSocket, Any], None]] = None, + on_close: Optional[Callable[[WebSocket, Any, Any], None]] = None, + on_ping: Optional[Callable] = None, + on_pong: Optional[Callable] = None, + on_cont_message: Optional[Callable] = None, + keep_running: bool = True, + get_mask_key: Optional[Callable] = None, + cookie: Optional[str] = None, + subprotocols: Optional[list] = None, + on_data: Optional[Callable] = None, + socket: Optional[socket.socket] = None, + ) -> None: + """ + WebSocketApp initialization + + Parameters + ---------- + url: str + Websocket url. + header: list or dict or Callable + Custom header for websocket handshake. + If the parameter is a callable object, it is called just before the connection attempt. + The returned dict or list is used as custom header value. + This could be useful in order to properly setup timestamp dependent headers. + on_open: function + Callback object which is called at opening websocket. + on_open has one argument. + The 1st argument is this class object. + on_reconnect: function + Callback object which is called at reconnecting websocket. + on_reconnect has one argument. + The 1st argument is this class object. + on_message: function + Callback object which is called when received data. + on_message has 2 arguments. + The 1st argument is this class object. + The 2nd argument is utf-8 data received from the server. + on_error: function + Callback object which is called when we get error. + on_error has 2 arguments. + The 1st argument is this class object. + The 2nd argument is exception object. + on_close: function + Callback object which is called when connection is closed. + on_close has 3 arguments. 
+ The 1st argument is this class object. + The 2nd argument is close_status_code. + The 3rd argument is close_msg. + on_cont_message: function + Callback object which is called when a continuation + frame is received. + on_cont_message has 3 arguments. + The 1st argument is this class object. + The 2nd argument is utf-8 string which we get from the server. + The 3rd argument is continue flag. if 0, the data continue + to next frame data + on_data: function + Callback object which is called when a message received. + This is called before on_message or on_cont_message, + and then on_message or on_cont_message is called. + on_data has 4 argument. + The 1st argument is this class object. + The 2nd argument is utf-8 string which we get from the server. + The 3rd argument is data type. ABNF.OPCODE_TEXT or ABNF.OPCODE_BINARY will be came. + The 4th argument is continue flag. If 0, the data continue + keep_running: bool + This parameter is obsolete and ignored. + get_mask_key: function + A callable function to get new mask keys, see the + WebSocket.set_mask_key's docstring for more information. + cookie: str + Cookie value. + subprotocols: list + List of available sub protocols. Default is None. + socket: socket + Pre-initialized stream socket. 
+ """ + self.url = url + self.header = header if header is not None else [] + self.cookie = cookie + + self.on_open = on_open + self.on_reconnect = on_reconnect + self.on_message = on_message + self.on_data = on_data + self.on_error = on_error + self.on_close = on_close + self.on_ping = on_ping + self.on_pong = on_pong + self.on_cont_message = on_cont_message + self.keep_running = False + self.get_mask_key = get_mask_key + self.sock: Optional[WebSocket] = None + self.last_ping_tm = float(0) + self.last_pong_tm = float(0) + self.ping_thread: Optional[threading.Thread] = None + self.stop_ping: Optional[threading.Event] = None + self.ping_interval = float(0) + self.ping_timeout: Union[float, int, None] = None + self.ping_payload = "" + self.subprotocols = subprotocols + self.prepared_socket = socket + self.has_errored = False + self.has_done_teardown = False + self.has_done_teardown_lock = threading.Lock() + + def send(self, data: Union[bytes, str], opcode: int = ABNF.OPCODE_TEXT) -> None: + """ + send message + + Parameters + ---------- + data: str + Message to send. If you set opcode to OPCODE_TEXT, + data must be utf-8 string or unicode. + opcode: int + Operation code of data. Default is OPCODE_TEXT. + """ + + if not self.sock or self.sock.send(data, opcode) == 0: + raise WebSocketConnectionClosedException("Connection is already closed.") + + def send_text(self, text_data: str) -> None: + """ + Sends UTF-8 encoded text. + """ + if not self.sock or self.sock.send(text_data, ABNF.OPCODE_TEXT) == 0: + raise WebSocketConnectionClosedException("Connection is already closed.") + + def send_bytes(self, data: Union[bytes, bytearray]) -> None: + """ + Sends a sequence of bytes. + """ + if not self.sock or self.sock.send(data, ABNF.OPCODE_BINARY) == 0: + raise WebSocketConnectionClosedException("Connection is already closed.") + + def close(self, **kwargs) -> None: + """ + Close websocket connection. 
+ """ + self.keep_running = False + if self.sock: + self.sock.close(**kwargs) + self.sock = None + + def _start_ping_thread(self) -> None: + self.last_ping_tm = self.last_pong_tm = float(0) + self.stop_ping = threading.Event() + self.ping_thread = threading.Thread(target=self._send_ping) + self.ping_thread.daemon = True + self.ping_thread.start() + + def _stop_ping_thread(self) -> None: + if self.stop_ping: + self.stop_ping.set() + if self.ping_thread and self.ping_thread.is_alive(): + self.ping_thread.join(3) + self.last_ping_tm = self.last_pong_tm = float(0) + + def _send_ping(self) -> None: + if self.stop_ping.wait(self.ping_interval) or self.keep_running is False: + return + while not self.stop_ping.wait(self.ping_interval) and self.keep_running is True: + if self.sock: + self.last_ping_tm = time.time() + try: + _logging.debug("Sending ping") + self.sock.ping(self.ping_payload) + except Exception as e: + _logging.debug(f"Failed to send ping: {e}") + + def run_forever( + self, + sockopt: tuple = None, + sslopt: dict = None, + ping_interval: Union[float, int] = 0, + ping_timeout: Union[float, int, None] = None, + ping_payload: str = "", + http_proxy_host: str = None, + http_proxy_port: Union[int, str] = None, + http_no_proxy: list = None, + http_proxy_auth: tuple = None, + http_proxy_timeout: Optional[float] = None, + skip_utf8_validation: bool = False, + host: str = None, + origin: str = None, + dispatcher=None, + suppress_origin: bool = False, + proxy_type: str = None, + reconnect: int = None, + ) -> bool: + """ + Run event loop for WebSocket framework. + + This loop is an infinite loop and is alive while websocket is available. + + Parameters + ---------- + sockopt: tuple + Values for socket.setsockopt. + sockopt must be tuple + and each element is argument of sock.setsockopt. + sslopt: dict + Optional dict object for ssl socket option. + ping_interval: int or float + Automatically send "ping" command + every specified period (in seconds). 
+ If set to 0, no ping is sent periodically. + ping_timeout: int or float + Timeout (in seconds) if the pong message is not received. + ping_payload: str + Payload message to send with each ping. + http_proxy_host: str + HTTP proxy host name. + http_proxy_port: int or str + HTTP proxy port. If not set, set to 80. + http_no_proxy: list + Whitelisted host names that don't use the proxy. + http_proxy_timeout: int or float + HTTP proxy timeout, default is 60 sec as per python-socks. + http_proxy_auth: tuple + HTTP proxy auth information. tuple of username and password. Default is None. + skip_utf8_validation: bool + skip utf8 validation. + host: str + update host header. + origin: str + update origin header. + dispatcher: Dispatcher object + customize reading data from socket. + suppress_origin: bool + suppress outputting origin header. + proxy_type: str + type of proxy from: http, socks4, socks4a, socks5, socks5h + reconnect: int + delay interval when reconnecting + + Returns + ------- + teardown: bool + False if the `WebSocketApp` is closed or caught KeyboardInterrupt, + True if any other exception was raised during a loop. + """ + + if reconnect is None: + reconnect = RECONNECT + + if ping_timeout is not None and ping_timeout <= 0: + raise WebSocketException("Ensure ping_timeout > 0") + if ping_interval is not None and ping_interval < 0: + raise WebSocketException("Ensure ping_interval >= 0") + if ping_timeout and ping_interval and ping_interval <= ping_timeout: + raise WebSocketException("Ensure ping_interval > ping_timeout") + if not sockopt: + sockopt = () + if not sslopt: + sslopt = {} + if self.sock: + raise WebSocketException("socket is already opened") + + self.ping_interval = ping_interval + self.ping_timeout = ping_timeout + self.ping_payload = ping_payload + self.has_done_teardown = False + self.keep_running = True + + def teardown(close_frame: ABNF = None): + """ + Tears down the connection. 
+ + Parameters + ---------- + close_frame: ABNF frame + If close_frame is set, the on_close handler is invoked + with the statusCode and reason from the provided frame. + """ + + # teardown() is called in many code paths to ensure resources are cleaned up and on_close is fired. + # To ensure the work is only done once, we use this bool and lock. + with self.has_done_teardown_lock: + if self.has_done_teardown: + return + self.has_done_teardown = True + + self._stop_ping_thread() + self.keep_running = False + if self.sock: + self.sock.close() + close_status_code, close_reason = self._get_close_args( + close_frame if close_frame else None + ) + self.sock = None + + # Finally call the callback AFTER all teardown is complete + self._callback(self.on_close, close_status_code, close_reason) + + def setSock(reconnecting: bool = False) -> None: + if reconnecting and self.sock: + self.sock.shutdown() + + self.sock = WebSocket( + self.get_mask_key, + sockopt=sockopt, + sslopt=sslopt, + fire_cont_frame=self.on_cont_message is not None, + skip_utf8_validation=skip_utf8_validation, + enable_multithread=True, + ) + + self.sock.settimeout(getdefaulttimeout()) + try: + header = self.header() if callable(self.header) else self.header + + self.sock.connect( + self.url, + header=header, + cookie=self.cookie, + http_proxy_host=http_proxy_host, + http_proxy_port=http_proxy_port, + http_no_proxy=http_no_proxy, + http_proxy_auth=http_proxy_auth, + http_proxy_timeout=http_proxy_timeout, + subprotocols=self.subprotocols, + host=host, + origin=origin, + suppress_origin=suppress_origin, + proxy_type=proxy_type, + socket=self.prepared_socket, + ) + + _logging.info("Websocket connected") + + if self.ping_interval: + self._start_ping_thread() + + if reconnecting and self.on_reconnect: + self._callback(self.on_reconnect) + else: + self._callback(self.on_open) + + dispatcher.read(self.sock.sock, read, check) + except ( + WebSocketConnectionClosedException, + ConnectionRefusedError, + 
KeyboardInterrupt, + SystemExit, + Exception, + ) as e: + handleDisconnect(e, reconnecting) + + def read() -> bool: + if not self.keep_running: + return teardown() + + try: + op_code, frame = self.sock.recv_data_frame(True) + except ( + WebSocketConnectionClosedException, + KeyboardInterrupt, + SSLEOFError, + ) as e: + if custom_dispatcher: + return handleDisconnect(e, bool(reconnect)) + else: + raise e + + if op_code == ABNF.OPCODE_CLOSE: + return teardown(frame) + elif op_code == ABNF.OPCODE_PING: + self._callback(self.on_ping, frame.data) + elif op_code == ABNF.OPCODE_PONG: + self.last_pong_tm = time.time() + self._callback(self.on_pong, frame.data) + elif op_code == ABNF.OPCODE_CONT and self.on_cont_message: + self._callback(self.on_data, frame.data, frame.opcode, frame.fin) + self._callback(self.on_cont_message, frame.data, frame.fin) + else: + data = frame.data + if op_code == ABNF.OPCODE_TEXT and not skip_utf8_validation: + data = data.decode("utf-8") + self._callback(self.on_data, data, frame.opcode, True) + self._callback(self.on_message, data) + + return True + + def check() -> bool: + if self.ping_timeout: + has_timeout_expired = ( + time.time() - self.last_ping_tm > self.ping_timeout + ) + has_pong_not_arrived_after_last_ping = ( + self.last_pong_tm - self.last_ping_tm < 0 + ) + has_pong_arrived_too_late = ( + self.last_pong_tm - self.last_ping_tm > self.ping_timeout + ) + + if ( + self.last_ping_tm + and has_timeout_expired + and ( + has_pong_not_arrived_after_last_ping + or has_pong_arrived_too_late + ) + ): + raise WebSocketTimeoutException("ping/pong timed out") + return True + + def handleDisconnect( + e: Union[ + WebSocketConnectionClosedException, + ConnectionRefusedError, + KeyboardInterrupt, + SystemExit, + Exception, + ], + reconnecting: bool = False, + ) -> bool: + self.has_errored = True + self._stop_ping_thread() + if not reconnecting: + self._callback(self.on_error, e) + + if isinstance(e, (KeyboardInterrupt, SystemExit)): + teardown() + # 
Propagate further + raise + + if reconnect: + _logging.info(f"{e} - reconnect") + if custom_dispatcher: + _logging.debug( + f"Calling custom dispatcher reconnect [{len(inspect.stack())} frames in stack]" + ) + dispatcher.reconnect(reconnect, setSock) + else: + _logging.error(f"{e} - goodbye") + teardown() + + custom_dispatcher = bool(dispatcher) + dispatcher = self.create_dispatcher( + ping_timeout, dispatcher, parse_url(self.url)[3] + ) + + try: + setSock() + if not custom_dispatcher and reconnect: + while self.keep_running: + _logging.debug( + f"Calling dispatcher reconnect [{len(inspect.stack())} frames in stack]" + ) + dispatcher.reconnect(reconnect, setSock) + except (KeyboardInterrupt, Exception) as e: + _logging.info(f"tearing down on exception {e}") + teardown() + finally: + if not custom_dispatcher: + # Ensure teardown was called before returning from run_forever + teardown() + + return self.has_errored + + def create_dispatcher( + self, + ping_timeout: Union[float, int, None], + dispatcher: Optional[DispatcherBase] = None, + is_ssl: bool = False, + ) -> Union[Dispatcher, SSLDispatcher, WrappedDispatcher]: + if dispatcher: # If custom dispatcher is set, use WrappedDispatcher + return WrappedDispatcher(self, ping_timeout, dispatcher) + timeout = ping_timeout or 10 + if is_ssl: + return SSLDispatcher(self, timeout) + return Dispatcher(self, timeout) + + def _get_close_args(self, close_frame: ABNF) -> list: + """ + _get_close_args extracts the close code and reason from the close body + if it exists (RFC6455 says WebSocket Connection Close Code is optional) + """ + # Need to catch the case where close_frame is None + # Otherwise the following if statement causes an error + if not self.on_close or not close_frame: + return [None, None] + + # Extract close frame status code + if close_frame.data and len(close_frame.data) >= 2: + close_status_code = 256 * int(close_frame.data[0]) + int( + close_frame.data[1] + ) + reason = close_frame.data[2:] + if 
isinstance(reason, bytes): + reason = reason.decode("utf-8") + return [close_status_code, reason] + else: + # Most likely reached this because len(close_frame_data.data) < 2 + return [None, None] + + def _callback(self, callback, *args) -> None: + if callback: + try: + callback(self, *args) + + except Exception as e: + _logging.error(f"error from callback {callback}: {e}") + if self.on_error: + self.on_error(self, e) diff --git a/env/lib/python3.10/site-packages/websocket/_cookiejar.py b/env/lib/python3.10/site-packages/websocket/_cookiejar.py new file mode 100644 index 0000000..7480e5f --- /dev/null +++ b/env/lib/python3.10/site-packages/websocket/_cookiejar.py @@ -0,0 +1,75 @@ +import http.cookies +from typing import Optional + +""" +_cookiejar.py +websocket - WebSocket client library for Python + +Copyright 2024 engn33r + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" + + +class SimpleCookieJar: + def __init__(self) -> None: + self.jar: dict = {} + + def add(self, set_cookie: Optional[str]) -> None: + if set_cookie: + simple_cookie = http.cookies.SimpleCookie(set_cookie) + + for v in simple_cookie.values(): + if domain := v.get("domain"): + if not domain.startswith("."): + domain = f".{domain}" + cookie = ( + self.jar.get(domain) + if self.jar.get(domain) + else http.cookies.SimpleCookie() + ) + cookie.update(simple_cookie) + self.jar[domain.lower()] = cookie + + def set(self, set_cookie: str) -> None: + if set_cookie: + simple_cookie = http.cookies.SimpleCookie(set_cookie) + + for v in simple_cookie.values(): + if domain := v.get("domain"): + if not domain.startswith("."): + domain = f".{domain}" + self.jar[domain.lower()] = simple_cookie + + def get(self, host: str) -> str: + if not host: + return "" + + cookies = [] + for domain, _ in self.jar.items(): + host = host.lower() + if host.endswith(domain) or host == domain[1:]: + cookies.append(self.jar.get(domain)) + + return "; ".join( + filter( + None, + sorted( + [ + f"{k}={v.value}" + for cookie in filter(None, cookies) + for k, v in cookie.items() + ] + ), + ) + ) diff --git a/env/lib/python3.10/site-packages/websocket/_core.py b/env/lib/python3.10/site-packages/websocket/_core.py new file mode 100644 index 0000000..f940ed0 --- /dev/null +++ b/env/lib/python3.10/site-packages/websocket/_core.py @@ -0,0 +1,647 @@ +import socket +import struct +import threading +import time +from typing import Optional, Union + +# websocket modules +from ._abnf import ABNF, STATUS_NORMAL, continuous_frame, frame_buffer +from ._exceptions import WebSocketProtocolException, WebSocketConnectionClosedException +from ._handshake import SUPPORTED_REDIRECT_STATUSES, handshake +from ._http import connect, proxy_info +from ._logging import debug, error, trace, isEnabledForError, isEnabledForTrace +from ._socket import getdefaulttimeout, recv, send, sock_opt +from ._ssl_compat import ssl +from 
._utils import NoLock + +""" +_core.py +websocket - WebSocket client library for Python + +Copyright 2024 engn33r + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +__all__ = ["WebSocket", "create_connection"] + + +class WebSocket: + """ + Low level WebSocket interface. + + This class is based on the WebSocket protocol `draft-hixie-thewebsocketprotocol-76 `_ + + We can connect to the websocket server and send/receive data. + The following example is an echo client. + + >>> import websocket + >>> ws = websocket.WebSocket() + >>> ws.connect("ws://echo.websocket.events") + >>> ws.recv() + 'echo.websocket.events sponsored by Lob.com' + >>> ws.send("Hello, Server") + 19 + >>> ws.recv() + 'Hello, Server' + >>> ws.close() + + Parameters + ---------- + get_mask_key: func + A callable function to get new mask keys, see the + WebSocket.set_mask_key's docstring for more information. + sockopt: tuple + Values for socket.setsockopt. + sockopt must be tuple and each element is argument of sock.setsockopt. + sslopt: dict + Optional dict object for ssl socket options. See FAQ for details. + fire_cont_frame: bool + Fire recv event for each cont frame. Default is False. + enable_multithread: bool + If set to True, lock send method. + skip_utf8_validation: bool + Skip utf8 validation. 
+ """ + + def __init__( + self, + get_mask_key=None, + sockopt=None, + sslopt=None, + fire_cont_frame: bool = False, + enable_multithread: bool = True, + skip_utf8_validation: bool = False, + **_, + ): + """ + Initialize WebSocket object. + + Parameters + ---------- + sslopt: dict + Optional dict object for ssl socket options. See FAQ for details. + """ + self.sock_opt = sock_opt(sockopt, sslopt) + self.handshake_response = None + self.sock: Optional[socket.socket] = None + + self.connected = False + self.get_mask_key = get_mask_key + # These buffer over the build-up of a single frame. + self.frame_buffer = frame_buffer(self._recv, skip_utf8_validation) + self.cont_frame = continuous_frame(fire_cont_frame, skip_utf8_validation) + + if enable_multithread: + self.lock = threading.Lock() + self.readlock = threading.Lock() + else: + self.lock = NoLock() + self.readlock = NoLock() + + def __iter__(self): + """ + Allow iteration over websocket, implying sequential `recv` executions. + """ + while True: + yield self.recv() + + def __next__(self): + return self.recv() + + def next(self): + return self.__next__() + + def fileno(self): + return self.sock.fileno() + + def set_mask_key(self, func): + """ + Set function to create mask key. You can customize mask key generator. + Mainly, this is for testing purpose. + + Parameters + ---------- + func: func + callable object. the func takes 1 argument as integer. + The argument means length of mask key. + This func must return string(byte array), + which length is argument specified. + """ + self.get_mask_key = func + + def gettimeout(self) -> Union[float, int, None]: + """ + Get the websocket timeout (in seconds) as an int or float + + Returns + ---------- + timeout: int or float + returns timeout value (in seconds). This value could be either float/integer. + """ + return self.sock_opt.timeout + + def settimeout(self, timeout: Union[float, int, None]): + """ + Set the timeout to the websocket. 
+ + Parameters + ---------- + timeout: int or float + timeout time (in seconds). This value could be either float/integer. + """ + self.sock_opt.timeout = timeout + if self.sock: + self.sock.settimeout(timeout) + + timeout = property(gettimeout, settimeout) + + def getsubprotocol(self): + """ + Get subprotocol + """ + if self.handshake_response: + return self.handshake_response.subprotocol + else: + return None + + subprotocol = property(getsubprotocol) + + def getstatus(self): + """ + Get handshake status + """ + if self.handshake_response: + return self.handshake_response.status + else: + return None + + status = property(getstatus) + + def getheaders(self): + """ + Get handshake response header + """ + if self.handshake_response: + return self.handshake_response.headers + else: + return None + + def is_ssl(self): + try: + return isinstance(self.sock, ssl.SSLSocket) + except: + return False + + headers = property(getheaders) + + def connect(self, url, **options): + """ + Connect to url. url is websocket url scheme. + ie. ws://host:port/resource + You can customize using 'options'. + If you set "header" list object, you can set your own custom header. + + >>> ws = WebSocket() + >>> ws.connect("ws://echo.websocket.events", + ... header=["User-Agent: MyProgram", + ... "x-custom: header"]) + + Parameters + ---------- + header: list or dict + Custom http header list or dict. + cookie: str + Cookie value. + origin: str + Custom origin url. + connection: str + Custom connection header value. + Default value "Upgrade" set in _handshake.py + suppress_origin: bool + Suppress outputting origin header. + host: str + Custom host header string. + timeout: int or float + Socket timeout time. This value is an integer or float. + If you set None for this value, it means "use default_timeout value" + http_proxy_host: str + HTTP proxy host name. + http_proxy_port: str or int + HTTP proxy port. Default is 80. + http_no_proxy: list + Whitelisted host names that don't use the proxy. 
+ http_proxy_auth: tuple + HTTP proxy auth information. Tuple of username and password. Default is None. + http_proxy_timeout: int or float + HTTP proxy timeout, default is 60 sec as per python-socks. + redirect_limit: int + Number of redirects to follow. + subprotocols: list + List of available subprotocols. Default is None. + socket: socket + Pre-initialized stream socket. + """ + self.sock_opt.timeout = options.get("timeout", self.sock_opt.timeout) + self.sock, addrs = connect( + url, self.sock_opt, proxy_info(**options), options.pop("socket", None) + ) + + try: + self.handshake_response = handshake(self.sock, url, *addrs, **options) + for _ in range(options.pop("redirect_limit", 3)): + if self.handshake_response.status in SUPPORTED_REDIRECT_STATUSES: + url = self.handshake_response.headers["location"] + self.sock.close() + self.sock, addrs = connect( + url, + self.sock_opt, + proxy_info(**options), + options.pop("socket", None), + ) + self.handshake_response = handshake( + self.sock, url, *addrs, **options + ) + self.connected = True + except: + if self.sock: + self.sock.close() + self.sock = None + raise + + def send(self, payload: Union[bytes, str], opcode: int = ABNF.OPCODE_TEXT) -> int: + """ + Send the data as string. + + Parameters + ---------- + payload: str + Payload must be utf-8 string or unicode, + If the opcode is OPCODE_TEXT. + Otherwise, it must be string(byte array). + opcode: int + Operation code (opcode) to send. + """ + + frame = ABNF.create_frame(payload, opcode) + return self.send_frame(frame) + + def send_text(self, text_data: str) -> int: + """ + Sends UTF-8 encoded text. + """ + return self.send(text_data, ABNF.OPCODE_TEXT) + + def send_bytes(self, data: Union[bytes, bytearray]) -> int: + """ + Sends a sequence of bytes. + """ + return self.send(data, ABNF.OPCODE_BINARY) + + def send_frame(self, frame) -> int: + """ + Send the data frame. 
+ + >>> ws = create_connection("ws://echo.websocket.events") + >>> frame = ABNF.create_frame("Hello", ABNF.OPCODE_TEXT) + >>> ws.send_frame(frame) + >>> cont_frame = ABNF.create_frame("My name is ", ABNF.OPCODE_CONT, 0) + >>> ws.send_frame(frame) + >>> cont_frame = ABNF.create_frame("Foo Bar", ABNF.OPCODE_CONT, 1) + >>> ws.send_frame(frame) + + Parameters + ---------- + frame: ABNF frame + frame data created by ABNF.create_frame + """ + if self.get_mask_key: + frame.get_mask_key = self.get_mask_key + data = frame.format() + length = len(data) + if isEnabledForTrace(): + trace(f"++Sent raw: {repr(data)}") + trace(f"++Sent decoded: {frame.__str__()}") + with self.lock: + while data: + l = self._send(data) + data = data[l:] + + return length + + def send_binary(self, payload: bytes) -> int: + """ + Send a binary message (OPCODE_BINARY). + + Parameters + ---------- + payload: bytes + payload of message to send. + """ + return self.send(payload, ABNF.OPCODE_BINARY) + + def ping(self, payload: Union[str, bytes] = ""): + """ + Send ping data. + + Parameters + ---------- + payload: str + data payload to send server. + """ + if isinstance(payload, str): + payload = payload.encode("utf-8") + self.send(payload, ABNF.OPCODE_PING) + + def pong(self, payload: Union[str, bytes] = ""): + """ + Send pong data. + + Parameters + ---------- + payload: str + data payload to send server. + """ + if isinstance(payload, str): + payload = payload.encode("utf-8") + self.send(payload, ABNF.OPCODE_PONG) + + def recv(self) -> Union[str, bytes]: + """ + Receive string data(byte array) from the server. + + Returns + ---------- + data: string (byte array) value. 
+ """ + with self.readlock: + opcode, data = self.recv_data() + if opcode == ABNF.OPCODE_TEXT: + data_received: Union[bytes, str] = data + if isinstance(data_received, bytes): + return data_received.decode("utf-8") + elif isinstance(data_received, str): + return data_received + elif opcode == ABNF.OPCODE_BINARY: + data_binary: bytes = data + return data_binary + else: + return "" + + def recv_data(self, control_frame: bool = False) -> tuple: + """ + Receive data with operation code. + + Parameters + ---------- + control_frame: bool + a boolean flag indicating whether to return control frame + data, defaults to False + + Returns + ------- + opcode, frame.data: tuple + tuple of operation code and string(byte array) value. + """ + opcode, frame = self.recv_data_frame(control_frame) + return opcode, frame.data + + def recv_data_frame(self, control_frame: bool = False) -> tuple: + """ + Receive data with operation code. + + If a valid ping message is received, a pong response is sent. + + Parameters + ---------- + control_frame: bool + a boolean flag indicating whether to return control frame + data, defaults to False + + Returns + ------- + frame.opcode, frame: tuple + tuple of operation code and string(byte array) value. 
+ """ + while True: + frame = self.recv_frame() + if isEnabledForTrace(): + trace(f"++Rcv raw: {repr(frame.format())}") + trace(f"++Rcv decoded: {frame.__str__()}") + if not frame: + # handle error: + # 'NoneType' object has no attribute 'opcode' + raise WebSocketProtocolException(f"Not a valid frame {frame}") + elif frame.opcode in ( + ABNF.OPCODE_TEXT, + ABNF.OPCODE_BINARY, + ABNF.OPCODE_CONT, + ): + self.cont_frame.validate(frame) + self.cont_frame.add(frame) + + if self.cont_frame.is_fire(frame): + return self.cont_frame.extract(frame) + + elif frame.opcode == ABNF.OPCODE_CLOSE: + self.send_close() + return frame.opcode, frame + elif frame.opcode == ABNF.OPCODE_PING: + if len(frame.data) < 126: + self.pong(frame.data) + else: + raise WebSocketProtocolException("Ping message is too long") + if control_frame: + return frame.opcode, frame + elif frame.opcode == ABNF.OPCODE_PONG: + if control_frame: + return frame.opcode, frame + + def recv_frame(self): + """ + Receive data as frame from server. + + Returns + ------- + self.frame_buffer.recv_frame(): ABNF frame object + """ + return self.frame_buffer.recv_frame() + + def send_close(self, status: int = STATUS_NORMAL, reason: bytes = b""): + """ + Send close data to the server. + + Parameters + ---------- + status: int + Status code to send. See STATUS_XXX. + reason: str or bytes + The reason to close. This must be string or UTF-8 bytes. + """ + if status < 0 or status >= ABNF.LENGTH_16: + raise ValueError("code is invalid range") + self.connected = False + self.send(struct.pack("!H", status) + reason, ABNF.OPCODE_CLOSE) + + def close(self, status: int = STATUS_NORMAL, reason: bytes = b"", timeout: int = 3): + """ + Close Websocket object + + Parameters + ---------- + status: int + Status code to send. See VALID_CLOSE_STATUS in ABNF. + reason: bytes + The reason to close in UTF-8. + timeout: int or float + Timeout until receive a close frame. + If None, it will wait forever until receive a close frame. 
+ """ + if not self.connected: + return + if status < 0 or status >= ABNF.LENGTH_16: + raise ValueError("code is invalid range") + + try: + self.connected = False + self.send(struct.pack("!H", status) + reason, ABNF.OPCODE_CLOSE) + sock_timeout = self.sock.gettimeout() + self.sock.settimeout(timeout) + start_time = time.time() + while timeout is None or time.time() - start_time < timeout: + try: + frame = self.recv_frame() + if frame.opcode != ABNF.OPCODE_CLOSE: + continue + if isEnabledForError(): + recv_status = struct.unpack("!H", frame.data[0:2])[0] + if recv_status >= 3000 and recv_status <= 4999: + debug(f"close status: {repr(recv_status)}") + elif recv_status != STATUS_NORMAL: + error(f"close status: {repr(recv_status)}") + break + except: + break + self.sock.settimeout(sock_timeout) + self.sock.shutdown(socket.SHUT_RDWR) + except: + pass + + self.shutdown() + + def abort(self): + """ + Low-level asynchronous abort, wakes up other threads that are waiting in recv_* + """ + if self.connected: + self.sock.shutdown(socket.SHUT_RDWR) + + def shutdown(self): + """ + close socket, immediately. + """ + if self.sock: + self.sock.close() + self.sock = None + self.connected = False + + def _send(self, data: Union[str, bytes]): + return send(self.sock, data) + + def _recv(self, bufsize): + try: + return recv(self.sock, bufsize) + except WebSocketConnectionClosedException: + if self.sock: + self.sock.close() + self.sock = None + self.connected = False + raise + + +def create_connection(url: str, timeout=None, class_=WebSocket, **options): + """ + Connect to url and return websocket object. + + Connect to url and return the WebSocket object. + Passing optional timeout parameter will set the timeout on the socket. + If no timeout is supplied, + the global default timeout setting returned by getdefaulttimeout() is used. + You can customize using 'options'. + If you set "header" list object, you can set your own custom header. 
+ + >>> conn = create_connection("ws://echo.websocket.events", + ... header=["User-Agent: MyProgram", + ... "x-custom: header"]) + + Parameters + ---------- + class_: class + class to instantiate when creating the connection. It has to implement + settimeout and connect. It's __init__ should be compatible with + WebSocket.__init__, i.e. accept all of it's kwargs. + header: list or dict + custom http header list or dict. + cookie: str + Cookie value. + origin: str + custom origin url. + suppress_origin: bool + suppress outputting origin header. + host: str + custom host header string. + timeout: int or float + socket timeout time. This value could be either float/integer. + If set to None, it uses the default_timeout value. + http_proxy_host: str + HTTP proxy host name. + http_proxy_port: str or int + HTTP proxy port. If not set, set to 80. + http_no_proxy: list + Whitelisted host names that don't use the proxy. + http_proxy_auth: tuple + HTTP proxy auth information. tuple of username and password. Default is None. + http_proxy_timeout: int or float + HTTP proxy timeout, default is 60 sec as per python-socks. + enable_multithread: bool + Enable lock for multithread. + redirect_limit: int + Number of redirects to follow. + sockopt: tuple + Values for socket.setsockopt. + sockopt must be a tuple and each element is an argument of sock.setsockopt. + sslopt: dict + Optional dict object for ssl socket options. See FAQ for details. + subprotocols: list + List of available subprotocols. Default is None. + skip_utf8_validation: bool + Skip utf8 validation. + socket: socket + Pre-initialized stream socket. 
+ """ + sockopt = options.pop("sockopt", []) + sslopt = options.pop("sslopt", {}) + fire_cont_frame = options.pop("fire_cont_frame", False) + enable_multithread = options.pop("enable_multithread", True) + skip_utf8_validation = options.pop("skip_utf8_validation", False) + websock = class_( + sockopt=sockopt, + sslopt=sslopt, + fire_cont_frame=fire_cont_frame, + enable_multithread=enable_multithread, + skip_utf8_validation=skip_utf8_validation, + **options, + ) + websock.settimeout(timeout if timeout is not None else getdefaulttimeout()) + websock.connect(url, **options) + return websock diff --git a/env/lib/python3.10/site-packages/websocket/_exceptions.py b/env/lib/python3.10/site-packages/websocket/_exceptions.py new file mode 100644 index 0000000..cd196e4 --- /dev/null +++ b/env/lib/python3.10/site-packages/websocket/_exceptions.py @@ -0,0 +1,94 @@ +""" +_exceptions.py +websocket - WebSocket client library for Python + +Copyright 2024 engn33r + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + + +class WebSocketException(Exception): + """ + WebSocket exception class. + """ + + pass + + +class WebSocketProtocolException(WebSocketException): + """ + If the WebSocket protocol is invalid, this exception will be raised. + """ + + pass + + +class WebSocketPayloadException(WebSocketException): + """ + If the WebSocket payload is invalid, this exception will be raised. 
+ """ + + pass + + +class WebSocketConnectionClosedException(WebSocketException): + """ + If remote host closed the connection or some network error happened, + this exception will be raised. + """ + + pass + + +class WebSocketTimeoutException(WebSocketException): + """ + WebSocketTimeoutException will be raised at socket timeout during read/write data. + """ + + pass + + +class WebSocketProxyException(WebSocketException): + """ + WebSocketProxyException will be raised when proxy error occurred. + """ + + pass + + +class WebSocketBadStatusException(WebSocketException): + """ + WebSocketBadStatusException will be raised when we get bad handshake status code. + """ + + def __init__( + self, + message: str, + status_code: int, + status_message=None, + resp_headers=None, + resp_body=None, + ): + super().__init__(message) + self.status_code = status_code + self.resp_headers = resp_headers + self.resp_body = resp_body + + +class WebSocketAddressException(WebSocketException): + """ + If the websocket address info cannot be found, this exception will be raised. + """ + + pass diff --git a/env/lib/python3.10/site-packages/websocket/_handshake.py b/env/lib/python3.10/site-packages/websocket/_handshake.py new file mode 100644 index 0000000..7bd61b8 --- /dev/null +++ b/env/lib/python3.10/site-packages/websocket/_handshake.py @@ -0,0 +1,202 @@ +""" +_handshake.py +websocket - WebSocket client library for Python + +Copyright 2024 engn33r + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" +import hashlib +import hmac +import os +from base64 import encodebytes as base64encode +from http import HTTPStatus + +from ._cookiejar import SimpleCookieJar +from ._exceptions import WebSocketException, WebSocketBadStatusException +from ._http import read_headers +from ._logging import dump, error +from ._socket import send + +__all__ = ["handshake_response", "handshake", "SUPPORTED_REDIRECT_STATUSES"] + +# websocket supported version. +VERSION = 13 + +SUPPORTED_REDIRECT_STATUSES = ( + HTTPStatus.MOVED_PERMANENTLY, + HTTPStatus.FOUND, + HTTPStatus.SEE_OTHER, + HTTPStatus.TEMPORARY_REDIRECT, + HTTPStatus.PERMANENT_REDIRECT, +) +SUCCESS_STATUSES = SUPPORTED_REDIRECT_STATUSES + (HTTPStatus.SWITCHING_PROTOCOLS,) + +CookieJar = SimpleCookieJar() + + +class handshake_response: + def __init__(self, status: int, headers: dict, subprotocol): + self.status = status + self.headers = headers + self.subprotocol = subprotocol + CookieJar.add(headers.get("set-cookie")) + + +def handshake( + sock, url: str, hostname: str, port: int, resource: str, **options +) -> handshake_response: + headers, key = _get_handshake_headers(resource, url, hostname, port, options) + + header_str = "\r\n".join(headers) + send(sock, header_str) + dump("request header", header_str) + + status, resp = _get_resp_headers(sock) + if status in SUPPORTED_REDIRECT_STATUSES: + return handshake_response(status, resp, None) + success, subproto = _validate(resp, key, options.get("subprotocols")) + if not success: + raise WebSocketException("Invalid WebSocket Header") + + return handshake_response(status, resp, subproto) + + +def _pack_hostname(hostname: str) -> str: + # IPv6 address + if ":" in hostname: + return f"[{hostname}]" + return hostname + + +def _get_handshake_headers( + resource: str, url: str, host: str, port: int, options: dict +) -> tuple: + headers = [f"GET {resource} HTTP/1.1", "Upgrade: websocket"] + if port in [80, 443]: + hostport = _pack_hostname(host) + else: + hostport = 
f"{_pack_hostname(host)}:{port}" + if options.get("host"): + headers.append(f'Host: {options["host"]}') + else: + headers.append(f"Host: {hostport}") + + # scheme indicates whether http or https is used in Origin + # The same approach is used in parse_url of _url.py to set default port + scheme, url = url.split(":", 1) + if not options.get("suppress_origin"): + if "origin" in options and options["origin"] is not None: + headers.append(f'Origin: {options["origin"]}') + elif scheme == "wss": + headers.append(f"Origin: https://{hostport}") + else: + headers.append(f"Origin: http://{hostport}") + + key = _create_sec_websocket_key() + + # Append Sec-WebSocket-Key & Sec-WebSocket-Version if not manually specified + if not options.get("header") or "Sec-WebSocket-Key" not in options["header"]: + headers.append(f"Sec-WebSocket-Key: {key}") + else: + key = options["header"]["Sec-WebSocket-Key"] + + if not options.get("header") or "Sec-WebSocket-Version" not in options["header"]: + headers.append(f"Sec-WebSocket-Version: {VERSION}") + + if not options.get("connection"): + headers.append("Connection: Upgrade") + else: + headers.append(options["connection"]) + + if subprotocols := options.get("subprotocols"): + headers.append(f'Sec-WebSocket-Protocol: {",".join(subprotocols)}') + + if header := options.get("header"): + if isinstance(header, dict): + header = [": ".join([k, v]) for k, v in header.items() if v is not None] + headers.extend(header) + + server_cookie = CookieJar.get(host) + client_cookie = options.get("cookie", None) + + if cookie := "; ".join(filter(None, [server_cookie, client_cookie])): + headers.append(f"Cookie: {cookie}") + + headers.extend(("", "")) + return headers, key + + +def _get_resp_headers(sock, success_statuses: tuple = SUCCESS_STATUSES) -> tuple: + status, resp_headers, status_message = read_headers(sock) + if status not in success_statuses: + content_len = resp_headers.get("content-length") + if content_len: + response_body = sock.recv( + 
int(content_len) + ) # read the body of the HTTP error message response and include it in the exception + else: + response_body = None + raise WebSocketBadStatusException( + f"Handshake status {status} {status_message} -+-+- {resp_headers} -+-+- {response_body}", + status, + status_message, + resp_headers, + response_body, + ) + return status, resp_headers + + +_HEADERS_TO_CHECK = { + "upgrade": "websocket", + "connection": "upgrade", +} + + +def _validate(headers, key: str, subprotocols) -> tuple: + subproto = None + for k, v in _HEADERS_TO_CHECK.items(): + r = headers.get(k, None) + if not r: + return False, None + r = [x.strip().lower() for x in r.split(",")] + if v not in r: + return False, None + + if subprotocols: + subproto = headers.get("sec-websocket-protocol", None) + if not subproto or subproto.lower() not in [s.lower() for s in subprotocols]: + error(f"Invalid subprotocol: {subprotocols}") + return False, None + subproto = subproto.lower() + + result = headers.get("sec-websocket-accept", None) + if not result: + return False, None + result = result.lower() + + if isinstance(result, str): + result = result.encode("utf-8") + + value = f"{key}258EAFA5-E914-47DA-95CA-C5AB0DC85B11".encode("utf-8") + hashed = base64encode(hashlib.sha1(value).digest()).strip().lower() + + if hmac.compare_digest(hashed, result): + return True, subproto + else: + return False, None + + +def _create_sec_websocket_key() -> str: + randomness = os.urandom(16) + return base64encode(randomness).decode("utf-8").strip() diff --git a/env/lib/python3.10/site-packages/websocket/_http.py b/env/lib/python3.10/site-packages/websocket/_http.py new file mode 100644 index 0000000..9b1bf85 --- /dev/null +++ b/env/lib/python3.10/site-packages/websocket/_http.py @@ -0,0 +1,373 @@ +""" +_http.py +websocket - WebSocket client library for Python + +Copyright 2024 engn33r + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" +import errno +import os +import socket +from base64 import encodebytes as base64encode + +from ._exceptions import ( + WebSocketAddressException, + WebSocketException, + WebSocketProxyException, +) +from ._logging import debug, dump, trace +from ._socket import DEFAULT_SOCKET_OPTION, recv_line, send +from ._ssl_compat import HAVE_SSL, ssl +from ._url import get_proxy_info, parse_url + +__all__ = ["proxy_info", "connect", "read_headers"] + +try: + from python_socks._errors import * + from python_socks._types import ProxyType + from python_socks.sync import Proxy + + HAVE_PYTHON_SOCKS = True +except: + HAVE_PYTHON_SOCKS = False + + class ProxyError(Exception): + pass + + class ProxyTimeoutError(Exception): + pass + + class ProxyConnectionError(Exception): + pass + + +class proxy_info: + def __init__(self, **options): + self.proxy_host = options.get("http_proxy_host", None) + if self.proxy_host: + self.proxy_port = options.get("http_proxy_port", 0) + self.auth = options.get("http_proxy_auth", None) + self.no_proxy = options.get("http_no_proxy", None) + self.proxy_protocol = options.get("proxy_type", "http") + # Note: If timeout not specified, default python-socks timeout is 60 seconds + self.proxy_timeout = options.get("http_proxy_timeout", None) + if self.proxy_protocol not in [ + "http", + "socks4", + "socks4a", + "socks5", + "socks5h", + ]: + raise ProxyError( + "Only http, socks4, socks5 proxy protocols are supported" + ) + else: + self.proxy_port = 0 + self.auth = None + self.no_proxy = None + self.proxy_protocol = "http" + + +def _start_proxied_socket(url: str, 
options, proxy) -> tuple: + if not HAVE_PYTHON_SOCKS: + raise WebSocketException( + "Python Socks is needed for SOCKS proxying but is not available" + ) + + hostname, port, resource, is_secure = parse_url(url) + + if proxy.proxy_protocol == "socks4": + rdns = False + proxy_type = ProxyType.SOCKS4 + # socks4a sends DNS through proxy + elif proxy.proxy_protocol == "socks4a": + rdns = True + proxy_type = ProxyType.SOCKS4 + elif proxy.proxy_protocol == "socks5": + rdns = False + proxy_type = ProxyType.SOCKS5 + # socks5h sends DNS through proxy + elif proxy.proxy_protocol == "socks5h": + rdns = True + proxy_type = ProxyType.SOCKS5 + + ws_proxy = Proxy.create( + proxy_type=proxy_type, + host=proxy.proxy_host, + port=int(proxy.proxy_port), + username=proxy.auth[0] if proxy.auth else None, + password=proxy.auth[1] if proxy.auth else None, + rdns=rdns, + ) + + sock = ws_proxy.connect(hostname, port, timeout=proxy.proxy_timeout) + + if is_secure: + if HAVE_SSL: + sock = _ssl_socket(sock, options.sslopt, hostname) + else: + raise WebSocketException("SSL not available.") + + return sock, (hostname, port, resource) + + +def connect(url: str, options, proxy, socket): + # Use _start_proxied_socket() only for socks4 or socks5 proxy + # Use _tunnel() for http proxy + # TODO: Use python-socks for http protocol also, to standardize flow + if proxy.proxy_host and not socket and proxy.proxy_protocol != "http": + return _start_proxied_socket(url, options, proxy) + + hostname, port_from_url, resource, is_secure = parse_url(url) + + if socket: + return socket, (hostname, port_from_url, resource) + + addrinfo_list, need_tunnel, auth = _get_addrinfo_list( + hostname, port_from_url, is_secure, proxy + ) + if not addrinfo_list: + raise WebSocketException(f"Host not found.: {hostname}:{port_from_url}") + + sock = None + try: + sock = _open_socket(addrinfo_list, options.sockopt, options.timeout) + if need_tunnel: + sock = _tunnel(sock, hostname, port_from_url, auth) + + if is_secure: + if 
HAVE_SSL: + sock = _ssl_socket(sock, options.sslopt, hostname) + else: + raise WebSocketException("SSL not available.") + + return sock, (hostname, port_from_url, resource) + except: + if sock: + sock.close() + raise + + +def _get_addrinfo_list(hostname, port: int, is_secure: bool, proxy) -> tuple: + phost, pport, pauth = get_proxy_info( + hostname, + is_secure, + proxy.proxy_host, + proxy.proxy_port, + proxy.auth, + proxy.no_proxy, + ) + try: + # when running on windows 10, getaddrinfo without socktype returns a socktype 0. + # This generates an error exception: `_on_error: exception Socket type must be stream or datagram, not 0` + # or `OSError: [Errno 22] Invalid argument` when creating socket. Force the socket type to SOCK_STREAM. + if not phost: + addrinfo_list = socket.getaddrinfo( + hostname, port, 0, socket.SOCK_STREAM, socket.SOL_TCP + ) + return addrinfo_list, False, None + else: + pport = pport and pport or 80 + # when running on windows 10, the getaddrinfo used above + # returns a socktype 0. 
This generates an error exception: + # _on_error: exception Socket type must be stream or datagram, not 0 + # Force the socket type to SOCK_STREAM + addrinfo_list = socket.getaddrinfo( + phost, pport, 0, socket.SOCK_STREAM, socket.SOL_TCP + ) + return addrinfo_list, True, pauth + except socket.gaierror as e: + raise WebSocketAddressException(e) + + +def _open_socket(addrinfo_list, sockopt, timeout): + err = None + for addrinfo in addrinfo_list: + family, socktype, proto = addrinfo[:3] + sock = socket.socket(family, socktype, proto) + sock.settimeout(timeout) + for opts in DEFAULT_SOCKET_OPTION: + sock.setsockopt(*opts) + for opts in sockopt: + sock.setsockopt(*opts) + + address = addrinfo[4] + err = None + while not err: + try: + sock.connect(address) + except socket.error as error: + sock.close() + error.remote_ip = str(address[0]) + try: + eConnRefused = ( + errno.ECONNREFUSED, + errno.WSAECONNREFUSED, + errno.ENETUNREACH, + ) + except AttributeError: + eConnRefused = (errno.ECONNREFUSED, errno.ENETUNREACH) + if error.errno not in eConnRefused: + raise error + err = error + continue + else: + break + else: + continue + break + else: + if err: + raise err + + return sock + + +def _wrap_sni_socket(sock: socket.socket, sslopt: dict, hostname, check_hostname): + context = sslopt.get("context", None) + if not context: + context = ssl.SSLContext(sslopt.get("ssl_version", ssl.PROTOCOL_TLS_CLIENT)) + # Non default context need to manually enable SSLKEYLOGFILE support by setting the keylog_filename attribute. 
+ # For more details see also: + # * https://docs.python.org/3.8/library/ssl.html?highlight=sslkeylogfile#context-creation + # * https://docs.python.org/3.8/library/ssl.html?highlight=sslkeylogfile#ssl.SSLContext.keylog_filename + context.keylog_filename = os.environ.get("SSLKEYLOGFILE", None) + + if sslopt.get("cert_reqs", ssl.CERT_NONE) != ssl.CERT_NONE: + cafile = sslopt.get("ca_certs", None) + capath = sslopt.get("ca_cert_path", None) + if cafile or capath: + context.load_verify_locations(cafile=cafile, capath=capath) + elif hasattr(context, "load_default_certs"): + context.load_default_certs(ssl.Purpose.SERVER_AUTH) + if sslopt.get("certfile", None): + context.load_cert_chain( + sslopt["certfile"], + sslopt.get("keyfile", None), + sslopt.get("password", None), + ) + + # Python 3.10 switch to PROTOCOL_TLS_CLIENT defaults to "cert_reqs = ssl.CERT_REQUIRED" and "check_hostname = True" + # If both disabled, set check_hostname before verify_mode + # see https://github.com/liris/websocket-client/commit/b96a2e8fa765753e82eea531adb19716b52ca3ca#commitcomment-10803153 + if sslopt.get("cert_reqs", ssl.CERT_NONE) == ssl.CERT_NONE and not sslopt.get( + "check_hostname", False + ): + context.check_hostname = False + context.verify_mode = ssl.CERT_NONE + else: + context.check_hostname = sslopt.get("check_hostname", True) + context.verify_mode = sslopt.get("cert_reqs", ssl.CERT_REQUIRED) + + if "ciphers" in sslopt: + context.set_ciphers(sslopt["ciphers"]) + if "cert_chain" in sslopt: + certfile, keyfile, password = sslopt["cert_chain"] + context.load_cert_chain(certfile, keyfile, password) + if "ecdh_curve" in sslopt: + context.set_ecdh_curve(sslopt["ecdh_curve"]) + + return context.wrap_socket( + sock, + do_handshake_on_connect=sslopt.get("do_handshake_on_connect", True), + suppress_ragged_eofs=sslopt.get("suppress_ragged_eofs", True), + server_hostname=hostname, + ) + + +def _ssl_socket(sock: socket.socket, user_sslopt: dict, hostname): + sslopt: dict = {"cert_reqs": 
ssl.CERT_REQUIRED} + sslopt.update(user_sslopt) + + cert_path = os.environ.get("WEBSOCKET_CLIENT_CA_BUNDLE") + if ( + cert_path + and os.path.isfile(cert_path) + and user_sslopt.get("ca_certs", None) is None + ): + sslopt["ca_certs"] = cert_path + elif ( + cert_path + and os.path.isdir(cert_path) + and user_sslopt.get("ca_cert_path", None) is None + ): + sslopt["ca_cert_path"] = cert_path + + if sslopt.get("server_hostname", None): + hostname = sslopt["server_hostname"] + + check_hostname = sslopt.get("check_hostname", True) + sock = _wrap_sni_socket(sock, sslopt, hostname, check_hostname) + + return sock + + +def _tunnel(sock: socket.socket, host, port: int, auth) -> socket.socket: + debug("Connecting proxy...") + connect_header = f"CONNECT {host}:{port} HTTP/1.1\r\n" + connect_header += f"Host: {host}:{port}\r\n" + + # TODO: support digest auth. + if auth and auth[0]: + auth_str = auth[0] + if auth[1]: + auth_str += f":{auth[1]}" + encoded_str = base64encode(auth_str.encode()).strip().decode().replace("\n", "") + connect_header += f"Proxy-Authorization: Basic {encoded_str}\r\n" + connect_header += "\r\n" + dump("request header", connect_header) + + send(sock, connect_header) + + try: + status, _, _ = read_headers(sock) + except Exception as e: + raise WebSocketProxyException(str(e)) + + if status != 200: + raise WebSocketProxyException(f"failed CONNECT via proxy status: {status}") + + return sock + + +def read_headers(sock: socket.socket) -> tuple: + status = None + status_message = None + headers: dict = {} + trace("--- response header ---") + + while True: + line = recv_line(sock) + line = line.decode("utf-8").strip() + if not line: + break + trace(line) + if not status: + status_info = line.split(" ", 2) + status = int(status_info[1]) + if len(status_info) > 2: + status_message = status_info[2] + else: + kv = line.split(":", 1) + if len(kv) != 2: + raise WebSocketException("Invalid header") + key, value = kv + if key.lower() == "set-cookie" and 
headers.get("set-cookie"): + headers["set-cookie"] = headers.get("set-cookie") + "; " + value.strip() + else: + headers[key.lower()] = value.strip() + + trace("-----------------------") + + return status, headers, status_message diff --git a/env/lib/python3.10/site-packages/websocket/_logging.py b/env/lib/python3.10/site-packages/websocket/_logging.py new file mode 100644 index 0000000..0f673d3 --- /dev/null +++ b/env/lib/python3.10/site-packages/websocket/_logging.py @@ -0,0 +1,106 @@ +import logging + +""" +_logging.py +websocket - WebSocket client library for Python + +Copyright 2024 engn33r + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +_logger = logging.getLogger("websocket") +try: + from logging import NullHandler +except ImportError: + + class NullHandler(logging.Handler): + def emit(self, record) -> None: + pass + + +_logger.addHandler(NullHandler()) + +_traceEnabled = False + +__all__ = [ + "enableTrace", + "dump", + "error", + "warning", + "debug", + "trace", + "isEnabledForError", + "isEnabledForDebug", + "isEnabledForTrace", +] + + +def enableTrace( + traceable: bool, + handler: logging.StreamHandler = logging.StreamHandler(), + level: str = "DEBUG", +) -> None: + """ + Turn on/off the traceability. + + Parameters + ---------- + traceable: bool + If set to True, traceability is enabled. 
+ """ + global _traceEnabled + _traceEnabled = traceable + if traceable: + _logger.addHandler(handler) + _logger.setLevel(getattr(logging, level)) + + +def dump(title: str, message: str) -> None: + if _traceEnabled: + _logger.debug(f"--- {title} ---") + _logger.debug(message) + _logger.debug("-----------------------") + + +def error(msg: str) -> None: + _logger.error(msg) + + +def warning(msg: str) -> None: + _logger.warning(msg) + + +def debug(msg: str) -> None: + _logger.debug(msg) + + +def info(msg: str) -> None: + _logger.info(msg) + + +def trace(msg: str) -> None: + if _traceEnabled: + _logger.debug(msg) + + +def isEnabledForError() -> bool: + return _logger.isEnabledFor(logging.ERROR) + + +def isEnabledForDebug() -> bool: + return _logger.isEnabledFor(logging.DEBUG) + + +def isEnabledForTrace() -> bool: + return _traceEnabled diff --git a/env/lib/python3.10/site-packages/websocket/_socket.py b/env/lib/python3.10/site-packages/websocket/_socket.py new file mode 100644 index 0000000..81094ff --- /dev/null +++ b/env/lib/python3.10/site-packages/websocket/_socket.py @@ -0,0 +1,188 @@ +import errno +import selectors +import socket +from typing import Union + +from ._exceptions import ( + WebSocketConnectionClosedException, + WebSocketTimeoutException, +) +from ._ssl_compat import SSLError, SSLWantReadError, SSLWantWriteError +from ._utils import extract_error_code, extract_err_message + +""" +_socket.py +websocket - WebSocket client library for Python + +Copyright 2024 engn33r + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +""" + +DEFAULT_SOCKET_OPTION = [(socket.SOL_TCP, socket.TCP_NODELAY, 1)] +if hasattr(socket, "SO_KEEPALIVE"): + DEFAULT_SOCKET_OPTION.append((socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)) +if hasattr(socket, "TCP_KEEPIDLE"): + DEFAULT_SOCKET_OPTION.append((socket.SOL_TCP, socket.TCP_KEEPIDLE, 30)) +if hasattr(socket, "TCP_KEEPINTVL"): + DEFAULT_SOCKET_OPTION.append((socket.SOL_TCP, socket.TCP_KEEPINTVL, 10)) +if hasattr(socket, "TCP_KEEPCNT"): + DEFAULT_SOCKET_OPTION.append((socket.SOL_TCP, socket.TCP_KEEPCNT, 3)) + +_default_timeout = None + +__all__ = [ + "DEFAULT_SOCKET_OPTION", + "sock_opt", + "setdefaulttimeout", + "getdefaulttimeout", + "recv", + "recv_line", + "send", +] + + +class sock_opt: + def __init__(self, sockopt: list, sslopt: dict) -> None: + if sockopt is None: + sockopt = [] + if sslopt is None: + sslopt = {} + self.sockopt = sockopt + self.sslopt = sslopt + self.timeout = None + + +def setdefaulttimeout(timeout: Union[int, float, None]) -> None: + """ + Set the global timeout setting to connect. + + Parameters + ---------- + timeout: int or float + default socket timeout time (in seconds) + """ + global _default_timeout + _default_timeout = timeout + + +def getdefaulttimeout() -> Union[int, float, None]: + """ + Get default timeout + + Returns + ---------- + _default_timeout: int or float + Return the global timeout setting (in seconds) to connect. 
+ """ + return _default_timeout + + +def recv(sock: socket.socket, bufsize: int) -> bytes: + if not sock: + raise WebSocketConnectionClosedException("socket is already closed.") + + def _recv(): + try: + return sock.recv(bufsize) + except SSLWantReadError: + pass + except socket.error as exc: + error_code = extract_error_code(exc) + if error_code not in [errno.EAGAIN, errno.EWOULDBLOCK]: + raise + + sel = selectors.DefaultSelector() + sel.register(sock, selectors.EVENT_READ) + + r = sel.select(sock.gettimeout()) + sel.close() + + if r: + return sock.recv(bufsize) + + try: + if sock.gettimeout() == 0: + bytes_ = sock.recv(bufsize) + else: + bytes_ = _recv() + except TimeoutError: + raise WebSocketTimeoutException("Connection timed out") + except socket.timeout as e: + message = extract_err_message(e) + raise WebSocketTimeoutException(message) + except SSLError as e: + message = extract_err_message(e) + if isinstance(message, str) and "timed out" in message: + raise WebSocketTimeoutException(message) + else: + raise + + if not bytes_: + raise WebSocketConnectionClosedException("Connection to remote host was lost.") + + return bytes_ + + +def recv_line(sock: socket.socket) -> bytes: + line = [] + while True: + c = recv(sock, 1) + line.append(c) + if c == b"\n": + break + return b"".join(line) + + +def send(sock: socket.socket, data: Union[bytes, str]) -> int: + if isinstance(data, str): + data = data.encode("utf-8") + + if not sock: + raise WebSocketConnectionClosedException("socket is already closed.") + + def _send(): + try: + return sock.send(data) + except SSLWantWriteError: + pass + except socket.error as exc: + error_code = extract_error_code(exc) + if error_code is None: + raise + if error_code not in [errno.EAGAIN, errno.EWOULDBLOCK]: + raise + + sel = selectors.DefaultSelector() + sel.register(sock, selectors.EVENT_WRITE) + + w = sel.select(sock.gettimeout()) + sel.close() + + if w: + return sock.send(data) + + try: + if sock.gettimeout() == 0: + return 
sock.send(data) + else: + return _send() + except socket.timeout as e: + message = extract_err_message(e) + raise WebSocketTimeoutException(message) + except Exception as e: + message = extract_err_message(e) + if isinstance(message, str) and "timed out" in message: + raise WebSocketTimeoutException(message) + else: + raise diff --git a/env/lib/python3.10/site-packages/websocket/_ssl_compat.py b/env/lib/python3.10/site-packages/websocket/_ssl_compat.py new file mode 100644 index 0000000..0a8a32b --- /dev/null +++ b/env/lib/python3.10/site-packages/websocket/_ssl_compat.py @@ -0,0 +1,48 @@ +""" +_ssl_compat.py +websocket - WebSocket client library for Python + +Copyright 2024 engn33r + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" +__all__ = [ + "HAVE_SSL", + "ssl", + "SSLError", + "SSLEOFError", + "SSLWantReadError", + "SSLWantWriteError", +] + +try: + import ssl + from ssl import SSLError, SSLEOFError, SSLWantReadError, SSLWantWriteError + + HAVE_SSL = True +except ImportError: + # dummy class of SSLError for environment without ssl support + class SSLError(Exception): + pass + + class SSLEOFError(Exception): + pass + + class SSLWantReadError(Exception): + pass + + class SSLWantWriteError(Exception): + pass + + ssl = None + HAVE_SSL = False diff --git a/env/lib/python3.10/site-packages/websocket/_url.py b/env/lib/python3.10/site-packages/websocket/_url.py new file mode 100644 index 0000000..9021317 --- /dev/null +++ b/env/lib/python3.10/site-packages/websocket/_url.py @@ -0,0 +1,190 @@ +import os +import socket +import struct +from typing import Optional +from urllib.parse import unquote, urlparse +from ._exceptions import WebSocketProxyException + +""" +_url.py +websocket - WebSocket client library for Python + +Copyright 2024 engn33r + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +__all__ = ["parse_url", "get_proxy_info"] + + +def parse_url(url: str) -> tuple: + """ + parse url and the result is tuple of + (hostname, port, resource path and the flag of secure mode) + + Parameters + ---------- + url: str + url string. 
+ """ + if ":" not in url: + raise ValueError("url is invalid") + + scheme, url = url.split(":", 1) + + parsed = urlparse(url, scheme="http") + if parsed.hostname: + hostname = parsed.hostname + else: + raise ValueError("hostname is invalid") + port = 0 + if parsed.port: + port = parsed.port + + is_secure = False + if scheme == "ws": + if not port: + port = 80 + elif scheme == "wss": + is_secure = True + if not port: + port = 443 + else: + raise ValueError("scheme %s is invalid" % scheme) + + if parsed.path: + resource = parsed.path + else: + resource = "/" + + if parsed.query: + resource += f"?{parsed.query}" + + return hostname, port, resource, is_secure + + +DEFAULT_NO_PROXY_HOST = ["localhost", "127.0.0.1"] + + +def _is_ip_address(addr: str) -> bool: + try: + socket.inet_aton(addr) + except socket.error: + return False + else: + return True + + +def _is_subnet_address(hostname: str) -> bool: + try: + addr, netmask = hostname.split("/") + return _is_ip_address(addr) and 0 <= int(netmask) < 32 + except ValueError: + return False + + +def _is_address_in_network(ip: str, net: str) -> bool: + ipaddr: int = struct.unpack("!I", socket.inet_aton(ip))[0] + netaddr, netmask = net.split("/") + netaddr: int = struct.unpack("!I", socket.inet_aton(netaddr))[0] + + netmask = (0xFFFFFFFF << (32 - int(netmask))) & 0xFFFFFFFF + return ipaddr & netmask == netaddr + + +def _is_no_proxy_host(hostname: str, no_proxy: Optional[list]) -> bool: + if not no_proxy: + if v := os.environ.get("no_proxy", os.environ.get("NO_PROXY", "")).replace( + " ", "" + ): + no_proxy = v.split(",") + if not no_proxy: + no_proxy = DEFAULT_NO_PROXY_HOST + + if "*" in no_proxy: + return True + if hostname in no_proxy: + return True + if _is_ip_address(hostname): + return any( + [ + _is_address_in_network(hostname, subnet) + for subnet in no_proxy + if _is_subnet_address(subnet) + ] + ) + for domain in [domain for domain in no_proxy if domain.startswith(".")]: + if hostname.endswith(domain): + return True + 
return False + + +def get_proxy_info( + hostname: str, + is_secure: bool, + proxy_host: Optional[str] = None, + proxy_port: int = 0, + proxy_auth: Optional[tuple] = None, + no_proxy: Optional[list] = None, + proxy_type: str = "http", +) -> tuple: + """ + Try to retrieve proxy host and port from environment + if not provided in options. + Result is (proxy_host, proxy_port, proxy_auth). + proxy_auth is tuple of username and password + of proxy authentication information. + + Parameters + ---------- + hostname: str + Websocket server name. + is_secure: bool + Is the connection secure? (wss) looks for "https_proxy" in env + instead of "http_proxy" + proxy_host: str + http proxy host name. + proxy_port: str or int + http proxy port. + no_proxy: list + Whitelisted host names that don't use the proxy. + proxy_auth: tuple + HTTP proxy auth information. Tuple of username and password. Default is None. + proxy_type: str + Specify the proxy protocol (http, socks4, socks4a, socks5, socks5h). Default is "http". + Use socks4a or socks5h if you want to send DNS requests through the proxy. 
+ """ + if _is_no_proxy_host(hostname, no_proxy): + return None, 0, None + + if proxy_host: + if not proxy_port: + raise WebSocketProxyException("Cannot use port 0 when proxy_host specified") + port = proxy_port + auth = proxy_auth + return proxy_host, port, auth + + env_key = "https_proxy" if is_secure else "http_proxy" + value = os.environ.get(env_key, os.environ.get(env_key.upper(), "")).replace( + " ", "" + ) + if value: + proxy = urlparse(value) + auth = ( + (unquote(proxy.username), unquote(proxy.password)) + if proxy.username + else None + ) + return proxy.hostname, proxy.port, auth + + return None, 0, None diff --git a/env/lib/python3.10/site-packages/websocket/_utils.py b/env/lib/python3.10/site-packages/websocket/_utils.py new file mode 100644 index 0000000..65f3c0d --- /dev/null +++ b/env/lib/python3.10/site-packages/websocket/_utils.py @@ -0,0 +1,459 @@ +from typing import Union + +""" +_url.py +websocket - WebSocket client library for Python + +Copyright 2024 engn33r + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" +__all__ = ["NoLock", "validate_utf8", "extract_err_message", "extract_error_code"] + + +class NoLock: + def __enter__(self) -> None: + pass + + def __exit__(self, exc_type, exc_value, traceback) -> None: + pass + + +try: + # If wsaccel is available we use compiled routines to validate UTF-8 + # strings. 
+ from wsaccel.utf8validator import Utf8Validator + + def _validate_utf8(utfbytes: Union[str, bytes]) -> bool: + result: bool = Utf8Validator().validate(utfbytes)[0] + return result + +except ImportError: + # UTF-8 validator + # python implementation of http://bjoern.hoehrmann.de/utf-8/decoder/dfa/ + + _UTF8_ACCEPT = 0 + _UTF8_REJECT = 12 + + _UTF8D = [ + # The first part of the table maps bytes to character classes that + # to reduce the size of the transition table and create bitmasks. + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 8, + 8, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 10, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 4, + 3, + 3, + 11, + 6, + 6, + 6, + 5, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + # The second part is a transition table that maps a combination + # of a state of the automaton and a character class to a state. 
+ 0, + 12, + 24, + 36, + 60, + 96, + 84, + 12, + 12, + 12, + 48, + 72, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 0, + 12, + 12, + 12, + 12, + 12, + 0, + 12, + 0, + 12, + 12, + 12, + 24, + 12, + 12, + 12, + 12, + 12, + 24, + 12, + 24, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 24, + 12, + 12, + 12, + 12, + 12, + 24, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 24, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 36, + 12, + 36, + 12, + 12, + 12, + 36, + 12, + 12, + 12, + 12, + 12, + 36, + 12, + 36, + 12, + 12, + 12, + 36, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + ] + + def _decode(state: int, codep: int, ch: int) -> tuple: + tp = _UTF8D[ch] + + codep = ( + (ch & 0x3F) | (codep << 6) if (state != _UTF8_ACCEPT) else (0xFF >> tp) & ch + ) + state = _UTF8D[256 + state + tp] + + return state, codep + + def _validate_utf8(utfbytes: Union[str, bytes]) -> bool: + state = _UTF8_ACCEPT + codep = 0 + for i in utfbytes: + state, codep = _decode(state, codep, int(i)) + if state == _UTF8_REJECT: + return False + + return True + + +def validate_utf8(utfbytes: Union[str, bytes]) -> bool: + """ + validate utf8 byte string. + utfbytes: utf byte string to check. + return value: if valid utf8 string, return true. Otherwise, return false. 
+ """ + return _validate_utf8(utfbytes) + + +def extract_err_message(exception: Exception) -> Union[str, None]: + if exception.args: + exception_message: str = exception.args[0] + return exception_message + else: + return None + + +def extract_error_code(exception: Exception) -> Union[int, None]: + if exception.args and len(exception.args) > 1: + return exception.args[0] if isinstance(exception.args[0], int) else None diff --git a/env/lib/python3.10/site-packages/websocket/_wsdump.py b/env/lib/python3.10/site-packages/websocket/_wsdump.py new file mode 100644 index 0000000..d4d76dc --- /dev/null +++ b/env/lib/python3.10/site-packages/websocket/_wsdump.py @@ -0,0 +1,244 @@ +#!/usr/bin/env python3 + +""" +wsdump.py +websocket - WebSocket client library for Python + +Copyright 2024 engn33r + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" + +import argparse +import code +import gzip +import ssl +import sys +import threading +import time +import zlib +from urllib.parse import urlparse + +import websocket + +try: + import readline +except ImportError: + pass + + +def get_encoding() -> str: + encoding = getattr(sys.stdin, "encoding", "") + if not encoding: + return "utf-8" + else: + return encoding.lower() + + +OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY) +ENCODING = get_encoding() + + +class VAction(argparse.Action): + def __call__( + self, + parser: argparse.Namespace, + args: tuple, + values: str, + option_string: str = None, + ) -> None: + if values is None: + values = "1" + try: + values = int(values) + except ValueError: + values = values.count("v") + 1 + setattr(args, self.dest, values) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool") + parser.add_argument( + "url", metavar="ws_url", help="websocket url. ex. ws://echo.websocket.events/" + ) + parser.add_argument("-p", "--proxy", help="proxy url. ex. http://127.0.0.1:8080") + parser.add_argument( + "-v", + "--verbose", + default=0, + nargs="?", + action=VAction, + dest="verbose", + help="set verbose mode. If set to 1, show opcode. " + "If set to 2, enable to trace websocket module", + ) + parser.add_argument( + "-n", "--nocert", action="store_true", help="Ignore invalid SSL cert" + ) + parser.add_argument("-r", "--raw", action="store_true", help="raw output") + parser.add_argument("-s", "--subprotocols", nargs="*", help="Set subprotocols") + parser.add_argument("-o", "--origin", help="Set origin") + parser.add_argument( + "--eof-wait", + default=0, + type=int, + help="wait time(second) after 'EOF' received.", + ) + parser.add_argument("-t", "--text", help="Send initial text") + parser.add_argument( + "--timings", action="store_true", help="Print timings in seconds" + ) + parser.add_argument("--headers", help="Set custom headers. 
Use ',' as separator") + + return parser.parse_args() + + +class RawInput: + def raw_input(self, prompt: str = "") -> str: + line = input(prompt) + + if ENCODING and ENCODING != "utf-8" and not isinstance(line, str): + line = line.decode(ENCODING).encode("utf-8") + elif isinstance(line, str): + line = line.encode("utf-8") + + return line + + +class InteractiveConsole(RawInput, code.InteractiveConsole): + def write(self, data: str) -> None: + sys.stdout.write("\033[2K\033[E") + # sys.stdout.write("\n") + sys.stdout.write("\033[34m< " + data + "\033[39m") + sys.stdout.write("\n> ") + sys.stdout.flush() + + def read(self) -> str: + return self.raw_input("> ") + + +class NonInteractive(RawInput): + def write(self, data: str) -> None: + sys.stdout.write(data) + sys.stdout.write("\n") + sys.stdout.flush() + + def read(self) -> str: + return self.raw_input("") + + +def main() -> None: + start_time = time.time() + args = parse_args() + if args.verbose > 1: + websocket.enableTrace(True) + options = {} + if args.proxy: + p = urlparse(args.proxy) + options["http_proxy_host"] = p.hostname + options["http_proxy_port"] = p.port + if args.origin: + options["origin"] = args.origin + if args.subprotocols: + options["subprotocols"] = args.subprotocols + opts = {} + if args.nocert: + opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False} + if args.headers: + options["header"] = list(map(str.strip, args.headers.split(","))) + ws = websocket.create_connection(args.url, sslopt=opts, **options) + if args.raw: + console = NonInteractive() + else: + console = InteractiveConsole() + print("Press Ctrl+C to quit") + + def recv() -> tuple: + try: + frame = ws.recv_frame() + except websocket.WebSocketException: + return websocket.ABNF.OPCODE_CLOSE, "" + if not frame: + raise websocket.WebSocketException(f"Not a valid frame {frame}") + elif frame.opcode in OPCODE_DATA: + return frame.opcode, frame.data + elif frame.opcode == websocket.ABNF.OPCODE_CLOSE: + ws.send_close() + return 
frame.opcode, "" + elif frame.opcode == websocket.ABNF.OPCODE_PING: + ws.pong(frame.data) + return frame.opcode, frame.data + + return frame.opcode, frame.data + + def recv_ws() -> None: + while True: + opcode, data = recv() + msg = None + if opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes): + data = str(data, "utf-8") + if ( + isinstance(data, bytes) and len(data) > 2 and data[:2] == b"\037\213" + ): # gzip magick + try: + data = "[gzip] " + str(gzip.decompress(data), "utf-8") + except: + pass + elif isinstance(data, bytes): + try: + data = "[zlib] " + str( + zlib.decompress(data, -zlib.MAX_WBITS), "utf-8" + ) + except: + pass + + if isinstance(data, bytes): + data = repr(data) + + if args.verbose: + msg = f"{websocket.ABNF.OPCODE_MAP.get(opcode)}: {data}" + else: + msg = data + + if msg is not None: + if args.timings: + console.write(f"{time.time() - start_time}: {msg}") + else: + console.write(msg) + + if opcode == websocket.ABNF.OPCODE_CLOSE: + break + + thread = threading.Thread(target=recv_ws) + thread.daemon = True + thread.start() + + if args.text: + ws.send(args.text) + + while True: + try: + message = console.read() + ws.send(message) + except KeyboardInterrupt: + return + except EOFError: + time.sleep(args.eof_wait) + return + + +if __name__ == "__main__": + try: + main() + except Exception as e: + print(e) diff --git a/env/lib/python3.10/site-packages/websocket/py.typed b/env/lib/python3.10/site-packages/websocket/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/env/lib/python3.10/site-packages/websocket/tests/__init__.py b/env/lib/python3.10/site-packages/websocket/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/env/lib/python3.10/site-packages/websocket/tests/data/header01.txt b/env/lib/python3.10/site-packages/websocket/tests/data/header01.txt new file mode 100644 index 0000000..3142b43 --- /dev/null +++ b/env/lib/python3.10/site-packages/websocket/tests/data/header01.txt @@ -0,0 +1,6 @@ 
+HTTP/1.1 101 WebSocket Protocol Handshake +Connection: Upgrade +Upgrade: WebSocket +Sec-WebSocket-Accept: Kxep+hNu9n51529fGidYu7a3wO0= +some_header: something + diff --git a/env/lib/python3.10/site-packages/websocket/tests/data/header02.txt b/env/lib/python3.10/site-packages/websocket/tests/data/header02.txt new file mode 100644 index 0000000..a9dd2ce --- /dev/null +++ b/env/lib/python3.10/site-packages/websocket/tests/data/header02.txt @@ -0,0 +1,6 @@ +HTTP/1.1 101 WebSocket Protocol Handshake +Connection: Upgrade +Upgrade WebSocket +Sec-WebSocket-Accept: Kxep+hNu9n51529fGidYu7a3wO0= +some_header: something + diff --git a/env/lib/python3.10/site-packages/websocket/tests/data/header03.txt b/env/lib/python3.10/site-packages/websocket/tests/data/header03.txt new file mode 100644 index 0000000..1a81dc7 --- /dev/null +++ b/env/lib/python3.10/site-packages/websocket/tests/data/header03.txt @@ -0,0 +1,8 @@ +HTTP/1.1 101 WebSocket Protocol Handshake +Connection: Upgrade, Keep-Alive +Upgrade: WebSocket +Sec-WebSocket-Accept: Kxep+hNu9n51529fGidYu7a3wO0= +Set-Cookie: Token=ABCDE +Set-Cookie: Token=FGHIJ +some_header: something + diff --git a/env/lib/python3.10/site-packages/websocket/tests/echo-server.py b/env/lib/python3.10/site-packages/websocket/tests/echo-server.py new file mode 100644 index 0000000..5d1e870 --- /dev/null +++ b/env/lib/python3.10/site-packages/websocket/tests/echo-server.py @@ -0,0 +1,23 @@ +#!/usr/bin/env python + +# From https://github.com/aaugustin/websockets/blob/main/example/echo.py + +import asyncio +import os + +import websockets + +LOCAL_WS_SERVER_PORT = int(os.environ.get("LOCAL_WS_SERVER_PORT", "8765")) + + +async def echo(websocket): + async for message in websocket: + await websocket.send(message) + + +async def main(): + async with websockets.serve(echo, "localhost", LOCAL_WS_SERVER_PORT): + await asyncio.Future() # run forever + + +asyncio.run(main()) diff --git a/env/lib/python3.10/site-packages/websocket/tests/test_abnf.py 
b/env/lib/python3.10/site-packages/websocket/tests/test_abnf.py new file mode 100644 index 0000000..a749f13 --- /dev/null +++ b/env/lib/python3.10/site-packages/websocket/tests/test_abnf.py @@ -0,0 +1,125 @@ +# -*- coding: utf-8 -*- +# +import unittest + +from websocket._abnf import ABNF, frame_buffer +from websocket._exceptions import WebSocketProtocolException + +""" +test_abnf.py +websocket - WebSocket client library for Python + +Copyright 2024 engn33r + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + + +class ABNFTest(unittest.TestCase): + def test_init(self): + a = ABNF(0, 0, 0, 0, opcode=ABNF.OPCODE_PING) + self.assertEqual(a.fin, 0) + self.assertEqual(a.rsv1, 0) + self.assertEqual(a.rsv2, 0) + self.assertEqual(a.rsv3, 0) + self.assertEqual(a.opcode, 9) + self.assertEqual(a.data, "") + a_bad = ABNF(0, 1, 0, 0, opcode=77) + self.assertEqual(a_bad.rsv1, 1) + self.assertEqual(a_bad.opcode, 77) + + def test_validate(self): + a_invalid_ping = ABNF(0, 0, 0, 0, opcode=ABNF.OPCODE_PING) + self.assertRaises( + WebSocketProtocolException, + a_invalid_ping.validate, + skip_utf8_validation=False, + ) + a_bad_rsv_value = ABNF(0, 1, 0, 0, opcode=ABNF.OPCODE_TEXT) + self.assertRaises( + WebSocketProtocolException, + a_bad_rsv_value.validate, + skip_utf8_validation=False, + ) + a_bad_opcode = ABNF(0, 0, 0, 0, opcode=77) + self.assertRaises( + WebSocketProtocolException, + a_bad_opcode.validate, + skip_utf8_validation=False, + ) + a_bad_close_frame = ABNF(0, 0, 0, 0, opcode=ABNF.OPCODE_CLOSE, 
data=b"\x01") + self.assertRaises( + WebSocketProtocolException, + a_bad_close_frame.validate, + skip_utf8_validation=False, + ) + a_bad_close_frame_2 = ABNF( + 0, 0, 0, 0, opcode=ABNF.OPCODE_CLOSE, data=b"\x01\x8a\xaa\xff\xdd" + ) + self.assertRaises( + WebSocketProtocolException, + a_bad_close_frame_2.validate, + skip_utf8_validation=False, + ) + a_bad_close_frame_3 = ABNF( + 0, 0, 0, 0, opcode=ABNF.OPCODE_CLOSE, data=b"\x03\xe7" + ) + self.assertRaises( + WebSocketProtocolException, + a_bad_close_frame_3.validate, + skip_utf8_validation=True, + ) + + def test_mask(self): + abnf_none_data = ABNF( + 0, 0, 0, 0, opcode=ABNF.OPCODE_PING, mask_value=1, data=None + ) + bytes_val = b"aaaa" + self.assertEqual(abnf_none_data._get_masked(bytes_val), bytes_val) + abnf_str_data = ABNF( + 0, 0, 0, 0, opcode=ABNF.OPCODE_PING, mask_value=1, data="a" + ) + self.assertEqual(abnf_str_data._get_masked(bytes_val), b"aaaa\x00") + + def test_format(self): + abnf_bad_rsv_bits = ABNF(2, 0, 0, 0, opcode=ABNF.OPCODE_TEXT) + self.assertRaises(ValueError, abnf_bad_rsv_bits.format) + abnf_bad_opcode = ABNF(0, 0, 0, 0, opcode=5) + self.assertRaises(ValueError, abnf_bad_opcode.format) + abnf_length_10 = ABNF(0, 0, 0, 0, opcode=ABNF.OPCODE_TEXT, data="abcdefghij") + self.assertEqual(b"\x01", abnf_length_10.format()[0].to_bytes(1, "big")) + self.assertEqual(b"\x8a", abnf_length_10.format()[1].to_bytes(1, "big")) + self.assertEqual("fin=0 opcode=1 data=abcdefghij", abnf_length_10.__str__()) + abnf_length_20 = ABNF( + 0, 0, 0, 0, opcode=ABNF.OPCODE_BINARY, data="abcdefghijabcdefghij" + ) + self.assertEqual(b"\x02", abnf_length_20.format()[0].to_bytes(1, "big")) + self.assertEqual(b"\x94", abnf_length_20.format()[1].to_bytes(1, "big")) + abnf_no_mask = ABNF( + 0, 0, 0, 0, opcode=ABNF.OPCODE_TEXT, mask_value=0, data=b"\x01\x8a\xcc" + ) + self.assertEqual(b"\x01\x03\x01\x8a\xcc", abnf_no_mask.format()) + + def test_frame_buffer(self): + fb = frame_buffer(0, True) + self.assertEqual(fb.recv, 0) + 
self.assertEqual(fb.skip_utf8_validation, True) + fb.clear + self.assertEqual(fb.header, None) + self.assertEqual(fb.length, None) + self.assertEqual(fb.mask_value, None) + self.assertEqual(fb.has_mask(), False) + + +if __name__ == "__main__": + unittest.main() diff --git a/env/lib/python3.10/site-packages/websocket/tests/test_app.py b/env/lib/python3.10/site-packages/websocket/tests/test_app.py new file mode 100644 index 0000000..18eace5 --- /dev/null +++ b/env/lib/python3.10/site-packages/websocket/tests/test_app.py @@ -0,0 +1,352 @@ +# -*- coding: utf-8 -*- +# +import os +import os.path +import ssl +import threading +import unittest + +import websocket as ws + +""" +test_app.py +websocket - WebSocket client library for Python + +Copyright 2024 engn33r + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" + +# Skip test to access the internet unless TEST_WITH_INTERNET == 1 +TEST_WITH_INTERNET = os.environ.get("TEST_WITH_INTERNET", "0") == "1" +# Skip tests relying on local websockets server unless LOCAL_WS_SERVER_PORT != -1 +LOCAL_WS_SERVER_PORT = os.environ.get("LOCAL_WS_SERVER_PORT", "-1") +TEST_WITH_LOCAL_SERVER = LOCAL_WS_SERVER_PORT != "-1" +TRACEABLE = True + + +class WebSocketAppTest(unittest.TestCase): + class NotSetYet: + """A marker class for signalling that a value hasn't been set yet.""" + + def setUp(self): + ws.enableTrace(TRACEABLE) + + WebSocketAppTest.keep_running_open = WebSocketAppTest.NotSetYet() + WebSocketAppTest.keep_running_close = WebSocketAppTest.NotSetYet() + WebSocketAppTest.get_mask_key_id = WebSocketAppTest.NotSetYet() + WebSocketAppTest.on_error_data = WebSocketAppTest.NotSetYet() + + def tearDown(self): + WebSocketAppTest.keep_running_open = WebSocketAppTest.NotSetYet() + WebSocketAppTest.keep_running_close = WebSocketAppTest.NotSetYet() + WebSocketAppTest.get_mask_key_id = WebSocketAppTest.NotSetYet() + WebSocketAppTest.on_error_data = WebSocketAppTest.NotSetYet() + + def close(self): + pass + + @unittest.skipUnless( + TEST_WITH_LOCAL_SERVER, "Tests using local websocket server are disabled" + ) + def test_keep_running(self): + """A WebSocketApp should keep running as long as its self.keep_running + is not False (in the boolean context). + """ + + def on_open(self, *args, **kwargs): + """Set the keep_running flag for later inspection and immediately + close the connection. 
+ """ + self.send("hello!") + WebSocketAppTest.keep_running_open = self.keep_running + self.keep_running = False + + def on_message(_, message): + print(message) + self.close() + + def on_close(self, *args, **kwargs): + """Set the keep_running flag for the test to use.""" + WebSocketAppTest.keep_running_close = self.keep_running + + app = ws.WebSocketApp( + f"ws://127.0.0.1:{LOCAL_WS_SERVER_PORT}", + on_open=on_open, + on_close=on_close, + on_message=on_message, + ) + app.run_forever() + + # @unittest.skipUnless(TEST_WITH_LOCAL_SERVER, "Tests using local websocket server are disabled") + @unittest.skipUnless(False, "Test disabled for now (requires rel)") + def test_run_forever_dispatcher(self): + """A WebSocketApp should keep running as long as its self.keep_running + is not False (in the boolean context). + """ + + def on_open(self, *args, **kwargs): + """Send a message, receive, and send one more""" + self.send("hello!") + self.recv() + self.send("goodbye!") + + def on_message(_, message): + print(message) + self.close() + + app = ws.WebSocketApp( + f"ws://127.0.0.1:{LOCAL_WS_SERVER_PORT}", + on_open=on_open, + on_message=on_message, + ) + app.run_forever(dispatcher="Dispatcher") # doesn't work + + # app.run_forever(dispatcher=rel) # would work + # rel.dispatch() + + @unittest.skipUnless( + TEST_WITH_LOCAL_SERVER, "Tests using local websocket server are disabled" + ) + def test_run_forever_teardown_clean_exit(self): + """The WebSocketApp.run_forever() method should return `False` when the application ends gracefully.""" + app = ws.WebSocketApp(f"ws://127.0.0.1:{LOCAL_WS_SERVER_PORT}") + threading.Timer(interval=0.2, function=app.close).start() + teardown = app.run_forever() + self.assertEqual(teardown, False) + + @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled") + def test_sock_mask_key(self): + """A WebSocketApp should forward the received mask_key function down + to the actual socket. 
+ """ + + def my_mask_key_func(): + return "\x00\x00\x00\x00" + + app = ws.WebSocketApp( + "wss://api-pub.bitfinex.com/ws/1", get_mask_key=my_mask_key_func + ) + + # if numpy is installed, this assertion fail + # Note: We can't use 'is' for comparing the functions directly, need to use 'id'. + self.assertEqual(id(app.get_mask_key), id(my_mask_key_func)) + + @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled") + def test_invalid_ping_interval_ping_timeout(self): + """Test exception handling if ping_interval < ping_timeout""" + + def on_ping(app, _): + print("Got a ping!") + app.close() + + def on_pong(app, _): + print("Got a pong! No need to respond") + app.close() + + app = ws.WebSocketApp( + "wss://api-pub.bitfinex.com/ws/1", on_ping=on_ping, on_pong=on_pong + ) + self.assertRaises( + ws.WebSocketException, + app.run_forever, + ping_interval=1, + ping_timeout=2, + sslopt={"cert_reqs": ssl.CERT_NONE}, + ) + + @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled") + def test_ping_interval(self): + """Test WebSocketApp proper ping functionality""" + + def on_ping(app, _): + print("Got a ping!") + app.close() + + def on_pong(app, _): + print("Got a pong! 
No need to respond") + app.close() + + app = ws.WebSocketApp( + "wss://api-pub.bitfinex.com/ws/1", on_ping=on_ping, on_pong=on_pong + ) + app.run_forever( + ping_interval=2, ping_timeout=1, sslopt={"cert_reqs": ssl.CERT_NONE} + ) + + @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled") + def test_opcode_close(self): + """Test WebSocketApp close opcode""" + + app = ws.WebSocketApp("wss://tsock.us1.twilio.com/v3/wsconnect") + app.run_forever(ping_interval=2, ping_timeout=1, ping_payload="Ping payload") + + # This is commented out because the URL no longer responds in the expected way + # @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled") + # def testOpcodeBinary(self): + # """ Test WebSocketApp binary opcode + # """ + # app = ws.WebSocketApp('wss://streaming.vn.teslamotors.com/streaming/') + # app.run_forever(ping_interval=2, ping_timeout=1, ping_payload="Ping payload") + + @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled") + def test_bad_ping_interval(self): + """A WebSocketApp handling of negative ping_interval""" + app = ws.WebSocketApp("wss://api-pub.bitfinex.com/ws/1") + self.assertRaises( + ws.WebSocketException, + app.run_forever, + ping_interval=-5, + sslopt={"cert_reqs": ssl.CERT_NONE}, + ) + + @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled") + def test_bad_ping_timeout(self): + """A WebSocketApp handling of negative ping_timeout""" + app = ws.WebSocketApp("wss://api-pub.bitfinex.com/ws/1") + self.assertRaises( + ws.WebSocketException, + app.run_forever, + ping_timeout=-3, + sslopt={"cert_reqs": ssl.CERT_NONE}, + ) + + @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled") + def test_close_status_code(self): + """Test extraction of close frame status code and close reason in WebSocketApp""" + + def on_close(wsapp, close_status_code, close_msg): + print("on_close reached") + + app = ws.WebSocketApp( + 
"wss://tsock.us1.twilio.com/v3/wsconnect", on_close=on_close + ) + closeframe = ws.ABNF( + opcode=ws.ABNF.OPCODE_CLOSE, data=b"\x03\xe8no-init-from-client" + ) + self.assertEqual([1000, "no-init-from-client"], app._get_close_args(closeframe)) + + closeframe = ws.ABNF(opcode=ws.ABNF.OPCODE_CLOSE, data=b"") + self.assertEqual([None, None], app._get_close_args(closeframe)) + + app2 = ws.WebSocketApp("wss://tsock.us1.twilio.com/v3/wsconnect") + closeframe = ws.ABNF(opcode=ws.ABNF.OPCODE_CLOSE, data=b"") + self.assertEqual([None, None], app2._get_close_args(closeframe)) + + self.assertRaises( + ws.WebSocketConnectionClosedException, + app.send, + data="test if connection is closed", + ) + + @unittest.skipUnless( + TEST_WITH_LOCAL_SERVER, "Tests using local websocket server are disabled" + ) + def test_callback_function_exception(self): + """Test callback function exception handling""" + + exc = None + passed_app = None + + def on_open(app): + raise RuntimeError("Callback failed") + + def on_error(app, err): + nonlocal passed_app + passed_app = app + nonlocal exc + exc = err + + def on_pong(app, _): + app.close() + + app = ws.WebSocketApp( + f"ws://127.0.0.1:{LOCAL_WS_SERVER_PORT}", + on_open=on_open, + on_error=on_error, + on_pong=on_pong, + ) + app.run_forever(ping_interval=2, ping_timeout=1) + + self.assertEqual(passed_app, app) + self.assertIsInstance(exc, RuntimeError) + self.assertEqual(str(exc), "Callback failed") + + @unittest.skipUnless( + TEST_WITH_LOCAL_SERVER, "Tests using local websocket server are disabled" + ) + def test_callback_method_exception(self): + """Test callback method exception handling""" + + class Callbacks: + def __init__(self): + self.exc = None + self.passed_app = None + self.app = ws.WebSocketApp( + f"ws://127.0.0.1:{LOCAL_WS_SERVER_PORT}", + on_open=self.on_open, + on_error=self.on_error, + on_pong=self.on_pong, + ) + self.app.run_forever(ping_interval=2, ping_timeout=1) + + def on_open(self, _): + raise RuntimeError("Callback failed") + 
+ def on_error(self, app, err): + self.passed_app = app + self.exc = err + + def on_pong(self, app, _): + app.close() + + callbacks = Callbacks() + + self.assertEqual(callbacks.passed_app, callbacks.app) + self.assertIsInstance(callbacks.exc, RuntimeError) + self.assertEqual(str(callbacks.exc), "Callback failed") + + @unittest.skipUnless( + TEST_WITH_LOCAL_SERVER, "Tests using local websocket server are disabled" + ) + def test_reconnect(self): + """Test reconnect""" + pong_count = 0 + exc = None + + def on_error(_, err): + nonlocal exc + exc = err + + def on_pong(app, _): + nonlocal pong_count + pong_count += 1 + if pong_count == 1: + # First pong, shutdown socket, enforce read error + app.sock.shutdown() + if pong_count >= 2: + # Got second pong after reconnect + app.close() + + app = ws.WebSocketApp( + f"ws://127.0.0.1:{LOCAL_WS_SERVER_PORT}", on_pong=on_pong, on_error=on_error + ) + app.run_forever(ping_interval=2, ping_timeout=1, reconnect=3) + + self.assertEqual(pong_count, 2) + self.assertIsInstance(exc, ws.WebSocketTimeoutException) + self.assertEqual(str(exc), "ping/pong timed out") + + +if __name__ == "__main__": + unittest.main() diff --git a/env/lib/python3.10/site-packages/websocket/tests/test_cookiejar.py b/env/lib/python3.10/site-packages/websocket/tests/test_cookiejar.py new file mode 100644 index 0000000..67eddb6 --- /dev/null +++ b/env/lib/python3.10/site-packages/websocket/tests/test_cookiejar.py @@ -0,0 +1,123 @@ +import unittest + +from websocket._cookiejar import SimpleCookieJar + +""" +test_cookiejar.py +websocket - WebSocket client library for Python + +Copyright 2024 engn33r + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + + +class CookieJarTest(unittest.TestCase): + def test_add(self): + cookie_jar = SimpleCookieJar() + cookie_jar.add("") + self.assertFalse( + cookie_jar.jar, "Cookie with no domain should not be added to the jar" + ) + + cookie_jar = SimpleCookieJar() + cookie_jar.add("a=b") + self.assertFalse( + cookie_jar.jar, "Cookie with no domain should not be added to the jar" + ) + + cookie_jar = SimpleCookieJar() + cookie_jar.add("a=b; domain=.abc") + self.assertTrue(".abc" in cookie_jar.jar) + + cookie_jar = SimpleCookieJar() + cookie_jar.add("a=b; domain=abc") + self.assertTrue(".abc" in cookie_jar.jar) + self.assertTrue("abc" not in cookie_jar.jar) + + cookie_jar = SimpleCookieJar() + cookie_jar.add("a=b; c=d; domain=abc") + self.assertEqual(cookie_jar.get("abc"), "a=b; c=d") + self.assertEqual(cookie_jar.get(None), "") + + cookie_jar = SimpleCookieJar() + cookie_jar.add("a=b; c=d; domain=abc") + cookie_jar.add("e=f; domain=abc") + self.assertEqual(cookie_jar.get("abc"), "a=b; c=d; e=f") + + cookie_jar = SimpleCookieJar() + cookie_jar.add("a=b; c=d; domain=abc") + cookie_jar.add("e=f; domain=.abc") + self.assertEqual(cookie_jar.get("abc"), "a=b; c=d; e=f") + + cookie_jar = SimpleCookieJar() + cookie_jar.add("a=b; c=d; domain=abc") + cookie_jar.add("e=f; domain=xyz") + self.assertEqual(cookie_jar.get("abc"), "a=b; c=d") + self.assertEqual(cookie_jar.get("xyz"), "e=f") + self.assertEqual(cookie_jar.get("something"), "") + + def test_set(self): + cookie_jar = SimpleCookieJar() + cookie_jar.set("a=b") + self.assertFalse( + cookie_jar.jar, "Cookie with no domain should not be 
added to the jar" + ) + + cookie_jar = SimpleCookieJar() + cookie_jar.set("a=b; domain=.abc") + self.assertTrue(".abc" in cookie_jar.jar) + + cookie_jar = SimpleCookieJar() + cookie_jar.set("a=b; domain=abc") + self.assertTrue(".abc" in cookie_jar.jar) + self.assertTrue("abc" not in cookie_jar.jar) + + cookie_jar = SimpleCookieJar() + cookie_jar.set("a=b; c=d; domain=abc") + self.assertEqual(cookie_jar.get("abc"), "a=b; c=d") + + cookie_jar = SimpleCookieJar() + cookie_jar.set("a=b; c=d; domain=abc") + cookie_jar.set("e=f; domain=abc") + self.assertEqual(cookie_jar.get("abc"), "e=f") + + cookie_jar = SimpleCookieJar() + cookie_jar.set("a=b; c=d; domain=abc") + cookie_jar.set("e=f; domain=.abc") + self.assertEqual(cookie_jar.get("abc"), "e=f") + + cookie_jar = SimpleCookieJar() + cookie_jar.set("a=b; c=d; domain=abc") + cookie_jar.set("e=f; domain=xyz") + self.assertEqual(cookie_jar.get("abc"), "a=b; c=d") + self.assertEqual(cookie_jar.get("xyz"), "e=f") + self.assertEqual(cookie_jar.get("something"), "") + + def test_get(self): + cookie_jar = SimpleCookieJar() + cookie_jar.set("a=b; c=d; domain=abc.com") + self.assertEqual(cookie_jar.get("abc.com"), "a=b; c=d") + self.assertEqual(cookie_jar.get("x.abc.com"), "a=b; c=d") + self.assertEqual(cookie_jar.get("abc.com.es"), "") + self.assertEqual(cookie_jar.get("xabc.com"), "") + + cookie_jar.set("a=b; c=d; domain=.abc.com") + self.assertEqual(cookie_jar.get("abc.com"), "a=b; c=d") + self.assertEqual(cookie_jar.get("x.abc.com"), "a=b; c=d") + self.assertEqual(cookie_jar.get("abc.com.es"), "") + self.assertEqual(cookie_jar.get("xabc.com"), "") + + +if __name__ == "__main__": + unittest.main() diff --git a/env/lib/python3.10/site-packages/websocket/tests/test_http.py b/env/lib/python3.10/site-packages/websocket/tests/test_http.py new file mode 100644 index 0000000..f495e63 --- /dev/null +++ b/env/lib/python3.10/site-packages/websocket/tests/test_http.py @@ -0,0 +1,370 @@ +# -*- coding: utf-8 -*- +# +import os +import 
os.path +import socket +import ssl +import unittest + +import websocket +from websocket._exceptions import WebSocketProxyException, WebSocketException +from websocket._http import ( + _get_addrinfo_list, + _start_proxied_socket, + _tunnel, + connect, + proxy_info, + read_headers, + HAVE_PYTHON_SOCKS, +) + +""" +test_http.py +websocket - WebSocket client library for Python + +Copyright 2024 engn33r + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +try: + from python_socks._errors import ProxyConnectionError, ProxyError, ProxyTimeoutError +except: + from websocket._http import ProxyConnectionError, ProxyError, ProxyTimeoutError + +# Skip test to access the internet unless TEST_WITH_INTERNET == 1 +TEST_WITH_INTERNET = os.environ.get("TEST_WITH_INTERNET", "0") == "1" +TEST_WITH_PROXY = os.environ.get("TEST_WITH_PROXY", "0") == "1" +# Skip tests relying on local websockets server unless LOCAL_WS_SERVER_PORT != -1 +LOCAL_WS_SERVER_PORT = os.environ.get("LOCAL_WS_SERVER_PORT", "-1") +TEST_WITH_LOCAL_SERVER = LOCAL_WS_SERVER_PORT != "-1" + + +class SockMock: + def __init__(self): + self.data = [] + self.sent = [] + + def add_packet(self, data): + self.data.append(data) + + def gettimeout(self): + return None + + def recv(self, bufsize): + if self.data: + e = self.data.pop(0) + if isinstance(e, Exception): + raise e + if len(e) > bufsize: + self.data.insert(0, e[bufsize:]) + return e[:bufsize] + + def send(self, data): + self.sent.append(data) + return len(data) + + def close(self): + pass + 
+ +class HeaderSockMock(SockMock): + def __init__(self, fname): + SockMock.__init__(self) + path = os.path.join(os.path.dirname(__file__), fname) + with open(path, "rb") as f: + self.add_packet(f.read()) + + +class OptsList: + def __init__(self): + self.timeout = 1 + self.sockopt = [] + self.sslopt = {"cert_reqs": ssl.CERT_NONE} + + +class HttpTest(unittest.TestCase): + def test_read_header(self): + status, header, _ = read_headers(HeaderSockMock("data/header01.txt")) + self.assertEqual(status, 101) + self.assertEqual(header["connection"], "Upgrade") + # header02.txt is intentionally malformed + self.assertRaises( + WebSocketException, read_headers, HeaderSockMock("data/header02.txt") + ) + + def test_tunnel(self): + self.assertRaises( + WebSocketProxyException, + _tunnel, + HeaderSockMock("data/header01.txt"), + "example.com", + 80, + ("username", "password"), + ) + self.assertRaises( + WebSocketProxyException, + _tunnel, + HeaderSockMock("data/header02.txt"), + "example.com", + 80, + ("username", "password"), + ) + + @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled") + def test_connect(self): + # Not currently testing an actual proxy connection, so just check whether proxy errors are raised. 
This requires internet for a DNS lookup + if HAVE_PYTHON_SOCKS: + # Need this check, otherwise case where python_socks is not installed triggers + # websocket._exceptions.WebSocketException: Python Socks is needed for SOCKS proxying but is not available + self.assertRaises( + (ProxyTimeoutError, OSError), + _start_proxied_socket, + "wss://example.com", + OptsList(), + proxy_info( + http_proxy_host="example.com", + http_proxy_port="8080", + proxy_type="socks4", + http_proxy_timeout=1, + ), + ) + self.assertRaises( + (ProxyTimeoutError, OSError), + _start_proxied_socket, + "wss://example.com", + OptsList(), + proxy_info( + http_proxy_host="example.com", + http_proxy_port="8080", + proxy_type="socks4a", + http_proxy_timeout=1, + ), + ) + self.assertRaises( + (ProxyTimeoutError, OSError), + _start_proxied_socket, + "wss://example.com", + OptsList(), + proxy_info( + http_proxy_host="example.com", + http_proxy_port="8080", + proxy_type="socks5", + http_proxy_timeout=1, + ), + ) + self.assertRaises( + (ProxyTimeoutError, OSError), + _start_proxied_socket, + "wss://example.com", + OptsList(), + proxy_info( + http_proxy_host="example.com", + http_proxy_port="8080", + proxy_type="socks5h", + http_proxy_timeout=1, + ), + ) + self.assertRaises( + ProxyConnectionError, + connect, + "wss://example.com", + OptsList(), + proxy_info( + http_proxy_host="127.0.0.1", + http_proxy_port=9999, + proxy_type="socks4", + http_proxy_timeout=1, + ), + None, + ) + + self.assertRaises( + TypeError, + _get_addrinfo_list, + None, + 80, + True, + proxy_info( + http_proxy_host="127.0.0.1", http_proxy_port="9999", proxy_type="http" + ), + ) + self.assertRaises( + TypeError, + _get_addrinfo_list, + None, + 80, + True, + proxy_info( + http_proxy_host="127.0.0.1", http_proxy_port="9999", proxy_type="http" + ), + ) + self.assertRaises( + socket.timeout, + connect, + "wss://google.com", + OptsList(), + proxy_info( + http_proxy_host="8.8.8.8", + http_proxy_port=9999, + proxy_type="http", + 
http_proxy_timeout=1, + ), + None, + ) + self.assertEqual( + connect( + "wss://google.com", + OptsList(), + proxy_info( + http_proxy_host="8.8.8.8", http_proxy_port=8080, proxy_type="http" + ), + True, + ), + (True, ("google.com", 443, "/")), + ) + # The following test fails on Mac OS with a gaierror, not an OverflowError + # self.assertRaises(OverflowError, connect, "wss://example.com", OptsList(), proxy_info(http_proxy_host="127.0.0.1", http_proxy_port=99999, proxy_type="socks4", timeout=2), False) + + @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled") + @unittest.skipUnless( + TEST_WITH_PROXY, "This test requires a HTTP proxy to be running on port 8899" + ) + @unittest.skipUnless( + TEST_WITH_LOCAL_SERVER, "Tests using local websocket server are disabled" + ) + def test_proxy_connect(self): + ws = websocket.WebSocket() + ws.connect( + f"ws://127.0.0.1:{LOCAL_WS_SERVER_PORT}", + http_proxy_host="127.0.0.1", + http_proxy_port="8899", + proxy_type="http", + ) + ws.send("Hello, Server") + server_response = ws.recv() + self.assertEqual(server_response, "Hello, Server") + # self.assertEqual(_start_proxied_socket("wss://api.bitfinex.com/ws/2", OptsList(), proxy_info(http_proxy_host="127.0.0.1", http_proxy_port="8899", proxy_type="http"))[1], ("api.bitfinex.com", 443, '/ws/2')) + self.assertEqual( + _get_addrinfo_list( + "api.bitfinex.com", + 443, + True, + proxy_info( + http_proxy_host="127.0.0.1", + http_proxy_port="8899", + proxy_type="http", + ), + ), + ( + socket.getaddrinfo( + "127.0.0.1", 8899, 0, socket.SOCK_STREAM, socket.SOL_TCP + ), + True, + None, + ), + ) + self.assertEqual( + connect( + "wss://api.bitfinex.com/ws/2", + OptsList(), + proxy_info( + http_proxy_host="127.0.0.1", http_proxy_port=8899, proxy_type="http" + ), + None, + )[1], + ("api.bitfinex.com", 443, "/ws/2"), + ) + # TODO: Test SOCKS4 and SOCK5 proxies with unit tests + + @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled") + def 
test_sslopt(self): + ssloptions = { + "check_hostname": False, + "server_hostname": "ServerName", + "ssl_version": ssl.PROTOCOL_TLS_CLIENT, + "ciphers": "TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:\ + TLS_AES_128_GCM_SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:\ + ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES256-GCM-SHA384:\ + ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:\ + DHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:\ + ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES128-GCM-SHA256:\ + ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:\ + DHE-RSA-AES256-SHA256:ECDHE-ECDSA-AES128-SHA256:\ + ECDHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA256:\ + ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA", + "ecdh_curve": "prime256v1", + } + ws_ssl1 = websocket.WebSocket(sslopt=ssloptions) + ws_ssl1.connect("wss://api.bitfinex.com/ws/2") + ws_ssl1.send("Hello") + ws_ssl1.close() + + ws_ssl2 = websocket.WebSocket(sslopt={"check_hostname": True}) + ws_ssl2.connect("wss://api.bitfinex.com/ws/2") + ws_ssl2.close + + def test_proxy_info(self): + self.assertEqual( + proxy_info( + http_proxy_host="127.0.0.1", http_proxy_port="8080", proxy_type="http" + ).proxy_protocol, + "http", + ) + self.assertRaises( + ProxyError, + proxy_info, + http_proxy_host="127.0.0.1", + http_proxy_port="8080", + proxy_type="badval", + ) + self.assertEqual( + proxy_info( + http_proxy_host="example.com", http_proxy_port="8080", proxy_type="http" + ).proxy_host, + "example.com", + ) + self.assertEqual( + proxy_info( + http_proxy_host="127.0.0.1", http_proxy_port="8080", proxy_type="http" + ).proxy_port, + "8080", + ) + self.assertEqual( + proxy_info( + http_proxy_host="127.0.0.1", http_proxy_port="8080", proxy_type="http" + ).auth, + None, + ) + self.assertEqual( + proxy_info( + http_proxy_host="127.0.0.1", + http_proxy_port="8080", + proxy_type="http", + http_proxy_auth=("my_username123", "my_pass321"), + ).auth[0], + "my_username123", + ) + self.assertEqual( + proxy_info( + http_proxy_host="127.0.0.1", + 
http_proxy_port="8080", + proxy_type="http", + http_proxy_auth=("my_username123", "my_pass321"), + ).auth[1], + "my_pass321", + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/env/lib/python3.10/site-packages/websocket/tests/test_url.py b/env/lib/python3.10/site-packages/websocket/tests/test_url.py new file mode 100644 index 0000000..110fdfa --- /dev/null +++ b/env/lib/python3.10/site-packages/websocket/tests/test_url.py @@ -0,0 +1,464 @@ +# -*- coding: utf-8 -*- +# +import os +import unittest + +from websocket._url import ( + _is_address_in_network, + _is_no_proxy_host, + get_proxy_info, + parse_url, +) +from websocket._exceptions import WebSocketProxyException + +""" +test_url.py +websocket - WebSocket client library for Python + +Copyright 2024 engn33r + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" + + +class UrlTest(unittest.TestCase): + def test_address_in_network(self): + self.assertTrue(_is_address_in_network("127.0.0.1", "127.0.0.0/8")) + self.assertTrue(_is_address_in_network("127.1.0.1", "127.0.0.0/8")) + self.assertFalse(_is_address_in_network("127.1.0.1", "127.0.0.0/24")) + + def test_parse_url(self): + p = parse_url("ws://www.example.com/r") + self.assertEqual(p[0], "www.example.com") + self.assertEqual(p[1], 80) + self.assertEqual(p[2], "/r") + self.assertEqual(p[3], False) + + p = parse_url("ws://www.example.com/r/") + self.assertEqual(p[0], "www.example.com") + self.assertEqual(p[1], 80) + self.assertEqual(p[2], "/r/") + self.assertEqual(p[3], False) + + p = parse_url("ws://www.example.com/") + self.assertEqual(p[0], "www.example.com") + self.assertEqual(p[1], 80) + self.assertEqual(p[2], "/") + self.assertEqual(p[3], False) + + p = parse_url("ws://www.example.com") + self.assertEqual(p[0], "www.example.com") + self.assertEqual(p[1], 80) + self.assertEqual(p[2], "/") + self.assertEqual(p[3], False) + + p = parse_url("ws://www.example.com:8080/r") + self.assertEqual(p[0], "www.example.com") + self.assertEqual(p[1], 8080) + self.assertEqual(p[2], "/r") + self.assertEqual(p[3], False) + + p = parse_url("ws://www.example.com:8080/") + self.assertEqual(p[0], "www.example.com") + self.assertEqual(p[1], 8080) + self.assertEqual(p[2], "/") + self.assertEqual(p[3], False) + + p = parse_url("ws://www.example.com:8080") + self.assertEqual(p[0], "www.example.com") + self.assertEqual(p[1], 8080) + self.assertEqual(p[2], "/") + self.assertEqual(p[3], False) + + p = parse_url("wss://www.example.com:8080/r") + self.assertEqual(p[0], "www.example.com") + self.assertEqual(p[1], 8080) + self.assertEqual(p[2], "/r") + self.assertEqual(p[3], True) + + p = parse_url("wss://www.example.com:8080/r?key=value") + self.assertEqual(p[0], "www.example.com") + self.assertEqual(p[1], 8080) + self.assertEqual(p[2], "/r?key=value") + self.assertEqual(p[3], True) + + 
self.assertRaises(ValueError, parse_url, "http://www.example.com/r") + + p = parse_url("ws://[2a03:4000:123:83::3]/r") + self.assertEqual(p[0], "2a03:4000:123:83::3") + self.assertEqual(p[1], 80) + self.assertEqual(p[2], "/r") + self.assertEqual(p[3], False) + + p = parse_url("ws://[2a03:4000:123:83::3]:8080/r") + self.assertEqual(p[0], "2a03:4000:123:83::3") + self.assertEqual(p[1], 8080) + self.assertEqual(p[2], "/r") + self.assertEqual(p[3], False) + + p = parse_url("wss://[2a03:4000:123:83::3]/r") + self.assertEqual(p[0], "2a03:4000:123:83::3") + self.assertEqual(p[1], 443) + self.assertEqual(p[2], "/r") + self.assertEqual(p[3], True) + + p = parse_url("wss://[2a03:4000:123:83::3]:8080/r") + self.assertEqual(p[0], "2a03:4000:123:83::3") + self.assertEqual(p[1], 8080) + self.assertEqual(p[2], "/r") + self.assertEqual(p[3], True) + + +class IsNoProxyHostTest(unittest.TestCase): + def setUp(self): + self.no_proxy = os.environ.get("no_proxy", None) + if "no_proxy" in os.environ: + del os.environ["no_proxy"] + + def tearDown(self): + if self.no_proxy: + os.environ["no_proxy"] = self.no_proxy + elif "no_proxy" in os.environ: + del os.environ["no_proxy"] + + def test_match_all(self): + self.assertTrue(_is_no_proxy_host("any.websocket.org", ["*"])) + self.assertTrue(_is_no_proxy_host("192.168.0.1", ["*"])) + self.assertFalse(_is_no_proxy_host("192.168.0.1", ["192.168.1.1"])) + self.assertFalse( + _is_no_proxy_host("any.websocket.org", ["other.websocket.org"]) + ) + self.assertTrue( + _is_no_proxy_host("any.websocket.org", ["other.websocket.org", "*"]) + ) + os.environ["no_proxy"] = "*" + self.assertTrue(_is_no_proxy_host("any.websocket.org", None)) + self.assertTrue(_is_no_proxy_host("192.168.0.1", None)) + os.environ["no_proxy"] = "other.websocket.org, *" + self.assertTrue(_is_no_proxy_host("any.websocket.org", None)) + + def test_ip_address(self): + self.assertTrue(_is_no_proxy_host("127.0.0.1", ["127.0.0.1"])) + self.assertFalse(_is_no_proxy_host("127.0.0.2", 
["127.0.0.1"])) + self.assertTrue( + _is_no_proxy_host("127.0.0.1", ["other.websocket.org", "127.0.0.1"]) + ) + self.assertFalse( + _is_no_proxy_host("127.0.0.2", ["other.websocket.org", "127.0.0.1"]) + ) + os.environ["no_proxy"] = "127.0.0.1" + self.assertTrue(_is_no_proxy_host("127.0.0.1", None)) + self.assertFalse(_is_no_proxy_host("127.0.0.2", None)) + os.environ["no_proxy"] = "other.websocket.org, 127.0.0.1" + self.assertTrue(_is_no_proxy_host("127.0.0.1", None)) + self.assertFalse(_is_no_proxy_host("127.0.0.2", None)) + + def test_ip_address_in_range(self): + self.assertTrue(_is_no_proxy_host("127.0.0.1", ["127.0.0.0/8"])) + self.assertTrue(_is_no_proxy_host("127.0.0.2", ["127.0.0.0/8"])) + self.assertFalse(_is_no_proxy_host("127.1.0.1", ["127.0.0.0/24"])) + os.environ["no_proxy"] = "127.0.0.0/8" + self.assertTrue(_is_no_proxy_host("127.0.0.1", None)) + self.assertTrue(_is_no_proxy_host("127.0.0.2", None)) + os.environ["no_proxy"] = "127.0.0.0/24" + self.assertFalse(_is_no_proxy_host("127.1.0.1", None)) + + def test_hostname_match(self): + self.assertTrue(_is_no_proxy_host("my.websocket.org", ["my.websocket.org"])) + self.assertTrue( + _is_no_proxy_host( + "my.websocket.org", ["other.websocket.org", "my.websocket.org"] + ) + ) + self.assertFalse(_is_no_proxy_host("my.websocket.org", ["other.websocket.org"])) + os.environ["no_proxy"] = "my.websocket.org" + self.assertTrue(_is_no_proxy_host("my.websocket.org", None)) + self.assertFalse(_is_no_proxy_host("other.websocket.org", None)) + os.environ["no_proxy"] = "other.websocket.org, my.websocket.org" + self.assertTrue(_is_no_proxy_host("my.websocket.org", None)) + + def test_hostname_match_domain(self): + self.assertTrue(_is_no_proxy_host("any.websocket.org", [".websocket.org"])) + self.assertTrue(_is_no_proxy_host("my.other.websocket.org", [".websocket.org"])) + self.assertTrue( + _is_no_proxy_host( + "any.websocket.org", ["my.websocket.org", ".websocket.org"] + ) + ) + 
self.assertFalse(_is_no_proxy_host("any.websocket.com", [".websocket.org"])) + os.environ["no_proxy"] = ".websocket.org" + self.assertTrue(_is_no_proxy_host("any.websocket.org", None)) + self.assertTrue(_is_no_proxy_host("my.other.websocket.org", None)) + self.assertFalse(_is_no_proxy_host("any.websocket.com", None)) + os.environ["no_proxy"] = "my.websocket.org, .websocket.org" + self.assertTrue(_is_no_proxy_host("any.websocket.org", None)) + + +class ProxyInfoTest(unittest.TestCase): + def setUp(self): + self.http_proxy = os.environ.get("http_proxy", None) + self.https_proxy = os.environ.get("https_proxy", None) + self.no_proxy = os.environ.get("no_proxy", None) + if "http_proxy" in os.environ: + del os.environ["http_proxy"] + if "https_proxy" in os.environ: + del os.environ["https_proxy"] + if "no_proxy" in os.environ: + del os.environ["no_proxy"] + + def tearDown(self): + if self.http_proxy: + os.environ["http_proxy"] = self.http_proxy + elif "http_proxy" in os.environ: + del os.environ["http_proxy"] + + if self.https_proxy: + os.environ["https_proxy"] = self.https_proxy + elif "https_proxy" in os.environ: + del os.environ["https_proxy"] + + if self.no_proxy: + os.environ["no_proxy"] = self.no_proxy + elif "no_proxy" in os.environ: + del os.environ["no_proxy"] + + def test_proxy_from_args(self): + self.assertRaises( + WebSocketProxyException, + get_proxy_info, + "echo.websocket.events", + False, + proxy_host="localhost", + ) + self.assertEqual( + get_proxy_info( + "echo.websocket.events", False, proxy_host="localhost", proxy_port=3128 + ), + ("localhost", 3128, None), + ) + self.assertEqual( + get_proxy_info( + "echo.websocket.events", True, proxy_host="localhost", proxy_port=3128 + ), + ("localhost", 3128, None), + ) + + self.assertEqual( + get_proxy_info( + "echo.websocket.events", + False, + proxy_host="localhost", + proxy_port=9001, + proxy_auth=("a", "b"), + ), + ("localhost", 9001, ("a", "b")), + ) + self.assertEqual( + get_proxy_info( + 
"echo.websocket.events", + False, + proxy_host="localhost", + proxy_port=3128, + proxy_auth=("a", "b"), + ), + ("localhost", 3128, ("a", "b")), + ) + self.assertEqual( + get_proxy_info( + "echo.websocket.events", + True, + proxy_host="localhost", + proxy_port=8765, + proxy_auth=("a", "b"), + ), + ("localhost", 8765, ("a", "b")), + ) + self.assertEqual( + get_proxy_info( + "echo.websocket.events", + True, + proxy_host="localhost", + proxy_port=3128, + proxy_auth=("a", "b"), + ), + ("localhost", 3128, ("a", "b")), + ) + + self.assertEqual( + get_proxy_info( + "echo.websocket.events", + True, + proxy_host="localhost", + proxy_port=3128, + no_proxy=["example.com"], + proxy_auth=("a", "b"), + ), + ("localhost", 3128, ("a", "b")), + ) + self.assertEqual( + get_proxy_info( + "echo.websocket.events", + True, + proxy_host="localhost", + proxy_port=3128, + no_proxy=["echo.websocket.events"], + proxy_auth=("a", "b"), + ), + (None, 0, None), + ) + + self.assertEqual( + get_proxy_info( + "echo.websocket.events", + True, + proxy_host="localhost", + proxy_port=3128, + no_proxy=[".websocket.events"], + ), + (None, 0, None), + ) + + def test_proxy_from_env(self): + os.environ["http_proxy"] = "http://localhost/" + self.assertEqual( + get_proxy_info("echo.websocket.events", False), ("localhost", None, None) + ) + os.environ["http_proxy"] = "http://localhost:3128/" + self.assertEqual( + get_proxy_info("echo.websocket.events", False), ("localhost", 3128, None) + ) + + os.environ["http_proxy"] = "http://localhost/" + os.environ["https_proxy"] = "http://localhost2/" + self.assertEqual( + get_proxy_info("echo.websocket.events", False), ("localhost", None, None) + ) + os.environ["http_proxy"] = "http://localhost:3128/" + os.environ["https_proxy"] = "http://localhost2:3128/" + self.assertEqual( + get_proxy_info("echo.websocket.events", False), ("localhost", 3128, None) + ) + + os.environ["http_proxy"] = "http://localhost/" + os.environ["https_proxy"] = "http://localhost2/" + 
self.assertEqual( + get_proxy_info("echo.websocket.events", True), ("localhost2", None, None) + ) + os.environ["http_proxy"] = "http://localhost:3128/" + os.environ["https_proxy"] = "http://localhost2:3128/" + self.assertEqual( + get_proxy_info("echo.websocket.events", True), ("localhost2", 3128, None) + ) + + os.environ["http_proxy"] = "" + os.environ["https_proxy"] = "http://localhost2/" + self.assertEqual( + get_proxy_info("echo.websocket.events", True), ("localhost2", None, None) + ) + self.assertEqual( + get_proxy_info("echo.websocket.events", False), (None, 0, None) + ) + os.environ["http_proxy"] = "" + os.environ["https_proxy"] = "http://localhost2:3128/" + self.assertEqual( + get_proxy_info("echo.websocket.events", True), ("localhost2", 3128, None) + ) + self.assertEqual( + get_proxy_info("echo.websocket.events", False), (None, 0, None) + ) + + os.environ["http_proxy"] = "http://localhost/" + os.environ["https_proxy"] = "" + self.assertEqual(get_proxy_info("echo.websocket.events", True), (None, 0, None)) + self.assertEqual( + get_proxy_info("echo.websocket.events", False), ("localhost", None, None) + ) + os.environ["http_proxy"] = "http://localhost:3128/" + os.environ["https_proxy"] = "" + self.assertEqual(get_proxy_info("echo.websocket.events", True), (None, 0, None)) + self.assertEqual( + get_proxy_info("echo.websocket.events", False), ("localhost", 3128, None) + ) + + os.environ["http_proxy"] = "http://a:b@localhost/" + self.assertEqual( + get_proxy_info("echo.websocket.events", False), + ("localhost", None, ("a", "b")), + ) + os.environ["http_proxy"] = "http://a:b@localhost:3128/" + self.assertEqual( + get_proxy_info("echo.websocket.events", False), + ("localhost", 3128, ("a", "b")), + ) + + os.environ["http_proxy"] = "http://a:b@localhost/" + os.environ["https_proxy"] = "http://a:b@localhost2/" + self.assertEqual( + get_proxy_info("echo.websocket.events", False), + ("localhost", None, ("a", "b")), + ) + os.environ["http_proxy"] = 
"http://a:b@localhost:3128/" + os.environ["https_proxy"] = "http://a:b@localhost2:3128/" + self.assertEqual( + get_proxy_info("echo.websocket.events", False), + ("localhost", 3128, ("a", "b")), + ) + + os.environ["http_proxy"] = "http://a:b@localhost/" + os.environ["https_proxy"] = "http://a:b@localhost2/" + self.assertEqual( + get_proxy_info("echo.websocket.events", True), + ("localhost2", None, ("a", "b")), + ) + os.environ["http_proxy"] = "http://a:b@localhost:3128/" + os.environ["https_proxy"] = "http://a:b@localhost2:3128/" + self.assertEqual( + get_proxy_info("echo.websocket.events", True), + ("localhost2", 3128, ("a", "b")), + ) + + os.environ[ + "http_proxy" + ] = "http://john%40example.com:P%40SSWORD@localhost:3128/" + os.environ[ + "https_proxy" + ] = "http://john%40example.com:P%40SSWORD@localhost2:3128/" + self.assertEqual( + get_proxy_info("echo.websocket.events", True), + ("localhost2", 3128, ("john@example.com", "P@SSWORD")), + ) + + os.environ["http_proxy"] = "http://a:b@localhost/" + os.environ["https_proxy"] = "http://a:b@localhost2/" + os.environ["no_proxy"] = "example1.com,example2.com" + self.assertEqual( + get_proxy_info("example.1.com", True), ("localhost2", None, ("a", "b")) + ) + os.environ["http_proxy"] = "http://a:b@localhost:3128/" + os.environ["https_proxy"] = "http://a:b@localhost2:3128/" + os.environ["no_proxy"] = "example1.com,example2.com, echo.websocket.events" + self.assertEqual(get_proxy_info("echo.websocket.events", True), (None, 0, None)) + os.environ["http_proxy"] = "http://a:b@localhost:3128/" + os.environ["https_proxy"] = "http://a:b@localhost2:3128/" + os.environ["no_proxy"] = "example1.com,example2.com, .websocket.events" + self.assertEqual(get_proxy_info("echo.websocket.events", True), (None, 0, None)) + + os.environ["http_proxy"] = "http://a:b@localhost:3128/" + os.environ["https_proxy"] = "http://a:b@localhost2:3128/" + os.environ["no_proxy"] = "127.0.0.0/8, 192.168.0.0/16" + self.assertEqual(get_proxy_info("127.0.0.1", 
False), (None, 0, None)) + self.assertEqual(get_proxy_info("192.168.1.1", False), (None, 0, None)) + + +if __name__ == "__main__": + unittest.main() diff --git a/env/lib/python3.10/site-packages/websocket/tests/test_websocket.py b/env/lib/python3.10/site-packages/websocket/tests/test_websocket.py new file mode 100644 index 0000000..a1d7ad5 --- /dev/null +++ b/env/lib/python3.10/site-packages/websocket/tests/test_websocket.py @@ -0,0 +1,497 @@ +# -*- coding: utf-8 -*- +# +import os +import os.path +import socket +import unittest +from base64 import decodebytes as base64decode + +import websocket as ws +from websocket._exceptions import WebSocketBadStatusException, WebSocketAddressException +from websocket._handshake import _create_sec_websocket_key +from websocket._handshake import _validate as _validate_header +from websocket._http import read_headers +from websocket._utils import validate_utf8 + +""" +test_websocket.py +websocket - WebSocket client library for Python + +Copyright 2024 engn33r + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +try: + import ssl +except ImportError: + # dummy class of SSLError for ssl none-support environment. 
+ class SSLError(Exception): + pass + + +# Skip test to access the internet unless TEST_WITH_INTERNET == 1 +TEST_WITH_INTERNET = os.environ.get("TEST_WITH_INTERNET", "0") == "1" +# Skip tests relying on local websockets server unless LOCAL_WS_SERVER_PORT != -1 +LOCAL_WS_SERVER_PORT = os.environ.get("LOCAL_WS_SERVER_PORT", "-1") +TEST_WITH_LOCAL_SERVER = LOCAL_WS_SERVER_PORT != "-1" +TRACEABLE = True + + +def create_mask_key(_): + return "abcd" + + +class SockMock: + def __init__(self): + self.data = [] + self.sent = [] + + def add_packet(self, data): + self.data.append(data) + + def gettimeout(self): + return None + + def recv(self, bufsize): + if self.data: + e = self.data.pop(0) + if isinstance(e, Exception): + raise e + if len(e) > bufsize: + self.data.insert(0, e[bufsize:]) + return e[:bufsize] + + def send(self, data): + self.sent.append(data) + return len(data) + + def close(self): + pass + + +class HeaderSockMock(SockMock): + def __init__(self, fname): + SockMock.__init__(self) + path = os.path.join(os.path.dirname(__file__), fname) + with open(path, "rb") as f: + self.add_packet(f.read()) + + +class WebSocketTest(unittest.TestCase): + def setUp(self): + ws.enableTrace(TRACEABLE) + + def tearDown(self): + pass + + def test_default_timeout(self): + self.assertEqual(ws.getdefaulttimeout(), None) + ws.setdefaulttimeout(10) + self.assertEqual(ws.getdefaulttimeout(), 10) + ws.setdefaulttimeout(None) + + def test_ws_key(self): + key = _create_sec_websocket_key() + self.assertTrue(key != 24) + self.assertTrue("¥n" not in key) + + def test_nonce(self): + """WebSocket key should be a random 16-byte nonce.""" + key = _create_sec_websocket_key() + nonce = base64decode(key.encode("utf-8")) + self.assertEqual(16, len(nonce)) + + def test_ws_utils(self): + key = "c6b8hTg4EeGb2gQMztV1/g==" + required_header = { + "upgrade": "websocket", + "connection": "upgrade", + "sec-websocket-accept": "Kxep+hNu9n51529fGidYu7a3wO0=", + } + 
self.assertEqual(_validate_header(required_header, key, None), (True, None)) + + header = required_header.copy() + header["upgrade"] = "http" + self.assertEqual(_validate_header(header, key, None), (False, None)) + del header["upgrade"] + self.assertEqual(_validate_header(header, key, None), (False, None)) + + header = required_header.copy() + header["connection"] = "something" + self.assertEqual(_validate_header(header, key, None), (False, None)) + del header["connection"] + self.assertEqual(_validate_header(header, key, None), (False, None)) + + header = required_header.copy() + header["sec-websocket-accept"] = "something" + self.assertEqual(_validate_header(header, key, None), (False, None)) + del header["sec-websocket-accept"] + self.assertEqual(_validate_header(header, key, None), (False, None)) + + header = required_header.copy() + header["sec-websocket-protocol"] = "sub1" + self.assertEqual( + _validate_header(header, key, ["sub1", "sub2"]), (True, "sub1") + ) + # This case will print out a logging error using the error() function, but that is expected + self.assertEqual(_validate_header(header, key, ["sub2", "sub3"]), (False, None)) + + header = required_header.copy() + header["sec-websocket-protocol"] = "sUb1" + self.assertEqual( + _validate_header(header, key, ["Sub1", "suB2"]), (True, "sub1") + ) + + header = required_header.copy() + # This case will print out a logging error using the error() function, but that is expected + self.assertEqual(_validate_header(header, key, ["Sub1", "suB2"]), (False, None)) + + def test_read_header(self): + status, header, _ = read_headers(HeaderSockMock("data/header01.txt")) + self.assertEqual(status, 101) + self.assertEqual(header["connection"], "Upgrade") + + status, header, _ = read_headers(HeaderSockMock("data/header03.txt")) + self.assertEqual(status, 101) + self.assertEqual(header["connection"], "Upgrade, Keep-Alive") + + HeaderSockMock("data/header02.txt") + self.assertRaises( + ws.WebSocketException, read_headers, 
HeaderSockMock("data/header02.txt") + ) + + def test_send(self): + # TODO: add longer frame data + sock = ws.WebSocket() + sock.set_mask_key(create_mask_key) + s = sock.sock = HeaderSockMock("data/header01.txt") + sock.send("Hello") + self.assertEqual(s.sent[0], b"\x81\x85abcd)\x07\x0f\x08\x0e") + + sock.send("こんにちは") + self.assertEqual( + s.sent[1], + b"\x81\x8fabcd\x82\xe3\xf0\x87\xe3\xf1\x80\xe5\xca\x81\xe2\xc5\x82\xe3\xcc", + ) + + # sock.send("x" * 5000) + # self.assertEqual(s.sent[1], b'\x81\x8fabcd\x82\xe3\xf0\x87\xe3\xf1\x80\xe5\xca\x81\xe2\xc5\x82\xe3\xcc") + + self.assertEqual(sock.send_binary(b"1111111111101"), 19) + + def test_recv(self): + # TODO: add longer frame data + sock = ws.WebSocket() + s = sock.sock = SockMock() + something = ( + b"\x81\x8fabcd\x82\xe3\xf0\x87\xe3\xf1\x80\xe5\xca\x81\xe2\xc5\x82\xe3\xcc" + ) + s.add_packet(something) + data = sock.recv() + self.assertEqual(data, "こんにちは") + + s.add_packet(b"\x81\x85abcd)\x07\x0f\x08\x0e") + data = sock.recv() + self.assertEqual(data, "Hello") + + @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled") + def test_iter(self): + count = 2 + s = ws.create_connection("wss://api.bitfinex.com/ws/2") + s.send('{"event": "subscribe", "channel": "ticker"}') + for _ in s: + count -= 1 + if count == 0: + break + + @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled") + def test_next(self): + sock = ws.create_connection("wss://api.bitfinex.com/ws/2") + self.assertEqual(str, type(next(sock))) + + def test_internal_recv_strict(self): + sock = ws.WebSocket() + s = sock.sock = SockMock() + s.add_packet(b"foo") + s.add_packet(socket.timeout()) + s.add_packet(b"bar") + # s.add_packet(SSLError("The read operation timed out")) + s.add_packet(b"baz") + with self.assertRaises(ws.WebSocketTimeoutException): + sock.frame_buffer.recv_strict(9) + # with self.assertRaises(SSLError): + # data = sock._recv_strict(9) + data = sock.frame_buffer.recv_strict(9) + 
self.assertEqual(data, b"foobarbaz") + with self.assertRaises(ws.WebSocketConnectionClosedException): + sock.frame_buffer.recv_strict(1) + + def test_recv_timeout(self): + sock = ws.WebSocket() + s = sock.sock = SockMock() + s.add_packet(b"\x81") + s.add_packet(socket.timeout()) + s.add_packet(b"\x8dabcd\x29\x07\x0f\x08\x0e") + s.add_packet(socket.timeout()) + s.add_packet(b"\x4e\x43\x33\x0e\x10\x0f\x00\x40") + with self.assertRaises(ws.WebSocketTimeoutException): + sock.recv() + with self.assertRaises(ws.WebSocketTimeoutException): + sock.recv() + data = sock.recv() + self.assertEqual(data, "Hello, World!") + with self.assertRaises(ws.WebSocketConnectionClosedException): + sock.recv() + + def test_recv_with_simple_fragmentation(self): + sock = ws.WebSocket() + s = sock.sock = SockMock() + # OPCODE=TEXT, FIN=0, MSG="Brevity is " + s.add_packet(b"\x01\x8babcd#\x10\x06\x12\x08\x16\x1aD\x08\x11C") + # OPCODE=CONT, FIN=1, MSG="the soul of wit" + s.add_packet(b"\x80\x8fabcd\x15\n\x06D\x12\r\x16\x08A\r\x05D\x16\x0b\x17") + data = sock.recv() + self.assertEqual(data, "Brevity is the soul of wit") + with self.assertRaises(ws.WebSocketConnectionClosedException): + sock.recv() + + def test_recv_with_fire_event_of_fragmentation(self): + sock = ws.WebSocket(fire_cont_frame=True) + s = sock.sock = SockMock() + # OPCODE=TEXT, FIN=0, MSG="Brevity is " + s.add_packet(b"\x01\x8babcd#\x10\x06\x12\x08\x16\x1aD\x08\x11C") + # OPCODE=CONT, FIN=0, MSG="Brevity is " + s.add_packet(b"\x00\x8babcd#\x10\x06\x12\x08\x16\x1aD\x08\x11C") + # OPCODE=CONT, FIN=1, MSG="the soul of wit" + s.add_packet(b"\x80\x8fabcd\x15\n\x06D\x12\r\x16\x08A\r\x05D\x16\x0b\x17") + + _, data = sock.recv_data() + self.assertEqual(data, b"Brevity is ") + _, data = sock.recv_data() + self.assertEqual(data, b"Brevity is ") + _, data = sock.recv_data() + self.assertEqual(data, b"the soul of wit") + + # OPCODE=CONT, FIN=0, MSG="Brevity is " + s.add_packet(b"\x80\x8babcd#\x10\x06\x12\x08\x16\x1aD\x08\x11C") + + with 
self.assertRaises(ws.WebSocketException): + sock.recv_data() + + with self.assertRaises(ws.WebSocketConnectionClosedException): + sock.recv() + + def test_close(self): + sock = ws.WebSocket() + sock.connected = True + sock.close + + sock = ws.WebSocket() + s = sock.sock = SockMock() + sock.connected = True + s.add_packet(b"\x88\x80\x17\x98p\x84") + sock.recv() + self.assertEqual(sock.connected, False) + + def test_recv_cont_fragmentation(self): + sock = ws.WebSocket() + s = sock.sock = SockMock() + # OPCODE=CONT, FIN=1, MSG="the soul of wit" + s.add_packet(b"\x80\x8fabcd\x15\n\x06D\x12\r\x16\x08A\r\x05D\x16\x0b\x17") + self.assertRaises(ws.WebSocketException, sock.recv) + + def test_recv_with_prolonged_fragmentation(self): + sock = ws.WebSocket() + s = sock.sock = SockMock() + # OPCODE=TEXT, FIN=0, MSG="Once more unto the breach, " + s.add_packet( + b"\x01\x9babcd.\x0c\x00\x01A\x0f\x0c\x16\x04B\x16\n\x15\rC\x10\t\x07C\x06\x13\x07\x02\x07\tNC" + ) + # OPCODE=CONT, FIN=0, MSG="dear friends, " + s.add_packet(b"\x00\x8eabcd\x05\x07\x02\x16A\x04\x11\r\x04\x0c\x07\x17MB") + # OPCODE=CONT, FIN=1, MSG="once more" + s.add_packet(b"\x80\x89abcd\x0e\x0c\x00\x01A\x0f\x0c\x16\x04") + data = sock.recv() + self.assertEqual(data, "Once more unto the breach, dear friends, once more") + with self.assertRaises(ws.WebSocketConnectionClosedException): + sock.recv() + + def test_recv_with_fragmentation_and_control_frame(self): + sock = ws.WebSocket() + sock.set_mask_key(create_mask_key) + s = sock.sock = SockMock() + # OPCODE=TEXT, FIN=0, MSG="Too much " + s.add_packet(b"\x01\x89abcd5\r\x0cD\x0c\x17\x00\x0cA") + # OPCODE=PING, FIN=1, MSG="Please PONG this" + s.add_packet(b"\x89\x90abcd1\x0e\x06\x05\x12\x07C4.,$D\x15\n\n\x17") + # OPCODE=CONT, FIN=1, MSG="of a good thing" + s.add_packet(b"\x80\x8fabcd\x0e\x04C\x05A\x05\x0c\x0b\x05B\x17\x0c\x08\x0c\x04") + data = sock.recv() + self.assertEqual(data, "Too much of a good thing") + with 
self.assertRaises(ws.WebSocketConnectionClosedException): + sock.recv() + self.assertEqual( + s.sent[0], b"\x8a\x90abcd1\x0e\x06\x05\x12\x07C4.,$D\x15\n\n\x17" + ) + + @unittest.skipUnless( + TEST_WITH_LOCAL_SERVER, "Tests using local websocket server are disabled" + ) + def test_websocket(self): + s = ws.create_connection(f"ws://127.0.0.1:{LOCAL_WS_SERVER_PORT}") + self.assertNotEqual(s, None) + s.send("Hello, World") + result = s.next() + s.fileno() + self.assertEqual(result, "Hello, World") + + s.send("こにゃにゃちは、世界") + result = s.recv() + self.assertEqual(result, "こにゃにゃちは、世界") + self.assertRaises(ValueError, s.send_close, -1, "") + s.close() + + @unittest.skipUnless( + TEST_WITH_LOCAL_SERVER, "Tests using local websocket server are disabled" + ) + def test_ping_pong(self): + s = ws.create_connection(f"ws://127.0.0.1:{LOCAL_WS_SERVER_PORT}") + self.assertNotEqual(s, None) + s.ping("Hello") + s.pong("Hi") + s.close() + + @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled") + def test_support_redirect(self): + s = ws.WebSocket() + self.assertRaises(WebSocketBadStatusException, s.connect, "ws://google.com/") + # Need to find a URL that has a redirect code leading to a websocket + + @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled") + def test_secure_websocket(self): + s = ws.create_connection("wss://api.bitfinex.com/ws/2") + self.assertNotEqual(s, None) + self.assertTrue(isinstance(s.sock, ssl.SSLSocket)) + self.assertEqual(s.getstatus(), 101) + self.assertNotEqual(s.getheaders(), None) + s.settimeout(10) + self.assertEqual(s.gettimeout(), 10) + self.assertEqual(s.getsubprotocol(), None) + s.abort() + + @unittest.skipUnless( + TEST_WITH_LOCAL_SERVER, "Tests using local websocket server are disabled" + ) + def test_websocket_with_custom_header(self): + s = ws.create_connection( + f"ws://127.0.0.1:{LOCAL_WS_SERVER_PORT}", + headers={"User-Agent": "PythonWebsocketClient"}, + ) + self.assertNotEqual(s, None) + 
self.assertEqual(s.getsubprotocol(), None) + s.send("Hello, World") + result = s.recv() + self.assertEqual(result, "Hello, World") + self.assertRaises(ValueError, s.close, -1, "") + s.close() + + @unittest.skipUnless( + TEST_WITH_LOCAL_SERVER, "Tests using local websocket server are disabled" + ) + def test_after_close(self): + s = ws.create_connection(f"ws://127.0.0.1:{LOCAL_WS_SERVER_PORT}") + self.assertNotEqual(s, None) + s.close() + self.assertRaises(ws.WebSocketConnectionClosedException, s.send, "Hello") + self.assertRaises(ws.WebSocketConnectionClosedException, s.recv) + + +class SockOptTest(unittest.TestCase): + @unittest.skipUnless( + TEST_WITH_LOCAL_SERVER, "Tests using local websocket server are disabled" + ) + def test_sockopt(self): + sockopt = ((socket.IPPROTO_TCP, socket.TCP_NODELAY, 1),) + s = ws.create_connection( + f"ws://127.0.0.1:{LOCAL_WS_SERVER_PORT}", sockopt=sockopt + ) + self.assertNotEqual( + s.sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY), 0 + ) + s.close() + + +class UtilsTest(unittest.TestCase): + def test_utf8_validator(self): + state = validate_utf8(b"\xf0\x90\x80\x80") + self.assertEqual(state, True) + state = validate_utf8( + b"\xce\xba\xe1\xbd\xb9\xcf\x83\xce\xbc\xce\xb5\xed\xa0\x80edited" + ) + self.assertEqual(state, False) + state = validate_utf8(b"") + self.assertEqual(state, True) + + +class HandshakeTest(unittest.TestCase): + @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled") + def test_http_ssl(self): + websock1 = ws.WebSocket( + sslopt={"cert_chain": ssl.get_default_verify_paths().capath}, + enable_multithread=False, + ) + self.assertRaises(ValueError, websock1.connect, "wss://api.bitfinex.com/ws/2") + websock2 = ws.WebSocket(sslopt={"certfile": "myNonexistentCertFile"}) + self.assertRaises( + FileNotFoundError, websock2.connect, "wss://api.bitfinex.com/ws/2" + ) + + @unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled") + def test_manual_headers(self): + 
websock3 = ws.WebSocket( + sslopt={ + "ca_certs": ssl.get_default_verify_paths().cafile, + "ca_cert_path": ssl.get_default_verify_paths().capath, + } + ) + self.assertRaises( + WebSocketBadStatusException, + websock3.connect, + "wss://api.bitfinex.com/ws/2", + cookie="chocolate", + origin="testing_websockets.com", + host="echo.websocket.events/websocket-client-test", + subprotocols=["testproto"], + connection="Upgrade", + header={ + "CustomHeader1": "123", + "Cookie": "TestValue", + "Sec-WebSocket-Key": "k9kFAUWNAMmf5OEMfTlOEA==", + "Sec-WebSocket-Protocol": "newprotocol", + }, + ) + + def test_ipv6(self): + websock2 = ws.WebSocket() + self.assertRaises(ValueError, websock2.connect, "2001:4860:4860::8888") + + def test_bad_urls(self): + websock3 = ws.WebSocket() + self.assertRaises(ValueError, websock3.connect, "ws//example.com") + self.assertRaises(WebSocketAddressException, websock3.connect, "ws://example") + self.assertRaises(ValueError, websock3.connect, "example.com") + + +if __name__ == "__main__": + unittest.main() diff --git a/env/lib/python3.10/site-packages/websocket_client-1.8.0.dist-info/INSTALLER b/env/lib/python3.10/site-packages/websocket_client-1.8.0.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/env/lib/python3.10/site-packages/websocket_client-1.8.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/env/lib/python3.10/site-packages/websocket_client-1.8.0.dist-info/LICENSE b/env/lib/python3.10/site-packages/websocket_client-1.8.0.dist-info/LICENSE new file mode 100644 index 0000000..62a54ca --- /dev/null +++ b/env/lib/python3.10/site-packages/websocket_client-1.8.0.dist-info/LICENSE @@ -0,0 +1,203 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2024 engn33r + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ diff --git a/env/lib/python3.10/site-packages/websocket_client-1.8.0.dist-info/METADATA b/env/lib/python3.10/site-packages/websocket_client-1.8.0.dist-info/METADATA new file mode 100644 index 0000000..563e5c0 --- /dev/null +++ b/env/lib/python3.10/site-packages/websocket_client-1.8.0.dist-info/METADATA @@ -0,0 +1,184 @@ +Metadata-Version: 2.1 +Name: websocket-client +Version: 1.8.0 +Summary: WebSocket client for Python with low level API options +Home-page: https://github.com/websocket-client/websocket-client.git +Download-URL: https://github.com/websocket-client/websocket-client/releases +Author: liris +Author-email: liris.pp@gmail.com +Maintainer: engn33r +Maintainer-email: websocket.client@proton.me +License: Apache-2.0 +Project-URL: Documentation, https://websocket-client.readthedocs.io/ +Project-URL: Source, https://github.com/websocket-client/websocket-client/ +Keywords: websockets client +Classifier: Development Status :: 4 - Beta +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: POSIX +Classifier: Operating System :: Microsoft :: Windows +Classifier: Topic :: Internet +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Intended Audience :: Developers +Requires-Python: >=3.8 +Description-Content-Type: text/markdown +License-File: LICENSE +Provides-Extra: docs +Requires-Dist: Sphinx >=6.0 ; extra == 'docs' +Requires-Dist: sphinx-rtd-theme >=1.1.0 ; extra == 'docs' +Requires-Dist: myst-parser >=2.0.0 ; extra == 'docs' +Provides-Extra: optional +Requires-Dist: python-socks ; extra == 'optional' +Requires-Dist: wsaccel ; extra == 
'optional' +Provides-Extra: test +Requires-Dist: websockets ; extra == 'test' + +[![docs](https://readthedocs.org/projects/websocket-client/badge/?style=flat)](https://websocket-client.readthedocs.io/) +[![Build Status](https://github.com/websocket-client/websocket-client/actions/workflows/build.yml/badge.svg)](https://github.com/websocket-client/websocket-client/actions/workflows/build.yml) +[![codecov](https://codecov.io/gh/websocket-client/websocket-client/branch/master/graph/badge.svg?token=pcXhUQwiL3)](https://codecov.io/gh/websocket-client/websocket-client) +[![PyPI Downloads](https://pepy.tech/badge/websocket-client)](https://pepy.tech/project/websocket-client) +[![PyPI version](https://img.shields.io/pypi/v/websocket_client)](https://pypi.org/project/websocket_client/) +[![License](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) +[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) + +# websocket-client + +websocket-client is a WebSocket client for Python. It provides access +to low level APIs for WebSockets. websocket-client implements version +[hybi-13](https://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-13) +of the WebSocket protocol. This client does not currently support the +permessage-deflate extension from +[RFC 7692](https://tools.ietf.org/html/rfc7692). + +## Documentation + +This project's documentation can be found at +[https://websocket-client.readthedocs.io/](https://websocket-client.readthedocs.io/) + +## Contributing + +Please see the [contribution guidelines](https://github.com/websocket-client/websocket-client/blob/master/CONTRIBUTING.md) + +## Installation + +You can use `pip install websocket-client` to install, or `pip install -e .` +to install from a local copy of the code. This module is tested on Python 3.8+. 
+ +There are several optional dependencies that can be installed to enable +specific websocket-client features. +- To install `python-socks` for proxy usage and `wsaccel` for a minor performance boost, use: + `pip install websocket-client[optional]` +- To install `websockets` to run unit tests using the local echo server, use: + `pip install websocket-client[test]` +- To install `Sphinx` and `sphinx_rtd_theme` to build project documentation, use: + `pip install websocket-client[docs]` + +While not a strict dependency, [rel](https://github.com/bubbleboy14/registeredeventlistener) +is useful when using `run_forever` with automatic reconnect. Install rel with `pip install rel`. + +Footnote: Some shells, such as zsh, require you to escape the `[` and `]` characters with a `\`. + +## Usage Tips + +Check out the documentation's FAQ for additional guidelines: +[https://websocket-client.readthedocs.io/en/latest/faq.html](https://websocket-client.readthedocs.io/en/latest/faq.html) + +Known issues with this library include lack of WebSocket Compression +support (RFC 7692) and [minimal threading documentation/support](https://websocket-client.readthedocs.io/en/latest/threading.html). + +## Performance + +The `send` and `validate_utf8` methods can sometimes be bottleneck. +You can disable UTF8 validation in this library (and receive a +performance enhancement) with the `skip_utf8_validation` parameter. +If you want to get better performance, install wsaccel. While +websocket-client does not depend on wsaccel, it will be used if +available. wsaccel doubles the speed of UTF8 validation and +offers a very minor 10% performance boost when masking the +payload data as part of the `send` process. Numpy used to +be a suggested performance enhancement alternative, but +[issue #687](https://github.com/websocket-client/websocket-client/issues/687) +found it didn't help. 
+ +## Examples + +Many more examples are found in the +[examples documentation](https://websocket-client.readthedocs.io/en/latest/examples.html). + +### Long-lived Connection + +Most real-world WebSockets situations involve longer-lived connections. +The WebSocketApp `run_forever` loop will automatically try to reconnect +to an open WebSocket connection when a network +connection is lost if it is provided with: + +- a `dispatcher` argument (async dispatcher like rel or pyevent) +- a non-zero `reconnect` argument (delay between disconnection and attempted reconnection) + +`run_forever` provides a variety of event-based connection controls +using callbacks like `on_message` and `on_error`. +`run_forever` **does not automatically reconnect** if the server +closes the WebSocket gracefully (returning +[a standard websocket close code](https://www.rfc-editor.org/rfc/rfc6455.html#section-7.4.1)). +[This is the logic](https://github.com/websocket-client/websocket-client/pull/838#issuecomment-1228454826) behind the decision. +Customizing behavior when the server closes +the WebSocket should be handled in the `on_close` callback. +This example uses [rel](https://github.com/bubbleboy14/registeredeventlistener) +for the dispatcher to provide automatic reconnection. 
+ +```python +import websocket +import _thread +import time +import rel + +def on_message(ws, message): + print(message) + +def on_error(ws, error): + print(error) + +def on_close(ws, close_status_code, close_msg): + print("### closed ###") + +def on_open(ws): + print("Opened connection") + +if __name__ == "__main__": + websocket.enableTrace(True) + ws = websocket.WebSocketApp("wss://api.gemini.com/v1/marketdata/BTCUSD", + on_open=on_open, + on_message=on_message, + on_error=on_error, + on_close=on_close) + + ws.run_forever(dispatcher=rel, reconnect=5) # Set dispatcher to automatic reconnection, 5 second reconnect delay if connection closed unexpectedly + rel.signal(2, rel.abort) # Keyboard Interrupt + rel.dispatch() +``` + +### Short-lived Connection + +This is if you want to communicate a short message and disconnect +immediately when done. For example, if you want to confirm that a WebSocket +server is running and responds properly to a specific request. + +```python +from websocket import create_connection + +ws = create_connection("ws://echo.websocket.events/") +print(ws.recv()) +print("Sending 'Hello, World'...") +ws.send("Hello, World") +print("Sent") +print("Receiving...") +result = ws.recv() +print("Received '%s'" % result) +ws.close() +``` diff --git a/env/lib/python3.10/site-packages/websocket_client-1.8.0.dist-info/RECORD b/env/lib/python3.10/site-packages/websocket_client-1.8.0.dist-info/RECORD new file mode 100644 index 0000000..aa8cdd4 --- /dev/null +++ b/env/lib/python3.10/site-packages/websocket_client-1.8.0.dist-info/RECORD @@ -0,0 +1,56 @@ +../../../bin/wsdump,sha256=edt_9jaCHaOMIFRaGWjwmBnnETBahqu3Qn2RPFzZYuA,262 +websocket/__init__.py,sha256=GvVpTPkjvhYEGZcjXszSfrs2FaVHudUdGgGSqf_Rbw4,833 +websocket/__pycache__/__init__.cpython-310.pyc,, +websocket/__pycache__/_abnf.cpython-310.pyc,, +websocket/__pycache__/_app.cpython-310.pyc,, +websocket/__pycache__/_cookiejar.cpython-310.pyc,, +websocket/__pycache__/_core.cpython-310.pyc,, 
+websocket/__pycache__/_exceptions.cpython-310.pyc,, +websocket/__pycache__/_handshake.cpython-310.pyc,, +websocket/__pycache__/_http.cpython-310.pyc,, +websocket/__pycache__/_logging.cpython-310.pyc,, +websocket/__pycache__/_socket.cpython-310.pyc,, +websocket/__pycache__/_ssl_compat.cpython-310.pyc,, +websocket/__pycache__/_url.cpython-310.pyc,, +websocket/__pycache__/_utils.cpython-310.pyc,, +websocket/__pycache__/_wsdump.cpython-310.pyc,, +websocket/_abnf.py,sha256=WesUJSSGSRpzuTp0VeK12O4SYYVMN6CyN6bqXS4lk2w,14385 +websocket/_app.py,sha256=xCAlaM2FfsPLsaEsBGXhdg4olHDEEzif6llSWefHcm8,24176 +websocket/_cookiejar.py,sha256=893SWoHmk_JysiXj8lkyLy95co5zvuC62XGMrvmgI7E,2399 +websocket/_core.py,sha256=P-lYcwk-LKJUKDqBleJsmRSIJjX2v_o4FongdJYbhn8,21080 +websocket/_exceptions.py,sha256=r7lGaC8Y2brBnaK_YJJRDdYY6UCGWxOXoQsMcgFFeJ4,2178 +websocket/_handshake.py,sha256=h_88S6vhStOZBj5zMGJtIKFV1RVMVuEskLybjJCnaj4,6578 +websocket/_http.py,sha256=33GsroWgLhOsE8pMC2Xka-RjonPuUypjeFRABFAtQJo,12818 +websocket/_logging.py,sha256=DHRUl4sEaSkolvMo4A6upn7UIYF0kJT5NlAL0vWCHRI,2228 +websocket/_socket.py,sha256=NpsUBO1ihnC-xPA0U2o1-hMXK8eiplNcU9R2VZvZ3qU,5198 +websocket/_ssl_compat.py,sha256=SRPtw1rT3LPSl9q70mCi5hW9h2xS-nIfdcXbjyGi8sE,1188 +websocket/_url.py,sha256=kbEdbdZ-BMMoYQ3sMfcp9QEY1IYrDppIKCpIIHdGVMc,5251 +websocket/_utils.py,sha256=R36FnFTxYVJyKlh-yEaapRxpoK8Xwn9JFXCy2q2haY0,6961 +websocket/_wsdump.py,sha256=kV82LWLlD6d3vzOKOVAEqkfoCA_Qkpd0hc9WN2Tq2kM,7010 +websocket/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +websocket/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +websocket/tests/__pycache__/__init__.cpython-310.pyc,, +websocket/tests/__pycache__/echo-server.cpython-310.pyc,, +websocket/tests/__pycache__/test_abnf.cpython-310.pyc,, +websocket/tests/__pycache__/test_app.cpython-310.pyc,, +websocket/tests/__pycache__/test_cookiejar.cpython-310.pyc,, +websocket/tests/__pycache__/test_http.cpython-310.pyc,, 
+websocket/tests/__pycache__/test_url.cpython-310.pyc,, +websocket/tests/__pycache__/test_websocket.cpython-310.pyc,, +websocket/tests/data/header01.txt,sha256=eR9UDpnf7mREys9MttKytzB5OXA5IwOGWJZKmaF4II8,163 +websocket/tests/data/header02.txt,sha256=1HzQGIMG0OGwfnboRkUqwbTEg2nTevOX2Waj0gQARaw,161 +websocket/tests/data/header03.txt,sha256=l_soTbfEWjZTLC5Ydx2U8uj8NJ30IwAc8yo5IUG5fyQ,216 +websocket/tests/echo-server.py,sha256=yYwKXimgqo6gHFjGgqhkADb6fRncSrF-OewX6UinZVg,482 +websocket/tests/test_abnf.py,sha256=fGZ335DReScT4zSNayl5H8z7MxzJ9NXbiSvt97SJGDI,4629 +websocket/tests/test_app.py,sha256=0YoTjRjcZ2MqG3Hd8-4rfvkPkNA9BUAt1UODkjXu3Y0,12364 +websocket/tests/test_cookiejar.py,sha256=FNm9Hjxu0YBwA8GLRi1GSrAp8jkKzdND6Icm7iIIu_Y,4395 +websocket/tests/test_http.py,sha256=VLqGlXOovB6DGZDnq25c7cOjAWWTJAhhjeKSFYXnLnc,12461 +websocket/tests/test_url.py,sha256=5oONcPf4rU8KSQjDYjNevcnUpOnqUNFmontSiXB5Qd4,17718 +websocket/tests/test_websocket.py,sha256=F5XtOwa_v2JHUE-xOpCWdhUgUYfNbHGErHJKQ0bEsTI,18390 +websocket_client-1.8.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +websocket_client-1.8.0.dist-info/LICENSE,sha256=RT5qaUJ-2f-SoD7y17Ie8dtB_fVtYjB6SbqFBZ96DGI,11339 +websocket_client-1.8.0.dist-info/METADATA,sha256=mUuXo30suA6pEGGHkoT30gxksNLloIXGTTH1JPE5zjc,7962 +websocket_client-1.8.0.dist-info/RECORD,, +websocket_client-1.8.0.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92 +websocket_client-1.8.0.dist-info/entry_points.txt,sha256=IoCGCuANLuLxE3m2QXEd2Ip57qScBjf7RfQnkjn6DNE,50 +websocket_client-1.8.0.dist-info/top_level.txt,sha256=8m_tTpcUlzWGl8v-pj5Wi7XhAFaN1_bLKRHQKCyz5_I,10 diff --git a/env/lib/python3.10/site-packages/websocket_client-1.8.0.dist-info/WHEEL b/env/lib/python3.10/site-packages/websocket_client-1.8.0.dist-info/WHEEL new file mode 100644 index 0000000..bab98d6 --- /dev/null +++ b/env/lib/python3.10/site-packages/websocket_client-1.8.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel 
(0.43.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/env/lib/python3.10/site-packages/websocket_client-1.8.0.dist-info/entry_points.txt b/env/lib/python3.10/site-packages/websocket_client-1.8.0.dist-info/entry_points.txt new file mode 100644 index 0000000..45c854e --- /dev/null +++ b/env/lib/python3.10/site-packages/websocket_client-1.8.0.dist-info/entry_points.txt @@ -0,0 +1,2 @@ +[console_scripts] +wsdump = websocket._wsdump:main diff --git a/env/lib/python3.10/site-packages/websocket_client-1.8.0.dist-info/top_level.txt b/env/lib/python3.10/site-packages/websocket_client-1.8.0.dist-info/top_level.txt new file mode 100644 index 0000000..ca4cb0c --- /dev/null +++ b/env/lib/python3.10/site-packages/websocket_client-1.8.0.dist-info/top_level.txt @@ -0,0 +1 @@ +websocket diff --git a/env/lib/python3.10/site-packages/wsproto-1.2.0.dist-info/INSTALLER b/env/lib/python3.10/site-packages/wsproto-1.2.0.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/env/lib/python3.10/site-packages/wsproto-1.2.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/env/lib/python3.10/site-packages/wsproto-1.2.0.dist-info/LICENSE b/env/lib/python3.10/site-packages/wsproto-1.2.0.dist-info/LICENSE new file mode 100644 index 0000000..cc10784 --- /dev/null +++ b/env/lib/python3.10/site-packages/wsproto-1.2.0.dist-info/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2017 Benno Rice and contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or 
substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. \ No newline at end of file diff --git a/env/lib/python3.10/site-packages/wsproto-1.2.0.dist-info/METADATA b/env/lib/python3.10/site-packages/wsproto-1.2.0.dist-info/METADATA new file mode 100644 index 0000000..32e8bdb --- /dev/null +++ b/env/lib/python3.10/site-packages/wsproto-1.2.0.dist-info/METADATA @@ -0,0 +1,177 @@ +Metadata-Version: 2.1 +Name: wsproto +Version: 1.2.0 +Summary: WebSockets state-machine based protocol implementation +Home-page: https://github.com/python-hyper/wsproto/ +Author: Benno Rice +Author-email: benno@jeamland.net +License: MIT License +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Requires-Python: >=3.7.0 +Description-Content-Type: text/x-rst +License-File: LICENSE +Requires-Dist: h11 (<1,>=0.9.0) + +======================================================== +Pure Python, pure state-machine WebSocket implementation +======================================================== + +.. 
image:: https://github.com/python-hyper/wsproto/workflows/CI/badge.svg + :target: https://github.com/python-hyper/wsproto/actions + :alt: Build Status +.. image:: https://codecov.io/gh/python-hyper/wsproto/branch/main/graph/badge.svg + :target: https://codecov.io/gh/python-hyper/wsproto + :alt: Code Coverage +.. image:: https://readthedocs.org/projects/wsproto/badge/?version=latest + :target: https://wsproto.readthedocs.io/en/latest/ + :alt: Documentation Status +.. image:: https://img.shields.io/badge/chat-join_now-brightgreen.svg + :target: https://gitter.im/python-hyper/community + :alt: Chat community + + +This repository contains a pure-Python implementation of a WebSocket protocol +stack. It's written from the ground up to be embeddable in whatever program you +choose to use, ensuring that you can communicate via WebSockets, as defined in +`RFC6455 `_, regardless of your programming +paradigm. + +This repository does not provide a parsing layer, a network layer, or any rules +about concurrency. Instead, it's a purely in-memory solution, defined in terms +of data actions and WebSocket frames. RFC6455 and Compression Extensions for +WebSocket via `RFC7692 `_ are fully +supported. + +wsproto supports Python 3.6.1 or higher. + +To install it, just run: + +.. code-block:: console + + $ pip install wsproto + + +Usage +===== + +Let's assume you have some form of network socket available. wsproto client +connections automatically generate a HTTP request to initiate the WebSocket +handshake. To create a WebSocket client connection: + +.. code-block:: python + + from wsproto import WSConnection, ConnectionType + from wsproto.events import Request + + ws = WSConnection(ConnectionType.CLIENT) + ws.send(Request(host='echo.websocket.org', target='/')) + +To create a WebSocket server connection: + +.. 
code-block:: python + + from wsproto.connection import WSConnection, ConnectionType + + ws = WSConnection(ConnectionType.SERVER) + +Every time you send a message, or call a ping, or simply if you receive incoming +data, wsproto might respond with some outgoing data that you have to send: + +.. code-block:: python + + some_socket.send(ws.bytes_to_send()) + +Both connection types need to receive incoming data: + +.. code-block:: python + + ws.receive_data(some_byte_string_of_data) + +And wsproto will issue events if the data contains any WebSocket messages or state changes: + +.. code-block:: python + + for event in ws.events(): + if isinstance(event, Request): + # only client connections get this event + ws.send(AcceptConnection()) + elif isinstance(event, CloseConnection): + # guess nobody wants to talk to us any more... + elif isinstance(event, TextMessage): + print('We got text!', event.data) + elif isinstance(event, BytesMessage): + print('We got bytes!', event.data) + +Take a look at our docs for a `full list of events +`! + +Testing +======= + +It passes the autobahn test suite completely and strictly in both client and +server modes and using permessage-deflate. + +If you want to run the compliance tests, go into the compliance directory and +then to test client mode, in one shell run the Autobahn test server: + +.. code-block:: console + + $ wstest -m fuzzingserver -s ws-fuzzingserver.json + +And in another shell run the test client: + +.. code-block:: console + + $ python test_client.py + +And to test server mode, run the test server: + +.. code-block:: console + + $ python test_server.py + +And in another shell run the Autobahn test client: + +.. code-block:: console + + $ wstest -m fuzzingclient -s ws-fuzzingclient.json + + +Documentation +============= + +Documentation is available at https://wsproto.readthedocs.io/en/latest/. + +Contributing +============ + +``wsproto`` welcomes contributions from anyone! 
Unlike many other projects we +are happy to accept cosmetic contributions and small contributions, in addition +to large feature requests and changes. + +Before you contribute (either by opening an issue or filing a pull request), +please `read the contribution guidelines`_. + +.. _read the contribution guidelines: http://python-hyper.org/en/latest/contributing.html + +License +======= + +``wsproto`` is made available under the MIT License. For more details, see the +``LICENSE`` file in the repository. + +Authors +======= + +``wsproto`` was created by @jeamland, and is maintained by the python-hyper +community. diff --git a/env/lib/python3.10/site-packages/wsproto-1.2.0.dist-info/RECORD b/env/lib/python3.10/site-packages/wsproto-1.2.0.dist-info/RECORD new file mode 100644 index 0000000..027961b --- /dev/null +++ b/env/lib/python3.10/site-packages/wsproto-1.2.0.dist-info/RECORD @@ -0,0 +1,23 @@ +wsproto-1.2.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +wsproto-1.2.0.dist-info/LICENSE,sha256=wDKajb80N7CV9_XPQlfWu4VeBxIMroeGWGBz_3ppmVk,1093 +wsproto-1.2.0.dist-info/METADATA,sha256=V7EI9a-gXS3NLxeYYmWqtf_MkMfepGoTF03IaR-OVwo,5607 +wsproto-1.2.0.dist-info/RECORD,, +wsproto-1.2.0.dist-info/WHEEL,sha256=ewwEueio1C2XeHTvT17n8dZUJgOvyCWCt0WVNLClP9o,92 +wsproto-1.2.0.dist-info/top_level.txt,sha256=BUdIrwL11zET0fkWkYRJ1yZKrEfvDF9DZqjhABOio6Y,8 +wsproto/__init__.py,sha256=zQSIjLjveTHwyhGAfqG_n_cVl54hTkeV6vuad1cnEOE,2887 +wsproto/__pycache__/__init__.cpython-310.pyc,, +wsproto/__pycache__/connection.cpython-310.pyc,, +wsproto/__pycache__/events.cpython-310.pyc,, +wsproto/__pycache__/extensions.cpython-310.pyc,, +wsproto/__pycache__/frame_protocol.cpython-310.pyc,, +wsproto/__pycache__/handshake.cpython-310.pyc,, +wsproto/__pycache__/typing.cpython-310.pyc,, +wsproto/__pycache__/utilities.cpython-310.pyc,, +wsproto/connection.py,sha256=LhsbokxZUmAtMsOFFZ45puZDPyIXNEmq7SanE7swAgE,6813 
+wsproto/events.py,sha256=DW7YQ823oK3MjXHqcPvjJzjBGk5UGuMO_rpNnKgmmW8,7979 +wsproto/extensions.py,sha256=VlnojvsC2AO7vbUkw_TOqCgtmHb1dSplXeRwjMfjCo4,11211 +wsproto/frame_protocol.py,sha256=B5p_wRq54gvTihegbJ39RkONrdhtipoYDEiYq95BdAk,23401 +wsproto/handshake.py,sha256=hPqTo15MqOxYlvcNYTo-bIzQJHtRLq8qq2jBdHdz2x8,18036 +wsproto/py.typed,sha256=sow9soTwP9T_gEAQSVh7Gb8855h04Nwmhs2We-JRgZM,7 +wsproto/typing.py,sha256=Ryf6eOhAzfZlHCFELiiayDzNqdXQG7JKccblOqNx6Wo,68 +wsproto/utilities.py,sha256=5qmPXSUhUp2GESgvgIacZ7N4uqd0vBijhVV7t6XTiZw,2816 diff --git a/env/lib/python3.10/site-packages/wsproto-1.2.0.dist-info/WHEEL b/env/lib/python3.10/site-packages/wsproto-1.2.0.dist-info/WHEEL new file mode 100644 index 0000000..5bad85f --- /dev/null +++ b/env/lib/python3.10/site-packages/wsproto-1.2.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/env/lib/python3.10/site-packages/wsproto-1.2.0.dist-info/top_level.txt b/env/lib/python3.10/site-packages/wsproto-1.2.0.dist-info/top_level.txt new file mode 100644 index 0000000..8b7d144 --- /dev/null +++ b/env/lib/python3.10/site-packages/wsproto-1.2.0.dist-info/top_level.txt @@ -0,0 +1 @@ +wsproto diff --git a/env/lib/python3.10/site-packages/wsproto/__init__.py b/env/lib/python3.10/site-packages/wsproto/__init__.py new file mode 100644 index 0000000..46fde3f --- /dev/null +++ b/env/lib/python3.10/site-packages/wsproto/__init__.py @@ -0,0 +1,94 @@ +""" +wsproto +~~~~~~~ + +A WebSocket implementation. +""" +from typing import Generator, Optional, Union + +from .connection import Connection, ConnectionState, ConnectionType +from .events import Event +from .handshake import H11Handshake +from .typing import Headers + +__version__ = "1.2.0" + + +class WSConnection: + """ + Represents the local end of a WebSocket connection to a remote peer. 
+ """ + + def __init__(self, connection_type: ConnectionType) -> None: + """ + Constructor + + :param wsproto.connection.ConnectionType connection_type: Controls + whether the library behaves as a client or as a server. + """ + self.client = connection_type is ConnectionType.CLIENT + self.handshake = H11Handshake(connection_type) + self.connection: Optional[Connection] = None + + @property + def state(self) -> ConnectionState: + """ + :returns: Connection state + :rtype: wsproto.connection.ConnectionState + """ + if self.connection is None: + return self.handshake.state + return self.connection.state + + def initiate_upgrade_connection( + self, headers: Headers, path: Union[bytes, str] + ) -> None: + self.handshake.initiate_upgrade_connection(headers, path) + + def send(self, event: Event) -> bytes: + """ + Generate network data for the specified event. + + When you want to communicate with a WebSocket peer, you should construct + an event and pass it to this method. This method will return the bytes + that you should send to the peer. + + :param wsproto.events.Event event: The event to generate data for + :returns bytes: The data to send to the peer + """ + data = b"" + if self.connection is None: + data += self.handshake.send(event) + self.connection = self.handshake.connection + else: + data += self.connection.send(event) + return data + + def receive_data(self, data: Optional[bytes]) -> None: + """ + Feed network data into the connection instance. + + After calling this method, you should call :meth:`events` to see if the + received data triggered any new events. + + :param bytes data: Data received from remote peer + """ + if self.connection is None: + self.handshake.receive_data(data) + self.connection = self.handshake.connection + else: + self.connection.receive_data(data) + + def events(self) -> Generator[Event, None, None]: + """ + A generator that yields pending events. + + Each event is an instance of a subclass of + :class:`wsproto.events.Event`. 
+ """ + yield from self.handshake.events() + if self.connection is not None: + yield from self.connection.events() + + +__all__ = ("ConnectionType", "WSConnection") diff --git a/env/lib/python3.10/site-packages/wsproto/connection.py b/env/lib/python3.10/site-packages/wsproto/connection.py new file mode 100644 index 0000000..4439165 --- /dev/null +++ b/env/lib/python3.10/site-packages/wsproto/connection.py @@ -0,0 +1,189 @@ +""" +wsproto/connection +~~~~~~~~~~~~~~~~~~ + +An implementation of a WebSocket connection. +""" + +from collections import deque +from enum import Enum +from typing import Deque, Generator, List, Optional + +from .events import ( + BytesMessage, + CloseConnection, + Event, + Message, + Ping, + Pong, + TextMessage, +) +from .extensions import Extension +from .frame_protocol import CloseReason, FrameProtocol, Opcode, ParseFailed +from .utilities import LocalProtocolError + + +class ConnectionState(Enum): + """ + RFC 6455, Section 4 - Opening Handshake + """ + + #: The opening handshake is in progress. + CONNECTING = 0 + #: The opening handshake is complete. + OPEN = 1 + #: The remote WebSocket has initiated a connection close. + REMOTE_CLOSING = 2 + #: The local WebSocket (i.e. this instance) has initiated a connection close. + LOCAL_CLOSING = 3 + #: The closing handshake has completed. + CLOSED = 4 + #: The connection was rejected during the opening handshake. + REJECTING = 5 + + +class ConnectionType(Enum): + """An enumeration of connection types.""" + + #: This connection will act as client and talk to a remote server + CLIENT = 1 + + #: This connection will as as server and waits for client connections + SERVER = 2 + + +CLIENT = ConnectionType.CLIENT +SERVER = ConnectionType.SERVER + + +class Connection: + """ + A low-level WebSocket connection object. 
+ + This wraps two other protocol objects, an HTTP/1.1 protocol object used + to do the initial HTTP upgrade handshake and a WebSocket frame protocol + object used to exchange messages and other control frames. + + :param conn_type: Whether this object is on the client- or server-side of + a connection. To initialise as a client pass ``CLIENT`` otherwise + pass ``SERVER``. + :type conn_type: ``ConnectionType`` + """ + + def __init__( + self, + connection_type: ConnectionType, + extensions: Optional[List[Extension]] = None, + trailing_data: bytes = b"", + ) -> None: + self.client = connection_type is ConnectionType.CLIENT + self._events: Deque[Event] = deque() + self._proto = FrameProtocol(self.client, extensions or []) + self._state = ConnectionState.OPEN + self.receive_data(trailing_data) + + @property + def state(self) -> ConnectionState: + return self._state + + def send(self, event: Event) -> bytes: + data = b"" + if isinstance(event, Message) and self.state == ConnectionState.OPEN: + data += self._proto.send_data(event.data, event.message_finished) + elif isinstance(event, Ping) and self.state == ConnectionState.OPEN: + data += self._proto.ping(event.payload) + elif isinstance(event, Pong) and self.state == ConnectionState.OPEN: + data += self._proto.pong(event.payload) + elif isinstance(event, CloseConnection) and self.state in { + ConnectionState.OPEN, + ConnectionState.REMOTE_CLOSING, + }: + data += self._proto.close(event.code, event.reason) + if self.state == ConnectionState.REMOTE_CLOSING: + self._state = ConnectionState.CLOSED + else: + self._state = ConnectionState.LOCAL_CLOSING + else: + raise LocalProtocolError( + f"Event {event} cannot be sent in state {self.state}." + ) + return data + + def receive_data(self, data: Optional[bytes]) -> None: + """ + Pass some received data to the connection for handling. + + A list of events that the remote peer triggered by sending this data can + be retrieved with :meth:`~wsproto.connection.Connection.events`. 
+ + :param data: The data received from the remote peer on the network. + :type data: ``bytes`` + """ + + if data is None: + # "If _The WebSocket Connection is Closed_ and no Close control + # frame was received by the endpoint (such as could occur if the + # underlying transport connection is lost), _The WebSocket + # Connection Close Code_ is considered to be 1006." + self._events.append(CloseConnection(code=CloseReason.ABNORMAL_CLOSURE)) + self._state = ConnectionState.CLOSED + return + + if self.state in (ConnectionState.OPEN, ConnectionState.LOCAL_CLOSING): + self._proto.receive_bytes(data) + elif self.state is ConnectionState.CLOSED: + raise LocalProtocolError("Connection already closed.") + else: + pass # pragma: no cover + + def events(self) -> Generator[Event, None, None]: + """ + Return a generator that provides any events that have been generated + by protocol activity. + + :returns: generator of :class:`Event ` subclasses + """ + while self._events: + yield self._events.popleft() + + try: + for frame in self._proto.received_frames(): + if frame.opcode is Opcode.PING: + assert frame.frame_finished and frame.message_finished + assert isinstance(frame.payload, (bytes, bytearray)) + yield Ping(payload=frame.payload) + + elif frame.opcode is Opcode.PONG: + assert frame.frame_finished and frame.message_finished + assert isinstance(frame.payload, (bytes, bytearray)) + yield Pong(payload=frame.payload) + + elif frame.opcode is Opcode.CLOSE: + assert isinstance(frame.payload, tuple) + code, reason = frame.payload + if self.state is ConnectionState.LOCAL_CLOSING: + self._state = ConnectionState.CLOSED + else: + self._state = ConnectionState.REMOTE_CLOSING + yield CloseConnection(code=code, reason=reason) + + elif frame.opcode is Opcode.TEXT: + assert isinstance(frame.payload, str) + yield TextMessage( + data=frame.payload, + frame_finished=frame.frame_finished, + message_finished=frame.message_finished, + ) + + elif frame.opcode is Opcode.BINARY: + assert 
isinstance(frame.payload, (bytes, bytearray)) + yield BytesMessage( + data=frame.payload, + frame_finished=frame.frame_finished, + message_finished=frame.message_finished, + ) + + else: + pass # pragma: no cover + except ParseFailed as exc: + yield CloseConnection(code=exc.code, reason=str(exc)) diff --git a/env/lib/python3.10/site-packages/wsproto/events.py b/env/lib/python3.10/site-packages/wsproto/events.py new file mode 100644 index 0000000..d758f8a --- /dev/null +++ b/env/lib/python3.10/site-packages/wsproto/events.py @@ -0,0 +1,295 @@ +""" +wsproto/events +~~~~~~~~~~~~~~ + +Events that result from processing data on a WebSocket connection. +""" +from abc import ABC +from dataclasses import dataclass, field +from typing import Generic, List, Optional, Sequence, TypeVar, Union + +from .extensions import Extension +from .typing import Headers + + +class Event(ABC): + """ + Base class for wsproto events. + """ + + pass # noqa + + +@dataclass(frozen=True) +class Request(Event): + """The beginning of a Websocket connection, the HTTP Upgrade request + + This event is fired when a SERVER connection receives a WebSocket + handshake request (HTTP with upgrade header). + + Fields: + + .. attribute:: host + + (Required) The hostname, or host header value. + + .. attribute:: target + + (Required) The request target (path and query string) + + .. attribute:: extensions + + The proposed extensions. + + .. attribute:: extra_headers + + The additional request headers, excluding extensions, host, subprotocols, + and version headers. + + .. attribute:: subprotocols + + A list of the subprotocols proposed in the request, as a list + of strings. 
+ """ + + host: str + target: str + extensions: Union[Sequence[Extension], Sequence[str]] = field( # type: ignore[assignment] + default_factory=list + ) + extra_headers: Headers = field(default_factory=list) + subprotocols: List[str] = field(default_factory=list) + + +@dataclass(frozen=True) +class AcceptConnection(Event): + """The acceptance of a Websocket upgrade request. + + This event is fired when a CLIENT receives an acceptance response + from a server. It is also used to accept an upgrade request when + acting as a SERVER. + + Fields: + + .. attribute:: extra_headers + + Any additional (non websocket related) headers present in the + acceptance response. + + .. attribute:: subprotocol + + The accepted subprotocol to use. + + """ + + subprotocol: Optional[str] = None + extensions: List[Extension] = field(default_factory=list) + extra_headers: Headers = field(default_factory=list) + + +@dataclass(frozen=True) +class RejectConnection(Event): + """The rejection of a Websocket upgrade request, the HTTP response. + + The ``RejectConnection`` event sends the appropriate HTTP headers to + communicate to the peer that the handshake has been rejected. You may also + send an HTTP body by setting the ``has_body`` attribute to ``True`` and then + sending one or more :class:`RejectData` events after this one. When sending + a response body, the caller should set the ``Content-Length``, + ``Content-Type``, and/or ``Transfer-Encoding`` headers as appropriate. + + When receiving a ``RejectConnection`` event, the ``has_body`` attribute will + in almost all cases be ``True`` (even if the server set it to ``False``) and + will be followed by at least one ``RejectData`` events, even though the data + itself might be just ``b""``. (The only scenario in which the caller + receives a ``RejectConnection`` with ``has_body == False`` is if the peer + violates sends an informational status code (1xx) other than 101.) 
+ + The ``has_body`` attribute should only be used when receiving the event. (It + has ) is False the headers must include a + content-length or transfer encoding. + + Fields: + + .. attribute:: headers (Headers) + + The headers to send with the response. + + .. attribute:: has_body + + This defaults to False, but set to True if there is a body. See + also :class:`~RejectData`. + + .. attribute:: status_code + + The response status code. + + """ + + status_code: int = 400 + headers: Headers = field(default_factory=list) + has_body: bool = False + + +@dataclass(frozen=True) +class RejectData(Event): + """The rejection HTTP response body. + + The caller may send multiple ``RejectData`` events. The final event should + have the ``body_finished`` attribute set to ``True``. + + Fields: + + .. attribute:: body_finished + + True if this is the final chunk of the body data. + + .. attribute:: data (bytes) + + (Required) The raw body data. + + """ + + data: bytes + body_finished: bool = True + + +@dataclass(frozen=True) +class CloseConnection(Event): + + """The end of a Websocket connection, represents a closure frame. + + **wsproto does not automatically send a response to a close event.** To + comply with the RFC you MUST send a close event back to the remote WebSocket + if you have not already sent one. The :meth:`response` method provides a + suitable event for this purpose, and you should check if a response needs + to be sent by checking :func:`wsproto.WSConnection.state`. + + Fields: + + .. attribute:: code + + (Required) The integer close code to indicate why the connection + has closed. + + .. attribute:: reason + + Additional reasoning for why the connection has closed. 
+ + """ + + code: int + reason: Optional[str] = None + + def response(self) -> "CloseConnection": + """Generate an RFC-compliant close frame to send back to the peer.""" + return CloseConnection(code=self.code, reason=self.reason) + + +T = TypeVar("T", bytes, str) + + +@dataclass(frozen=True) +class Message(Event, Generic[T]): + """The websocket data message. + + Fields: + + .. attribute:: data + + (Required) The message data as byte string, can be decoded as UTF-8 for + TEXT messages. This only represents a single chunk of data and + not a full WebSocket message. You need to buffer and + reassemble these chunks to get the full message. + + .. attribute:: frame_finished + + This has no semantic content, but is provided just in case some + weird edge case user wants to be able to reconstruct the + fragmentation pattern of the original stream. + + .. attribute:: message_finished + + True if this frame is the last one of this message, False if + more frames are expected. + + """ + + data: T + frame_finished: bool = True + message_finished: bool = True + + +@dataclass(frozen=True) +class TextMessage(Message[str]): # pylint: disable=unsubscriptable-object + """This event is fired when a data frame with TEXT payload is received. + + Fields: + + .. attribute:: data + + The message data as string, This only represents a single chunk + of data and not a full WebSocket message. You need to buffer + and reassemble these chunks to get the full message. + + """ + + # https://github.com/python/mypy/issues/5744 + data: str + + +@dataclass(frozen=True) +class BytesMessage(Message[bytes]): # pylint: disable=unsubscriptable-object + """This event is fired when a data frame with BINARY payload is + received. + + Fields: + + .. attribute:: data + + The message data as byte string, can be decoded as UTF-8 for + TEXT messages. This only represents a single chunk of data and + not a full WebSocket message. You need to buffer and + reassemble these chunks to get the full message. 
+ """ + + # https://github.com/python/mypy/issues/5744 + data: bytes + + +@dataclass(frozen=True) +class Ping(Event): + """The Ping event can be sent to trigger a ping frame and is fired + when a Ping is received. + + **wsproto does not automatically send a pong response to a ping event.** To + comply with the RFC you MUST send a pong even as soon as is practical. The + :meth:`response` method provides a suitable event for this purpose. + + Fields: + + .. attribute:: payload + + An optional payload to emit with the ping frame. + """ + + payload: bytes = b"" + + def response(self) -> "Pong": + """Generate an RFC-compliant :class:`Pong` response to this ping.""" + return Pong(payload=self.payload) + + +@dataclass(frozen=True) +class Pong(Event): + """The Pong event is fired when a Pong is received. + + Fields: + + .. attribute:: payload + + An optional payload to emit with the pong frame. + + """ + + payload: bytes = b"" diff --git a/env/lib/python3.10/site-packages/wsproto/extensions.py b/env/lib/python3.10/site-packages/wsproto/extensions.py new file mode 100644 index 0000000..ea8555d --- /dev/null +++ b/env/lib/python3.10/site-packages/wsproto/extensions.py @@ -0,0 +1,315 @@ +""" +wsproto/extensions +~~~~~~~~~~~~~~~~~~ + +WebSocket extensions. 
+""" + +import zlib +from typing import Optional, Tuple, Union + +from .frame_protocol import CloseReason, FrameDecoder, FrameProtocol, Opcode, RsvBits + + +class Extension: + name: str + + def enabled(self) -> bool: + return False + + def offer(self) -> Union[bool, str]: + pass + + def accept(self, offer: str) -> Optional[Union[bool, str]]: + pass + + def finalize(self, offer: str) -> None: + pass + + def frame_inbound_header( + self, + proto: Union[FrameDecoder, FrameProtocol], + opcode: Opcode, + rsv: RsvBits, + payload_length: int, + ) -> Union[CloseReason, RsvBits]: + return RsvBits(False, False, False) + + def frame_inbound_payload_data( + self, proto: Union[FrameDecoder, FrameProtocol], data: bytes + ) -> Union[bytes, CloseReason]: + return data + + def frame_inbound_complete( + self, proto: Union[FrameDecoder, FrameProtocol], fin: bool + ) -> Union[bytes, CloseReason, None]: + pass + + def frame_outbound( + self, + proto: Union[FrameDecoder, FrameProtocol], + opcode: Opcode, + rsv: RsvBits, + data: bytes, + fin: bool, + ) -> Tuple[RsvBits, bytes]: + return (rsv, data) + + +class PerMessageDeflate(Extension): + name = "permessage-deflate" + + DEFAULT_CLIENT_MAX_WINDOW_BITS = 15 + DEFAULT_SERVER_MAX_WINDOW_BITS = 15 + + def __init__( + self, + client_no_context_takeover: bool = False, + client_max_window_bits: Optional[int] = None, + server_no_context_takeover: bool = False, + server_max_window_bits: Optional[int] = None, + ) -> None: + self.client_no_context_takeover = client_no_context_takeover + self.server_no_context_takeover = server_no_context_takeover + self._client_max_window_bits = self.DEFAULT_CLIENT_MAX_WINDOW_BITS + self._server_max_window_bits = self.DEFAULT_SERVER_MAX_WINDOW_BITS + if client_max_window_bits is not None: + self.client_max_window_bits = client_max_window_bits + if server_max_window_bits is not None: + self.server_max_window_bits = server_max_window_bits + + self._compressor: Optional[zlib._Compress] = None # noqa + 
self._decompressor: Optional[zlib._Decompress] = None # noqa + # This refers to the current frame + self._inbound_is_compressible: Optional[bool] = None + # This refers to the ongoing message (which might span multiple + # frames). Only the first frame in a fragmented message is flagged for + # compression, so this carries that bit forward. + self._inbound_compressed: Optional[bool] = None + + self._enabled = False + + @property + def client_max_window_bits(self) -> int: + return self._client_max_window_bits + + @client_max_window_bits.setter + def client_max_window_bits(self, value: int) -> None: + if value < 9 or value > 15: + raise ValueError("Window size must be between 9 and 15 inclusive") + self._client_max_window_bits = value + + @property + def server_max_window_bits(self) -> int: + return self._server_max_window_bits + + @server_max_window_bits.setter + def server_max_window_bits(self, value: int) -> None: + if value < 9 or value > 15: + raise ValueError("Window size must be between 9 and 15 inclusive") + self._server_max_window_bits = value + + def _compressible_opcode(self, opcode: Opcode) -> bool: + return opcode in (Opcode.TEXT, Opcode.BINARY, Opcode.CONTINUATION) + + def enabled(self) -> bool: + return self._enabled + + def offer(self) -> Union[bool, str]: + parameters = [ + "client_max_window_bits=%d" % self.client_max_window_bits, + "server_max_window_bits=%d" % self.server_max_window_bits, + ] + + if self.client_no_context_takeover: + parameters.append("client_no_context_takeover") + if self.server_no_context_takeover: + parameters.append("server_no_context_takeover") + + return "; ".join(parameters) + + def finalize(self, offer: str) -> None: + bits = [b.strip() for b in offer.split(";")] + for bit in bits[1:]: + if bit.startswith("client_no_context_takeover"): + self.client_no_context_takeover = True + elif bit.startswith("server_no_context_takeover"): + self.server_no_context_takeover = True + elif bit.startswith("client_max_window_bits"): + 
self.client_max_window_bits = int(bit.split("=", 1)[1].strip()) + elif bit.startswith("server_max_window_bits"): + self.server_max_window_bits = int(bit.split("=", 1)[1].strip()) + + self._enabled = True + + def _parse_params(self, params: str) -> Tuple[Optional[int], Optional[int]]: + client_max_window_bits = None + server_max_window_bits = None + + bits = [b.strip() for b in params.split(";")] + for bit in bits[1:]: + if bit.startswith("client_no_context_takeover"): + self.client_no_context_takeover = True + elif bit.startswith("server_no_context_takeover"): + self.server_no_context_takeover = True + elif bit.startswith("client_max_window_bits"): + if "=" in bit: + client_max_window_bits = int(bit.split("=", 1)[1].strip()) + else: + client_max_window_bits = self.client_max_window_bits + elif bit.startswith("server_max_window_bits"): + if "=" in bit: + server_max_window_bits = int(bit.split("=", 1)[1].strip()) + else: + server_max_window_bits = self.server_max_window_bits + + return client_max_window_bits, server_max_window_bits + + def accept(self, offer: str) -> Union[bool, None, str]: + client_max_window_bits, server_max_window_bits = self._parse_params(offer) + + parameters = [] + + if self.client_no_context_takeover: + parameters.append("client_no_context_takeover") + if self.server_no_context_takeover: + parameters.append("server_no_context_takeover") + try: + if client_max_window_bits is not None: + parameters.append("client_max_window_bits=%d" % client_max_window_bits) + self.client_max_window_bits = client_max_window_bits + if server_max_window_bits is not None: + parameters.append("server_max_window_bits=%d" % server_max_window_bits) + self.server_max_window_bits = server_max_window_bits + except ValueError: + return None + else: + self._enabled = True + return "; ".join(parameters) + + def frame_inbound_header( + self, + proto: Union[FrameDecoder, FrameProtocol], + opcode: Opcode, + rsv: RsvBits, + payload_length: int, + ) -> Union[CloseReason, 
RsvBits]: + if rsv.rsv1 and opcode.iscontrol(): + return CloseReason.PROTOCOL_ERROR + if rsv.rsv1 and opcode is Opcode.CONTINUATION: + return CloseReason.PROTOCOL_ERROR + + self._inbound_is_compressible = self._compressible_opcode(opcode) + + if self._inbound_compressed is None: + self._inbound_compressed = rsv.rsv1 + if self._inbound_compressed: + assert self._inbound_is_compressible + if proto.client: + bits = self.server_max_window_bits + else: + bits = self.client_max_window_bits + if self._decompressor is None: + self._decompressor = zlib.decompressobj(-int(bits)) + + return RsvBits(True, False, False) + + def frame_inbound_payload_data( + self, proto: Union[FrameDecoder, FrameProtocol], data: bytes + ) -> Union[bytes, CloseReason]: + if not self._inbound_compressed or not self._inbound_is_compressible: + return data + assert self._decompressor is not None + + try: + return self._decompressor.decompress(bytes(data)) + except zlib.error: + return CloseReason.INVALID_FRAME_PAYLOAD_DATA + + def frame_inbound_complete( + self, proto: Union[FrameDecoder, FrameProtocol], fin: bool + ) -> Union[bytes, CloseReason, None]: + if not fin: + return None + if not self._inbound_is_compressible: + self._inbound_compressed = None + return None + if not self._inbound_compressed: + self._inbound_compressed = None + return None + assert self._decompressor is not None + + try: + data = self._decompressor.decompress(b"\x00\x00\xff\xff") + data += self._decompressor.flush() + except zlib.error: + return CloseReason.INVALID_FRAME_PAYLOAD_DATA + + if proto.client: + no_context_takeover = self.server_no_context_takeover + else: + no_context_takeover = self.client_no_context_takeover + + if no_context_takeover: + self._decompressor = None + + self._inbound_compressed = None + + return data + + def frame_outbound( + self, + proto: Union[FrameDecoder, FrameProtocol], + opcode: Opcode, + rsv: RsvBits, + data: bytes, + fin: bool, + ) -> Tuple[RsvBits, bytes]: + if not 
self._compressible_opcode(opcode): + return (rsv, data) + + if opcode is not Opcode.CONTINUATION: + rsv = RsvBits(True, *rsv[1:]) + + if self._compressor is None: + assert opcode is not Opcode.CONTINUATION + if proto.client: + bits = self.client_max_window_bits + else: + bits = self.server_max_window_bits + self._compressor = zlib.compressobj( + zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -int(bits) + ) + + data = self._compressor.compress(bytes(data)) + + if fin: + data += self._compressor.flush(zlib.Z_SYNC_FLUSH) + data = data[:-4] + + if proto.client: + no_context_takeover = self.client_no_context_takeover + else: + no_context_takeover = self.server_no_context_takeover + + if no_context_takeover: + self._compressor = None + + return (rsv, data) + + def __repr__(self) -> str: + descr = ["client_max_window_bits=%d" % self.client_max_window_bits] + if self.client_no_context_takeover: + descr.append("client_no_context_takeover") + descr.append("server_max_window_bits=%d" % self.server_max_window_bits) + if self.server_no_context_takeover: + descr.append("server_no_context_takeover") + + return "<{} {}>".format(self.__class__.__name__, "; ".join(descr)) + + +#: SUPPORTED_EXTENSIONS maps all supported extension names to their class. +#: This can be used to iterate all supported extensions of wsproto, instantiate +#: new extensions based on their name, or check if a given extension is +#: supported or not. +SUPPORTED_EXTENSIONS = {PerMessageDeflate.name: PerMessageDeflate} diff --git a/env/lib/python3.10/site-packages/wsproto/frame_protocol.py b/env/lib/python3.10/site-packages/wsproto/frame_protocol.py new file mode 100644 index 0000000..d13a769 --- /dev/null +++ b/env/lib/python3.10/site-packages/wsproto/frame_protocol.py @@ -0,0 +1,673 @@ +""" +wsproto/frame_protocol +~~~~~~~~~~~~~~~~~~~~~~ + +WebSocket frame protocol implementation. 
+""" + +import os +import struct +from codecs import getincrementaldecoder, IncrementalDecoder +from enum import IntEnum +from typing import Generator, List, NamedTuple, Optional, Tuple, TYPE_CHECKING, Union + +if TYPE_CHECKING: + from .extensions import Extension # pragma: no cover + + +_XOR_TABLE = [bytes(a ^ b for a in range(256)) for b in range(256)] + + +class XorMaskerSimple: + def __init__(self, masking_key: bytes) -> None: + self._masking_key = masking_key + + def process(self, data: bytes) -> bytes: + if data: + data_array = bytearray(data) + a, b, c, d = (_XOR_TABLE[n] for n in self._masking_key) + data_array[::4] = data_array[::4].translate(a) + data_array[1::4] = data_array[1::4].translate(b) + data_array[2::4] = data_array[2::4].translate(c) + data_array[3::4] = data_array[3::4].translate(d) + + # Rotate the masking key so that the next usage continues + # with the next key element, rather than restarting. + key_rotation = len(data) % 4 + self._masking_key = ( + self._masking_key[key_rotation:] + self._masking_key[:key_rotation] + ) + + return bytes(data_array) + return data + + +class XorMaskerNull: + def process(self, data: bytes) -> bytes: + return data + + +# RFC6455, Section 5.2 - Base Framing Protocol + +# Payload length constants +PAYLOAD_LENGTH_TWO_BYTE = 126 +PAYLOAD_LENGTH_EIGHT_BYTE = 127 +MAX_PAYLOAD_NORMAL = 125 +MAX_PAYLOAD_TWO_BYTE = 2**16 - 1 +MAX_PAYLOAD_EIGHT_BYTE = 2**64 - 1 +MAX_FRAME_PAYLOAD = MAX_PAYLOAD_EIGHT_BYTE + +# MASK and PAYLOAD LEN are packed into a byte +MASK_MASK = 0x80 +PAYLOAD_LEN_MASK = 0x7F + +# FIN, RSV[123] and OPCODE are packed into a single byte +FIN_MASK = 0x80 +RSV1_MASK = 0x40 +RSV2_MASK = 0x20 +RSV3_MASK = 0x10 +OPCODE_MASK = 0x0F + + +class Opcode(IntEnum): + """ + RFC 6455, Section 5.2 - Base Framing Protocol + """ + + #: Continuation frame + CONTINUATION = 0x0 + + #: Text message + TEXT = 0x1 + + #: Binary message + BINARY = 0x2 + + #: Close frame + CLOSE = 0x8 + + #: Ping frame + PING = 0x9 + + #: Pong 
frame + PONG = 0xA + + def iscontrol(self) -> bool: + return bool(self & 0x08) + + +class CloseReason(IntEnum): + """ + RFC 6455, Section 7.4.1 - Defined Status Codes + """ + + #: indicates a normal closure, meaning that the purpose for + #: which the connection was established has been fulfilled. + NORMAL_CLOSURE = 1000 + + #: indicates that an endpoint is "going away", such as a server + #: going down or a browser having navigated away from a page. + GOING_AWAY = 1001 + + #: indicates that an endpoint is terminating the connection due + #: to a protocol error. + PROTOCOL_ERROR = 1002 + + #: indicates that an endpoint is terminating the connection + #: because it has received a type of data it cannot accept (e.g., an + #: endpoint that understands only text data MAY send this if it + #: receives a binary message). + UNSUPPORTED_DATA = 1003 + + #: Reserved. The specific meaning might be defined in the future. + # DON'T DEFINE THIS: RESERVED_1004 = 1004 + + #: is a reserved value and MUST NOT be set as a status code in a + #: Close control frame by an endpoint. It is designated for use in + #: applications expecting a status code to indicate that no status + #: code was actually present. + NO_STATUS_RCVD = 1005 + + #: is a reserved value and MUST NOT be set as a status code in a + #: Close control frame by an endpoint. It is designated for use in + #: applications expecting a status code to indicate that the + #: connection was closed abnormally, e.g., without sending or + #: receiving a Close control frame. + ABNORMAL_CLOSURE = 1006 + + #: indicates that an endpoint is terminating the connection + #: because it has received data within a message that was not + #: consistent with the type of the message (e.g., non-UTF-8 [RFC3629] + #: data within a text message). + INVALID_FRAME_PAYLOAD_DATA = 1007 + + #: indicates that an endpoint is terminating the connection + #: because it has received a message that violates its policy. 
This + #: is a generic status code that can be returned when there is no + #: other more suitable status code (e.g., 1003 or 1009) or if there + #: is a need to hide specific details about the policy. + POLICY_VIOLATION = 1008 + + #: indicates that an endpoint is terminating the connection + #: because it has received a message that is too big for it to + #: process. + MESSAGE_TOO_BIG = 1009 + + #: indicates that an endpoint (client) is terminating the + #: connection because it has expected the server to negotiate one or + #: more extension, but the server didn't return them in the response + #: message of the WebSocket handshake. The list of extensions that + #: are needed SHOULD appear in the /reason/ part of the Close frame. + #: Note that this status code is not used by the server, because it + #: can fail the WebSocket handshake instead. + MANDATORY_EXT = 1010 + + #: indicates that a server is terminating the connection because + #: it encountered an unexpected condition that prevented it from + #: fulfilling the request. + INTERNAL_ERROR = 1011 + + #: Server/service is restarting + #: (not part of RFC6455) + SERVICE_RESTART = 1012 + + #: Temporary server condition forced blocking client's request + #: (not part of RFC6455) + TRY_AGAIN_LATER = 1013 + + #: is a reserved value and MUST NOT be set as a status code in a + #: Close control frame by an endpoint. It is designated for use in + #: applications expecting a status code to indicate that the + #: connection was closed due to a failure to perform a TLS handshake + #: (e.g., the server certificate can't be verified). 
+ TLS_HANDSHAKE_FAILED = 1015 + + +# RFC 6455, Section 7.4.1 - Defined Status Codes +LOCAL_ONLY_CLOSE_REASONS = ( + CloseReason.NO_STATUS_RCVD, + CloseReason.ABNORMAL_CLOSURE, + CloseReason.TLS_HANDSHAKE_FAILED, +) + + +# RFC 6455, Section 7.4.2 - Status Code Ranges +MIN_CLOSE_REASON = 1000 +MIN_PROTOCOL_CLOSE_REASON = 1000 +MAX_PROTOCOL_CLOSE_REASON = 2999 +MIN_LIBRARY_CLOSE_REASON = 3000 +MAX_LIBRARY_CLOSE_REASON = 3999 +MIN_PRIVATE_CLOSE_REASON = 4000 +MAX_PRIVATE_CLOSE_REASON = 4999 +MAX_CLOSE_REASON = 4999 + + +NULL_MASK = struct.pack("!I", 0) + + +class ParseFailed(Exception): + def __init__( + self, msg: str, code: CloseReason = CloseReason.PROTOCOL_ERROR + ) -> None: + super().__init__(msg) + self.code = code + + +class RsvBits(NamedTuple): + rsv1: bool + rsv2: bool + rsv3: bool + + +class Header(NamedTuple): + fin: bool + rsv: RsvBits + opcode: Opcode + payload_len: int + masking_key: Optional[bytes] + + +class Frame(NamedTuple): + opcode: Opcode + payload: Union[bytes, str, Tuple[int, str]] + frame_finished: bool + message_finished: bool + + +def _truncate_utf8(data: bytes, nbytes: int) -> bytes: + if len(data) <= nbytes: + return data + + # Truncate + data = data[:nbytes] + # But we might have cut a codepoint in half, in which case we want to + # discard the partial character so the data is at least + # well-formed. This is a little inefficient since it processes the + # whole message twice when in theory we could just peek at the last + # few characters, but since this is only used for close messages (max + # length = 125 bytes) it really doesn't matter. 
+ data = data.decode("utf-8", errors="ignore").encode("utf-8") + return data + + +class Buffer: + def __init__(self, initial_bytes: Optional[bytes] = None) -> None: + self.buffer = bytearray() + self.bytes_used = 0 + if initial_bytes: + self.feed(initial_bytes) + + def feed(self, new_bytes: bytes) -> None: + self.buffer += new_bytes + + def consume_at_most(self, nbytes: int) -> bytes: + if not nbytes: + return bytearray() + + data = self.buffer[self.bytes_used : self.bytes_used + nbytes] + self.bytes_used += len(data) + return data + + def consume_exactly(self, nbytes: int) -> Optional[bytes]: + if len(self.buffer) - self.bytes_used < nbytes: + return None + + return self.consume_at_most(nbytes) + + def commit(self) -> None: + # In CPython 3.4+, del[:n] is amortized O(n), *not* quadratic + del self.buffer[: self.bytes_used] + self.bytes_used = 0 + + def rollback(self) -> None: + self.bytes_used = 0 + + def __len__(self) -> int: + return len(self.buffer) + + +class MessageDecoder: + def __init__(self) -> None: + self.opcode: Optional[Opcode] = None + self.decoder: Optional[IncrementalDecoder] = None + + def process_frame(self, frame: Frame) -> Frame: + assert not frame.opcode.iscontrol() + + if self.opcode is None: + if frame.opcode is Opcode.CONTINUATION: + raise ParseFailed("unexpected CONTINUATION") + self.opcode = frame.opcode + elif frame.opcode is not Opcode.CONTINUATION: + raise ParseFailed("expected CONTINUATION, got %r" % frame.opcode) + + if frame.opcode is Opcode.TEXT: + self.decoder = getincrementaldecoder("utf-8")() + + finished = frame.frame_finished and frame.message_finished + + if self.decoder is None: + data = frame.payload + else: + assert isinstance(frame.payload, (bytes, bytearray)) + try: + data = self.decoder.decode(frame.payload, finished) + except UnicodeDecodeError as exc: + raise ParseFailed(str(exc), CloseReason.INVALID_FRAME_PAYLOAD_DATA) + + frame = Frame(self.opcode, data, frame.frame_finished, finished) + + if finished: + self.opcode 
= None + self.decoder = None + + return frame + + +class FrameDecoder: + def __init__( + self, client: bool, extensions: Optional[List["Extension"]] = None + ) -> None: + self.client = client + self.extensions = extensions or [] + + self.buffer = Buffer() + + self.header: Optional[Header] = None + self.effective_opcode: Optional[Opcode] = None + self.masker: Union[None, XorMaskerNull, XorMaskerSimple] = None + self.payload_required = 0 + self.payload_consumed = 0 + + def receive_bytes(self, data: bytes) -> None: + self.buffer.feed(data) + + def process_buffer(self) -> Optional[Frame]: + if not self.header: + if not self.parse_header(): + return None + # parse_header() sets these. + assert self.header is not None + assert self.masker is not None + assert self.effective_opcode is not None + + if len(self.buffer) < self.payload_required: + return None + + payload_remaining = self.header.payload_len - self.payload_consumed + payload = self.buffer.consume_at_most(payload_remaining) + if not payload and self.header.payload_len > 0: + return None + self.buffer.commit() + + self.payload_consumed += len(payload) + finished = self.payload_consumed == self.header.payload_len + + payload = self.masker.process(payload) + + for extension in self.extensions: + payload_ = extension.frame_inbound_payload_data(self, payload) + if isinstance(payload_, CloseReason): + raise ParseFailed("error in extension", payload_) + payload = payload_ + + if finished: + final = bytearray() + for extension in self.extensions: + result = extension.frame_inbound_complete(self, self.header.fin) + if isinstance(result, CloseReason): + raise ParseFailed("error in extension", result) + if result is not None: + final += result + payload += final + + frame = Frame(self.effective_opcode, payload, finished, self.header.fin) + + if finished: + self.header = None + self.effective_opcode = None + self.masker = None + else: + self.effective_opcode = Opcode.CONTINUATION + + return frame + + def parse_header(self) 
-> bool: + data = self.buffer.consume_exactly(2) + if data is None: + self.buffer.rollback() + return False + + fin = bool(data[0] & FIN_MASK) + rsv = RsvBits( + bool(data[0] & RSV1_MASK), + bool(data[0] & RSV2_MASK), + bool(data[0] & RSV3_MASK), + ) + opcode = data[0] & OPCODE_MASK + try: + opcode = Opcode(opcode) + except ValueError: + raise ParseFailed(f"Invalid opcode {opcode:#x}") + + if opcode.iscontrol() and not fin: + raise ParseFailed("Invalid attempt to fragment control frame") + + has_mask = bool(data[1] & MASK_MASK) + payload_len_short = data[1] & PAYLOAD_LEN_MASK + payload_len = self.parse_extended_payload_length(opcode, payload_len_short) + if payload_len is None: + self.buffer.rollback() + return False + + self.extension_processing(opcode, rsv, payload_len) + + if has_mask and self.client: + raise ParseFailed("client received unexpected masked frame") + if not has_mask and not self.client: + raise ParseFailed("server received unexpected unmasked frame") + if has_mask: + masking_key = self.buffer.consume_exactly(4) + if masking_key is None: + self.buffer.rollback() + return False + self.masker = XorMaskerSimple(masking_key) + else: + self.masker = XorMaskerNull() + + self.buffer.commit() + self.header = Header(fin, rsv, opcode, payload_len, None) + self.effective_opcode = self.header.opcode + if self.header.opcode.iscontrol(): + self.payload_required = payload_len + else: + self.payload_required = 0 + self.payload_consumed = 0 + return True + + def parse_extended_payload_length( + self, opcode: Opcode, payload_len: int + ) -> Optional[int]: + if opcode.iscontrol() and payload_len > MAX_PAYLOAD_NORMAL: + raise ParseFailed("Control frame with payload len > 125") + if payload_len == PAYLOAD_LENGTH_TWO_BYTE: + data = self.buffer.consume_exactly(2) + if data is None: + return None + (payload_len,) = struct.unpack("!H", data) + if payload_len <= MAX_PAYLOAD_NORMAL: + raise ParseFailed( + "Payload length used 2 bytes when 1 would have sufficed" + ) + elif 
payload_len == PAYLOAD_LENGTH_EIGHT_BYTE: + data = self.buffer.consume_exactly(8) + if data is None: + return None + (payload_len,) = struct.unpack("!Q", data) + if payload_len <= MAX_PAYLOAD_TWO_BYTE: + raise ParseFailed( + "Payload length used 8 bytes when 2 would have sufficed" + ) + if payload_len >> 63: + # I'm not sure why this is illegal, but that's what the RFC + # says, so... + raise ParseFailed("8-byte payload length with non-zero MSB") + + return payload_len + + def extension_processing( + self, opcode: Opcode, rsv: RsvBits, payload_len: int + ) -> None: + rsv_used = [False, False, False] + for extension in self.extensions: + result = extension.frame_inbound_header(self, opcode, rsv, payload_len) + if isinstance(result, CloseReason): + raise ParseFailed("error in extension", result) + for bit, used in enumerate(result): + if used: + rsv_used[bit] = True + for expected, found in zip(rsv_used, rsv): + if found and not expected: + raise ParseFailed("Reserved bit set unexpectedly") + + +class FrameProtocol: + def __init__(self, client: bool, extensions: List["Extension"]) -> None: + self.client = client + self.extensions = [ext for ext in extensions if ext.enabled()] + + # Global state + self._frame_decoder = FrameDecoder(self.client, self.extensions) + self._message_decoder = MessageDecoder() + self._parse_more = self._parse_more_gen() + + self._outbound_opcode: Optional[Opcode] = None + + def _process_close(self, frame: Frame) -> Frame: + data = frame.payload + assert isinstance(data, (bytes, bytearray)) + + if not data: + # "If this Close control frame contains no status code, _The + # WebSocket Connection Close Code_ is considered to be 1005" + data = (CloseReason.NO_STATUS_RCVD, "") + elif len(data) == 1: + raise ParseFailed("CLOSE with 1 byte payload") + else: + (code,) = struct.unpack("!H", data[:2]) + if code < MIN_CLOSE_REASON or code > MAX_CLOSE_REASON: + raise ParseFailed("CLOSE with invalid code") + try: + code = CloseReason(code) + except 
ValueError: + pass + if code in LOCAL_ONLY_CLOSE_REASONS: + raise ParseFailed("remote CLOSE with local-only reason") + if not isinstance(code, CloseReason) and code <= MAX_PROTOCOL_CLOSE_REASON: + raise ParseFailed("CLOSE with unknown reserved code") + try: + reason = data[2:].decode("utf-8") + except UnicodeDecodeError as exc: + raise ParseFailed( + "Error decoding CLOSE reason: " + str(exc), + CloseReason.INVALID_FRAME_PAYLOAD_DATA, + ) + data = (code, reason) + + return Frame(frame.opcode, data, frame.frame_finished, frame.message_finished) + + def _parse_more_gen(self) -> Generator[Optional[Frame], None, None]: + # Consume as much as we can from self._buffer, yielding events, and + # then yield None when we need more data. Or raise ParseFailed. + + # XX FIXME this should probably be refactored so that we never see + # disabled extensions in the first place... + self.extensions = [ext for ext in self.extensions if ext.enabled()] + closed = False + + while not closed: + frame = self._frame_decoder.process_buffer() + + if frame is not None: + if not frame.opcode.iscontrol(): + frame = self._message_decoder.process_frame(frame) + elif frame.opcode == Opcode.CLOSE: + frame = self._process_close(frame) + closed = True + + yield frame + + def receive_bytes(self, data: bytes) -> None: + self._frame_decoder.receive_bytes(data) + + def received_frames(self) -> Generator[Frame, None, None]: + for event in self._parse_more: + if event is None: + break + else: + yield event + + def close(self, code: Optional[int] = None, reason: Optional[str] = None) -> bytes: + payload = bytearray() + if code is CloseReason.NO_STATUS_RCVD: + code = None + if code is None and reason: + raise TypeError("cannot specify a reason without a code") + if code in LOCAL_ONLY_CLOSE_REASONS: + code = CloseReason.NORMAL_CLOSURE + if code is not None: + payload += bytearray(struct.pack("!H", code)) + if reason is not None: + payload += _truncate_utf8( + reason.encode("utf-8"), MAX_PAYLOAD_NORMAL - 2 + ) 
+ + return self._serialize_frame(Opcode.CLOSE, payload) + + def ping(self, payload: bytes = b"") -> bytes: + return self._serialize_frame(Opcode.PING, payload) + + def pong(self, payload: bytes = b"") -> bytes: + return self._serialize_frame(Opcode.PONG, payload) + + def send_data( + self, payload: Union[bytes, bytearray, str] = b"", fin: bool = True + ) -> bytes: + if isinstance(payload, (bytes, bytearray, memoryview)): + opcode = Opcode.BINARY + elif isinstance(payload, str): + opcode = Opcode.TEXT + payload = payload.encode("utf-8") + else: + raise ValueError("Must provide bytes or text") + + if self._outbound_opcode is None: + self._outbound_opcode = opcode + elif self._outbound_opcode is not opcode: + raise TypeError("Data type mismatch inside message") + else: + opcode = Opcode.CONTINUATION + + if fin: + self._outbound_opcode = None + + return self._serialize_frame(opcode, payload, fin) + + def _make_fin_rsv_opcode(self, fin: bool, rsv: RsvBits, opcode: Opcode) -> int: + fin_bits = int(fin) << 7 + rsv_bits = (int(rsv.rsv1) << 6) + (int(rsv.rsv2) << 5) + (int(rsv.rsv3) << 4) + opcode_bits = int(opcode) + + return fin_bits | rsv_bits | opcode_bits + + def _serialize_frame( + self, opcode: Opcode, payload: bytes = b"", fin: bool = True + ) -> bytes: + rsv = RsvBits(False, False, False) + for extension in reversed(self.extensions): + rsv, payload = extension.frame_outbound(self, opcode, rsv, payload, fin) + + fin_rsv_opcode = self._make_fin_rsv_opcode(fin, rsv, opcode) + + payload_length = len(payload) + quad_payload = False + if payload_length <= MAX_PAYLOAD_NORMAL: + first_payload = payload_length + second_payload = None + elif payload_length <= MAX_PAYLOAD_TWO_BYTE: + first_payload = PAYLOAD_LENGTH_TWO_BYTE + second_payload = payload_length + else: + first_payload = PAYLOAD_LENGTH_EIGHT_BYTE + second_payload = payload_length + quad_payload = True + + if self.client: + first_payload |= 1 << 7 + + header = bytearray([fin_rsv_opcode, first_payload]) + if 
second_payload is not None: + if opcode.iscontrol(): + raise ValueError("payload too long for control frame") + if quad_payload: + header += bytearray(struct.pack("!Q", second_payload)) + else: + header += bytearray(struct.pack("!H", second_payload)) + + if self.client: + # "The masking key is a 32-bit value chosen at random by the + # client. When preparing a masked frame, the client MUST pick a + # fresh masking key from the set of allowed 32-bit values. The + # masking key needs to be unpredictable; thus, the masking key + # MUST be derived from a strong source of entropy, and the masking + # key for a given frame MUST NOT make it simple for a server/proxy + # to predict the masking key for a subsequent frame. The + # unpredictability of the masking key is essential to prevent + # authors of malicious applications from selecting the bytes that + # appear on the wire." + # -- https://tools.ietf.org/html/rfc6455#section-5.3 + masking_key = os.urandom(4) + masker = XorMaskerSimple(masking_key) + return header + masking_key + masker.process(payload) + + return header + payload diff --git a/env/lib/python3.10/site-packages/wsproto/handshake.py b/env/lib/python3.10/site-packages/wsproto/handshake.py new file mode 100644 index 0000000..c456939 --- /dev/null +++ b/env/lib/python3.10/site-packages/wsproto/handshake.py @@ -0,0 +1,491 @@ +""" +wsproto/handshake +~~~~~~~~~~~~~~~~~~ + +An implementation of WebSocket handshakes. 
+""" +from collections import deque +from typing import ( + cast, + Deque, + Dict, + Generator, + Iterable, + List, + Optional, + Sequence, + Union, +) + +import h11 + +from .connection import Connection, ConnectionState, ConnectionType +from .events import AcceptConnection, Event, RejectConnection, RejectData, Request +from .extensions import Extension +from .typing import Headers +from .utilities import ( + generate_accept_token, + generate_nonce, + LocalProtocolError, + normed_header_dict, + RemoteProtocolError, + split_comma_header, +) + +# RFC6455, Section 4.2.1/6 - Reading the Client's Opening Handshake +WEBSOCKET_VERSION = b"13" + + +class H11Handshake: + """A Handshake implementation for HTTP/1.1 connections.""" + + def __init__(self, connection_type: ConnectionType) -> None: + self.client = connection_type is ConnectionType.CLIENT + self._state = ConnectionState.CONNECTING + + if self.client: + self._h11_connection = h11.Connection(h11.CLIENT) + else: + self._h11_connection = h11.Connection(h11.SERVER) + + self._connection: Optional[Connection] = None + self._events: Deque[Event] = deque() + self._initiating_request: Optional[Request] = None + self._nonce: Optional[bytes] = None + + @property + def state(self) -> ConnectionState: + return self._state + + @property + def connection(self) -> Optional[Connection]: + """Return the established connection. + + This will either return the connection or raise a + LocalProtocolError if the connection has not yet been + established. + + :rtype: h11.Connection + """ + return self._connection + + def initiate_upgrade_connection( + self, headers: Headers, path: Union[bytes, str] + ) -> None: + """Initiate an upgrade connection. + + This should be used if the request has already be received and + parsed. + + :param list headers: HTTP headers represented as a list of 2-tuples. + :param str path: A URL path. 
+ """ + if self.client: + raise LocalProtocolError( + "Cannot initiate an upgrade connection when acting as the client" + ) + upgrade_request = h11.Request(method=b"GET", target=path, headers=headers) + h11_client = h11.Connection(h11.CLIENT) + self.receive_data(h11_client.send(upgrade_request)) + + def send(self, event: Event) -> bytes: + """Send an event to the remote. + + This will return the bytes to send based on the event or raise + a LocalProtocolError if the event is not valid given the + state. + + :returns: Data to send to the WebSocket peer. + :rtype: bytes + """ + data = b"" + if isinstance(event, Request): + data += self._initiate_connection(event) + elif isinstance(event, AcceptConnection): + data += self._accept(event) + elif isinstance(event, RejectConnection): + data += self._reject(event) + elif isinstance(event, RejectData): + data += self._send_reject_data(event) + else: + raise LocalProtocolError( + f"Event {event} cannot be sent during the handshake" + ) + return data + + def receive_data(self, data: Optional[bytes]) -> None: + """Receive data from the remote. + + A list of events that the remote peer triggered by sending + this data can be retrieved with :meth:`events`. + + :param bytes data: Data received from the WebSocket peer. 
+ """ + self._h11_connection.receive_data(data or b"") + while True: + try: + event = self._h11_connection.next_event() + except h11.RemoteProtocolError: + raise RemoteProtocolError( + "Bad HTTP message", event_hint=RejectConnection() + ) + if ( + isinstance(event, h11.ConnectionClosed) + or event is h11.NEED_DATA + or event is h11.PAUSED + ): + break + + if self.client: + if isinstance(event, h11.InformationalResponse): + if event.status_code == 101: + self._events.append(self._establish_client_connection(event)) + else: + self._events.append( + RejectConnection( + headers=list(event.headers), + status_code=event.status_code, + has_body=False, + ) + ) + self._state = ConnectionState.CLOSED + elif isinstance(event, h11.Response): + self._state = ConnectionState.REJECTING + self._events.append( + RejectConnection( + headers=list(event.headers), + status_code=event.status_code, + has_body=True, + ) + ) + elif isinstance(event, h11.Data): + self._events.append( + RejectData(data=event.data, body_finished=False) + ) + elif isinstance(event, h11.EndOfMessage): + self._events.append(RejectData(data=b"", body_finished=True)) + self._state = ConnectionState.CLOSED + else: + if isinstance(event, h11.Request): + self._events.append(self._process_connection_request(event)) + + def events(self) -> Generator[Event, None, None]: + """Return a generator that provides any events that have been generated + by protocol activity. + + :returns: a generator that yields H11 events. 
+ """ + while self._events: + yield self._events.popleft() + + # Server mode methods + + def _process_connection_request( # noqa: MC0001 + self, event: h11.Request + ) -> Request: + if event.method != b"GET": + raise RemoteProtocolError( + "Request method must be GET", event_hint=RejectConnection() + ) + connection_tokens = None + extensions: List[str] = [] + host = None + key = None + subprotocols: List[str] = [] + upgrade = b"" + version = None + headers: Headers = [] + for name, value in event.headers: + name = name.lower() + if name == b"connection": + connection_tokens = split_comma_header(value) + elif name == b"host": + host = value.decode("idna") + continue # Skip appending to headers + elif name == b"sec-websocket-extensions": + extensions.extend(split_comma_header(value)) + continue # Skip appending to headers + elif name == b"sec-websocket-key": + key = value + elif name == b"sec-websocket-protocol": + subprotocols.extend(split_comma_header(value)) + continue # Skip appending to headers + elif name == b"sec-websocket-version": + version = value + elif name == b"upgrade": + upgrade = value + headers.append((name, value)) + if connection_tokens is None or not any( + token.lower() == "upgrade" for token in connection_tokens + ): + raise RemoteProtocolError( + "Missing header, 'Connection: Upgrade'", event_hint=RejectConnection() + ) + if version != WEBSOCKET_VERSION: + raise RemoteProtocolError( + "Missing header, 'Sec-WebSocket-Version'", + event_hint=RejectConnection( + headers=[(b"Sec-WebSocket-Version", WEBSOCKET_VERSION)], + status_code=426 if version else 400, + ), + ) + if key is None: + raise RemoteProtocolError( + "Missing header, 'Sec-WebSocket-Key'", event_hint=RejectConnection() + ) + if upgrade.lower() != b"websocket": + raise RemoteProtocolError( + "Missing header, 'Upgrade: WebSocket'", event_hint=RejectConnection() + ) + if host is None: + raise RemoteProtocolError( + "Missing header, 'Host'", event_hint=RejectConnection() + ) + + 
self._initiating_request = Request( + extensions=extensions, + extra_headers=headers, + host=host, + subprotocols=subprotocols, + target=event.target.decode("ascii"), + ) + return self._initiating_request + + def _accept(self, event: AcceptConnection) -> bytes: + # _accept is always called after _process_connection_request. + assert self._initiating_request is not None + request_headers = normed_header_dict(self._initiating_request.extra_headers) + + nonce = request_headers[b"sec-websocket-key"] + accept_token = generate_accept_token(nonce) + + headers = [ + (b"Upgrade", b"WebSocket"), + (b"Connection", b"Upgrade"), + (b"Sec-WebSocket-Accept", accept_token), + ] + + if event.subprotocol is not None: + if event.subprotocol not in self._initiating_request.subprotocols: + raise LocalProtocolError(f"unexpected subprotocol {event.subprotocol}") + headers.append( + (b"Sec-WebSocket-Protocol", event.subprotocol.encode("ascii")) + ) + + if event.extensions: + accepts = server_extensions_handshake( + cast(Sequence[str], self._initiating_request.extensions), + event.extensions, + ) + if accepts: + headers.append((b"Sec-WebSocket-Extensions", accepts)) + + response = h11.InformationalResponse( + status_code=101, headers=headers + event.extra_headers + ) + self._connection = Connection( + ConnectionType.CLIENT if self.client else ConnectionType.SERVER, + event.extensions, + ) + self._state = ConnectionState.OPEN + return self._h11_connection.send(response) or b"" + + def _reject(self, event: RejectConnection) -> bytes: + if self.state != ConnectionState.CONNECTING: + raise LocalProtocolError( + "Connection cannot be rejected in state %s" % self.state + ) + + headers = list(event.headers) + if not event.has_body: + headers.append((b"content-length", b"0")) + response = h11.Response(status_code=event.status_code, headers=headers) + data = self._h11_connection.send(response) or b"" + self._state = ConnectionState.REJECTING + if not event.has_body: + data += 
self._h11_connection.send(h11.EndOfMessage()) or b"" + self._state = ConnectionState.CLOSED + return data + + def _send_reject_data(self, event: RejectData) -> bytes: + if self.state != ConnectionState.REJECTING: + raise LocalProtocolError( + f"Cannot send rejection data in state {self.state}" + ) + + data = self._h11_connection.send(h11.Data(data=event.data)) or b"" + if event.body_finished: + data += self._h11_connection.send(h11.EndOfMessage()) or b"" + self._state = ConnectionState.CLOSED + return data + + # Client mode methods + + def _initiate_connection(self, request: Request) -> bytes: + self._initiating_request = request + self._nonce = generate_nonce() + + headers = [ + (b"Host", request.host.encode("idna")), + (b"Upgrade", b"WebSocket"), + (b"Connection", b"Upgrade"), + (b"Sec-WebSocket-Key", self._nonce), + (b"Sec-WebSocket-Version", WEBSOCKET_VERSION), + ] + + if request.subprotocols: + headers.append( + ( + b"Sec-WebSocket-Protocol", + (", ".join(request.subprotocols)).encode("ascii"), + ) + ) + + if request.extensions: + offers: Dict[str, Union[str, bool]] = {} + for e in request.extensions: + assert isinstance(e, Extension) + offers[e.name] = e.offer() + extensions = [] + for name, params in offers.items(): + bname = name.encode("ascii") + if isinstance(params, bool): + if params: + extensions.append(bname) + else: + extensions.append(b"%s; %s" % (bname, params.encode("ascii"))) + if extensions: + headers.append((b"Sec-WebSocket-Extensions", b", ".join(extensions))) + + upgrade = h11.Request( + method=b"GET", + target=request.target.encode("ascii"), + headers=headers + request.extra_headers, + ) + return self._h11_connection.send(upgrade) or b"" + + def _establish_client_connection( + self, event: h11.InformationalResponse + ) -> AcceptConnection: # noqa: MC0001 + # _establish_client_connection is always called after _initiate_connection. 
+ assert self._initiating_request is not None + assert self._nonce is not None + + accept = None + connection_tokens = None + accepts: List[str] = [] + subprotocol = None + upgrade = b"" + headers: Headers = [] + for name, value in event.headers: + name = name.lower() + if name == b"connection": + connection_tokens = split_comma_header(value) + continue # Skip appending to headers + elif name == b"sec-websocket-extensions": + accepts = split_comma_header(value) + continue # Skip appending to headers + elif name == b"sec-websocket-accept": + accept = value + continue # Skip appending to headers + elif name == b"sec-websocket-protocol": + subprotocol = value.decode("ascii") + continue # Skip appending to headers + elif name == b"upgrade": + upgrade = value + continue # Skip appending to headers + headers.append((name, value)) + + if connection_tokens is None or not any( + token.lower() == "upgrade" for token in connection_tokens + ): + raise RemoteProtocolError( + "Missing header, 'Connection: Upgrade'", event_hint=RejectConnection() + ) + if upgrade.lower() != b"websocket": + raise RemoteProtocolError( + "Missing header, 'Upgrade: WebSocket'", event_hint=RejectConnection() + ) + accept_token = generate_accept_token(self._nonce) + if accept != accept_token: + raise RemoteProtocolError("Bad accept token", event_hint=RejectConnection()) + if subprotocol is not None: + if subprotocol not in self._initiating_request.subprotocols: + raise RemoteProtocolError( + f"unrecognized subprotocol {subprotocol}", + event_hint=RejectConnection(), + ) + extensions = client_extensions_handshake( + accepts, cast(Sequence[Extension], self._initiating_request.extensions) + ) + + self._connection = Connection( + ConnectionType.CLIENT if self.client else ConnectionType.SERVER, + extensions, + self._h11_connection.trailing_data[0], + ) + self._state = ConnectionState.OPEN + return AcceptConnection( + extensions=extensions, extra_headers=headers, subprotocol=subprotocol + ) + + def 
__repr__(self) -> str: + return "{}(client={}, state={})".format( + self.__class__.__name__, self.client, self.state + ) + + +def server_extensions_handshake( + requested: Iterable[str], supported: List[Extension] +) -> Optional[bytes]: + """Agree on the extensions to use returning an appropriate header value. + + This returns None if there are no agreed extensions + """ + accepts: Dict[str, Union[bool, bytes]] = {} + for offer in requested: + name = offer.split(";", 1)[0].strip() + for extension in supported: + if extension.name == name: + accept = extension.accept(offer) + if isinstance(accept, bool): + if accept: + accepts[extension.name] = True + elif accept is not None: + accepts[extension.name] = accept.encode("ascii") + + if accepts: + extensions: List[bytes] = [] + for name, params in accepts.items(): + name_bytes = name.encode("ascii") + if isinstance(params, bool): + assert params + extensions.append(name_bytes) + else: + if params == b"": + extensions.append(b"%s" % (name_bytes)) + else: + extensions.append(b"%s; %s" % (name_bytes, params)) + return b", ".join(extensions) + + return None + + +def client_extensions_handshake( + accepted: Iterable[str], supported: Sequence[Extension] +) -> List[Extension]: + # This raises RemoteProtocolError is the accepted extension is not + # supported. 
+ extensions = [] + for accept in accepted: + name = accept.split(";", 1)[0].strip() + for extension in supported: + if extension.name == name: + extension.finalize(accept) + extensions.append(extension) + break + else: + raise RemoteProtocolError( + f"unrecognized extension {name}", event_hint=RejectConnection() + ) + return extensions diff --git a/env/lib/python3.10/site-packages/wsproto/py.typed b/env/lib/python3.10/site-packages/wsproto/py.typed new file mode 100644 index 0000000..f5642f7 --- /dev/null +++ b/env/lib/python3.10/site-packages/wsproto/py.typed @@ -0,0 +1 @@ +Marker diff --git a/env/lib/python3.10/site-packages/wsproto/typing.py b/env/lib/python3.10/site-packages/wsproto/typing.py new file mode 100644 index 0000000..a44b27e --- /dev/null +++ b/env/lib/python3.10/site-packages/wsproto/typing.py @@ -0,0 +1,3 @@ +from typing import List, Tuple + +Headers = List[Tuple[bytes, bytes]] diff --git a/env/lib/python3.10/site-packages/wsproto/utilities.py b/env/lib/python3.10/site-packages/wsproto/utilities.py new file mode 100644 index 0000000..7cf53d1 --- /dev/null +++ b/env/lib/python3.10/site-packages/wsproto/utilities.py @@ -0,0 +1,88 @@ +""" +wsproto/utilities +~~~~~~~~~~~~~~~~~ + +Utility functions that do not belong in a separate module. +""" +import base64 +import hashlib +import os +from typing import Dict, List, Optional, Union + +from h11._headers import Headers as H11Headers + +from .events import Event +from .typing import Headers + +# RFC6455, Section 1.3 - Opening Handshake +ACCEPT_GUID = b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11" + + +class ProtocolError(Exception): + pass + + +class LocalProtocolError(ProtocolError): + """Indicates an error due to local/programming errors. + + This is raised when the connection is asked to do something that + is either incompatible with the state or the websocket standard. + + """ + + pass # noqa + + +class RemoteProtocolError(ProtocolError): + """Indicates an error due to the remote's actions. 
+ + This is raised when processing the bytes from the remote if the + remote has sent data that is incompatible with the websocket + standard. + + .. attribute:: event_hint + + This is a suggested wsproto Event to send to the client based + on the error. It could be None if no hint is available. + + """ + + def __init__(self, message: str, event_hint: Optional[Event] = None) -> None: + self.event_hint = event_hint + super().__init__(message) + + +# Some convenience utilities for working with HTTP headers +def normed_header_dict(h11_headers: Union[Headers, H11Headers]) -> Dict[bytes, bytes]: + # This mangles Set-Cookie headers. But it happens that we don't care about + # any of those, so it's OK. For every other HTTP header, if there are + # multiple instances then you're allowed to join them together with + # commas. + name_to_values: Dict[bytes, List[bytes]] = {} + for name, value in h11_headers: + name_to_values.setdefault(name, []).append(value) + name_to_normed_value = {} + for name, values in name_to_values.items(): + name_to_normed_value[name] = b", ".join(values) + return name_to_normed_value + + +# We use this for parsing the proposed protocol list, and for parsing the +# proposed and accepted extension lists. For the proposed protocol list it's +# fine, because the ABNF is just 1#token. But for the extension lists, it's +# wrong, because those can contain quoted strings, which can in turn contain +# commas. XX FIXME +def split_comma_header(value: bytes) -> List[str]: + return [piece.decode("ascii").strip() for piece in value.split(b",")] + + +def generate_nonce() -> bytes: + # os.urandom may be overkill for this use case, but I don't think this + # is a bottleneck, and better safe than sorry... 
+ return base64.b64encode(os.urandom(16)) + + +def generate_accept_token(token: bytes) -> bytes: + accept_token = token + ACCEPT_GUID + accept_token = hashlib.sha1(accept_token).digest() + return base64.b64encode(accept_token) diff --git a/evolutionapi/services/websocket.py b/evolutionapi/services/websocket.py new file mode 100644 index 0000000..d3e89af --- /dev/null +++ b/evolutionapi/services/websocket.py @@ -0,0 +1,135 @@ +import socketio +from typing import Callable, Dict, Any +import logging +import ssl +import time +from typing import Optional + +class WebSocketManager: + def __init__(self, base_url: str, instance_id: str, api_token: str, max_retries: int = 5, retry_delay: float = 1.0): + """ + Inicializa o gerenciador de WebSocket + + Args: + base_url (str): URL base da API + instance_id (str): ID da instância + api_token (str): Token de autenticação da API + max_retries (int): Número máximo de tentativas de reconexão + retry_delay (float): Delay inicial entre tentativas em segundos + """ + self.base_url = base_url.rstrip('/') + self.instance_id = instance_id + self.api_token = api_token + self.max_retries = max_retries + self.retry_delay = retry_delay + self.retry_count = 0 + self.should_reconnect = True + + # Configuração do Socket.IO + self.sio = socketio.Client( + ssl_verify=False, # Para desenvolvimento local + logger=False, + engineio_logger=False, + request_timeout=30 + ) + + # Configura o logger da classe para INFO + self.logger = logging.getLogger(__name__) + self.logger.setLevel(logging.INFO) + + # Dicionário para armazenar os handlers registrados + self._handlers = {} + + # Configuração dos handlers de eventos + self.sio.on('connect', self._on_connect) + self.sio.on('disconnect', self._on_disconnect) + self.sio.on('error', self._on_error) + + # Registra o handler global no namespace específico da instância + self.sio.on('*', self._handle_event, namespace=f'/{self.instance_id}') + + def _on_connect(self): + """Handler para evento de conexão""" + 
self.logger.info("Socket.IO conectado") + self.retry_count = 0 # Reseta o contador de retry após conexão bem-sucedida + + def _on_disconnect(self): + """Handler para evento de desconexão""" + self.logger.warning(f"Socket.IO desconectado. Tentativa {self.retry_count + 1}/{self.max_retries}") + if self.should_reconnect and self.retry_count < self.max_retries: + self._attempt_reconnect() + else: + self.logger.error("Número máximo de tentativas de reconexão atingido") + + def _on_error(self, error): + """Handler para eventos de erro""" + self.logger.error(f"Erro no Socket.IO: {str(error)}", exc_info=True) + + def _attempt_reconnect(self): + """Tenta reconectar com backoff exponencial""" + try: + delay = self.retry_delay * (2 ** self.retry_count) # Backoff exponencial + self.logger.info(f"Tentando reconectar em {delay:.2f} segundos...") + time.sleep(delay) + self.connect() + self.retry_count += 1 + except Exception as e: + self.logger.error(f"Erro durante tentativa de reconexão: {str(e)}", exc_info=True) + if self.retry_count < self.max_retries: + self._attempt_reconnect() + else: + self.logger.error("Todas as tentativas de reconexão falharam") + + def _handle_event(self, event, *args): + """Handler global para todos os eventos""" + # Só processa eventos que foram registrados + if event in self._handlers: + self.logger.debug(f"Evento recebido no namespace /{self.instance_id}: {event}") + self.logger.debug(f"Dados do evento: {args}") + + try: + # Extrai os dados do evento + raw_data = args[0] if args else {} + + # Garante que estamos passando o objeto correto para o callback + if isinstance(raw_data, dict): + self.logger.debug(f"Chamando handler para {event} com dados: {raw_data}") + self._handlers[event](raw_data) + else: + self.logger.error(f"Dados inválidos recebidos para evento {event}: {raw_data}") + except Exception as e: + self.logger.error(f"Erro ao processar evento {event}: {str(e)}", exc_info=True) + + def connect(self): + """Conecta ao servidor Socket.IO""" + 
try: + # Conecta apenas ao namespace da instância com o header de autenticação + self.sio.connect( + f"{self.base_url}?apikey={self.api_token}", + transports=['websocket'], + namespaces=[f'/{self.instance_id}'], + wait_timeout=30 + ) + + # Entra na sala específica da instância + self.sio.emit('subscribe', {'instance': self.instance_id}, namespace=f'/{self.instance_id}') + + except Exception as e: + self.logger.error(f"Erro ao conectar ao Socket.IO: {str(e)}", exc_info=True) + raise + + def disconnect(self): + """Desconecta do servidor Socket.IO""" + self.should_reconnect = False # Impede tentativas de reconexão + if self.sio.connected: + self.sio.disconnect() + + def on(self, event: str, callback: Callable): + """ + Registra um callback para um evento específico + + Args: + event (str): Nome do evento + callback (Callable): Função a ser chamada quando o evento ocorrer + """ + self._handlers[event] = callback \ No newline at end of file diff --git a/setup.py b/setup.py index 2872aa8..dae0359 100644 --- a/setup.py +++ b/setup.py @@ -11,7 +11,8 @@ setup( include_package_data=True, install_requires=[ 'requests>=2.25.1', - 'requests_toolbelt>=1.0.0' + 'requests_toolbelt>=1.0.0', + 'python-socketio>=5.11.1' ], python_requires='>=3.6', ) diff --git a/test_evolution.py b/test_evolution.py index 5652156..10f3a0c 100644 --- a/test_evolution.py +++ b/test_evolution.py @@ -1,7 +1,16 @@ from evolutionapi.client import EvolutionClient from evolutionapi.models.instance import InstanceConfig from evolutionapi.models.message import TextMessage, MediaMessage, MediaType +from evolutionapi.services.websocket import WebSocketManager +import time +import logging +# Configuração do logging +logging.basicConfig( + level=logging.DEBUG, + format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' +) +logger = logging.getLogger(__name__) print("Iniciando cliente") @@ -10,9 +19,58 @@ client = EvolutionClient( api_token='429683C4C977415CAAFCCE10F7D57E11' ) +instance_token = 
"82D55E57CBBC-48A5-98FB-E99655AE7148" +instance_id = "teste" -instance_token = "60BDC703E413-4710-9473-CA2A763866FE" -instance_id = "94a0aafa-e636-4534-a185-5562bf8f2c22" +# Inicializando o WebSocket +websocket_manager = WebSocketManager( + base_url='http://localhost:8081', + instance_id=instance_id, + api_token=instance_token +) + +def on_message(data): + """Handler para evento de mensagens""" + try: + if 'data' in data: + message_data = data['data'] + logger.info("=== Mensagem Recebida ===") + logger.info(f"De: {message_data['key']['remoteJid']}") + logger.info(f"Tipo: {message_data['messageType']}") + + # Extrai o conteúdo baseado no tipo da mensagem + if 'message' in message_data: + if 'conversation' in message_data['message']: + logger.info(f"Conteúdo: {message_data['message']['conversation']}") + elif 'extendedTextMessage' in message_data['message']: + logger.info(f"Conteúdo: {message_data['message']['extendedTextMessage']['text']}") + elif 'imageMessage' in message_data['message']: + logger.info(f"Conteúdo: [Imagem] {message_data['message']['imageMessage'].get('caption', '')}") + else: + logger.info(f"Conteúdo: {message_data['message']}") + + logger.info("=======================") + except Exception as e: + logger.error(f"Erro ao processar mensagem: {e}", exc_info=True) + +logger.info("Registrando handlers de eventos...") + +# Registrando handlers de eventos +websocket_manager.on('messages.upsert', on_message) + +try: + logger.info("Iniciando conexão WebSocket...") + # Conectando ao WebSocket + websocket_manager.connect() + + # Mantendo o programa rodando para receber eventos + logger.info("Aguardando eventos...") + while True: + time.sleep(1) +except KeyboardInterrupt: + logger.info("Encerrando conexão WebSocket...") +finally: + websocket_manager.disconnect() # response = client.group.fetch_all_groups(instance_id, instance_token, False) @@ -94,17 +152,17 @@ instance_id = "94a0aafa-e636-4534-a185-5562bf8f2c22" # print("Instância deletada") # 
print(delete_instance) -# group_id = "120363024931487276@g.us" +# group_id = "120363026465248932@g.us" # # Buscando as 3 últimas mensagens do grupo # mensagens = client.chat.get_messages( # instance_id=instance_id, # remote_jid=group_id, # instance_token=instance_token, -# timestamp_start="2024-12-27T00:00:00Z", -# timestamp_end="2024-12-27T23:59:59Z", +# timestamp_start="2025-01-16T00:00:00Z", +# timestamp_end="2025-01-16T23:59:59Z", # page=1, -# offset=3 +# offset=10 # ) # print("Mensagens encontradas:")