diff --git a/.drone.yml b/.drone.yml index 653a8b7..4ad799e 100644 --- a/.drone.yml +++ b/.drone.yml @@ -1,19 +1,19 @@ --- -kind: pipeline -type: exec -name: default +# kind: pipeline +# type: exec +# name: default -platform: - os: linux - arch: amd64 +# platform: +# os: linux +# arch: amd64 -clone: - # убрано так как сейчас не тестим ничего предварительно а сразу тянем в директорию - disable: true +# clone: +# # убрано так как сейчас не тестим ничего предварительно а сразу тянем в директорию +# disable: true -steps: -- name: pull into the folduh - commands: - - cd /etc/ansible - - git pull origin some-kind-of-lobster \ No newline at end of file +# steps: +# - name: pull into the folduh +# commands: +# - cd /etc/ansible +# - git pull origin some-kind-of-lobster \ No newline at end of file diff --git a/.gitignore b/.gitignore index 1fa222a..dcb25f8 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,2 @@ .vaulto +asdf \ No newline at end of file diff --git a/ansible.cfg b/ansible.cfg index 42749e3..e820ee7 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -1,4 +1,15 @@ +#### export ANSIBLE_CONFIG=./ansible.cfg + [defaults] +gathering = smart +fact_caching = jsonfile +fact_caching_connection = /tmp/facts_cache +# two hours timeout +fact_caching_timeout = 7200 + + +interpreter_python = auto_silent +ansible_python_interpreter = auto_silent # Use the YAML callback plugin. stdout_callback = yaml # Use the stdout_callback when running ad-hoc commands. 
@@ -6,12 +17,28 @@ bin_ansible_callbacks = True host_key_checking = false -vault_password_file = /etc/ansible/.vaulto +#vault_password_file = /etc/ansible/.vaulto +vault_password_file = /tmp/.vaulto -callback_plugins = /etc/ansible/plugins/callback -callback_whitelist = telegram -callbacks_enabled = telegram +# callback_plugins = /etc/ansible/plugins/callback +# callback_whitelist = telegram +# callbacks_enabled = telegram -[callback_telegram] -tg_token = 6472915685:AAHPvgrQoqG7DxtfbnHWPe3Lfild-CGJ1j8 -tg_chat_id = -4023350326 \ No newline at end of file +strategy_plugins = mitogen-0.3.9/ansible_mitogen/plugins/strategy +strategy = mitogen_linear + +#### TODO чому-то не делается +roles_path = roles:internal_roles +# # [callback_telegram] +# # tg_token = 6472915685:AAHPvgrQoqG7DxtfbnHWPe3Lfild-CGJ1j8 +# # tg_chat_id = -4023350326 + + +# добавление юзера +# useradd -m hogweed1 -s /usr/bin/bash +# passwd hogweed1 +# sudo adduser hogweed1 sudo + +[ssh_connection] +# Enable pipelining, requires disabling requiretty in sudoers +pipelining = True \ No newline at end of file diff --git a/environments/just-created/group_vars/all/ssh-creds.yml b/environments/just-created/group_vars/all/ssh-creds.yml new file mode 100644 index 0000000..322f857 --- /dev/null +++ b/environments/just-created/group_vars/all/ssh-creds.yml @@ -0,0 +1,5 @@ +--- +ansible_ssh_user: root +ansible_ssh_pass: admin +ansible_sudo_pass: admin +ansible_ssh_private_key_file: '/home/hogweed1/id25519.key' \ No newline at end of file diff --git a/environments/just-created/group_vars/lxc.yml b/environments/just-created/group_vars/lxc.yml new file mode 100644 index 0000000..e907b26 --- /dev/null +++ b/environments/just-created/group_vars/lxc.yml @@ -0,0 +1,5 @@ +--- +ansible_ssh_user: hogweed1 +ansible_ssh_pass: coloredhorses +ansible_sudo_pass: coloredhorses +ansible_ssh_private_key_file: '/home/hogweed1/id25519.key' \ No newline at end of file diff --git a/environments/just-created/hosts.yml 
b/environments/just-created/hosts.yml new file mode 100644 index 0000000..a37d47c --- /dev/null +++ b/environments/just-created/hosts.yml @@ -0,0 +1,11 @@ +--- +# all: # keys must be unique, i.e. only one 'hosts' per group +# hosts: +# #nexus.guaranteedstruggle.host: +# #printing-slut.guaranteedstruggle.host: +# harbor.guaranteedstruggle.host: + +lxc: # keys must be unique, i.e. only one 'hosts' per group + hosts: + ### but its a vm wtf + harbor.guaranteedstruggle.host: \ No newline at end of file diff --git a/environments/proxmoxes/group_vars/all/all.yml b/environments/proxmoxes/group_vars/all/all.yml new file mode 100644 index 0000000..48b125b --- /dev/null +++ b/environments/proxmoxes/group_vars/all/all.yml @@ -0,0 +1,2 @@ +# отключаем ворнинг с митогена - https://github.com/mitogen-hq/mitogen/issues/740#issuecomment-731513058 +ansible_python_interpreter: /usr/bin/python3 \ No newline at end of file diff --git a/environments/proxmoxes/group_vars/all/ssh-creds.yml b/environments/proxmoxes/group_vars/all/ssh-creds.yml new file mode 100644 index 0000000..b5cc034 --- /dev/null +++ b/environments/proxmoxes/group_vars/all/ssh-creds.yml @@ -0,0 +1,4 @@ +--- +ansible_ssh_user: hogweed1 +ansible_ssh_pass: coloredhorses +ansible_sudo_pass: coloredhorses \ No newline at end of file diff --git a/environments/proxmoxes/hosts.yml b/environments/proxmoxes/hosts.yml new file mode 100644 index 0000000..d8edfc3 --- /dev/null +++ b/environments/proxmoxes/hosts.yml @@ -0,0 +1,55 @@ +--- +physical_machines: + hosts: + cyberbully.guaranteedstruggle.host: + gpu-slut.guaranteedstruggle.host: + children: + proxmoxes: + +proxmoxes: # keys must be unique, i.e. only one 'hosts' per group + hosts: + king-albert.guaranteedstruggle.host: + children: + semyons: + +semyons: # keys must be unique, i.e. 
only one 'hosts' per group + hosts: + semyon-0x01.guaranteedstruggle.host: + semyon-0x02.guaranteedstruggle.host: + semyon-0x03.guaranteedstruggle.host: + semyon-0x04.guaranteedstruggle.host: + semyon-0x05.guaranteedstruggle.host: +vms: + children: + printer: + kubernetes: + docker: + +docker: + hosts: + swarm-node1.guaranteedstruggle.host: + swarm-node2.guaranteedstruggle.host: + swarm-node3.guaranteedstruggle.host: + + harbor.guaranteedstruggle.host: + +kubernetes: + hosts: + rke2-master1.guaranteedstruggle.host: + rke2-master2.guaranteedstruggle.host: + rke2-master3.guaranteedstruggle.host: + rke2-worker1.guaranteedstruggle.host: + rke2-worker2.guaranteedstruggle.host: + rke2-worker3.guaranteedstruggle.host: + rke2-worker4.guaranteedstruggle.host: + rke2-worker5.guaranteedstruggle.host: + + k3s-rancher.guaranteedstruggle.host: + k3s-awx.guaranteedstruggle.host: + +printer: + hosts: + printing-slut.guaranteedstruggle.host: + +#### TODO +# lxc: diff --git a/mitogen-0.3.9.tar.gz b/mitogen-0.3.9.tar.gz new file mode 100644 index 0000000..d2a6e2e Binary files /dev/null and b/mitogen-0.3.9.tar.gz differ diff --git a/mitogen-0.3.9/LICENSE b/mitogen-0.3.9/LICENSE new file mode 100644 index 0000000..62ef15d --- /dev/null +++ b/mitogen-0.3.9/LICENSE @@ -0,0 +1,26 @@ +Copyright 2021, the Mitogen authors + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +3. 
Neither the name of the copyright holder nor the names of its contributors +may be used to endorse or promote products derived from this software without +specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mitogen-0.3.9/MANIFEST.in b/mitogen-0.3.9/MANIFEST.in new file mode 100644 index 0000000..1aba38f --- /dev/null +++ b/mitogen-0.3.9/MANIFEST.in @@ -0,0 +1 @@ +include LICENSE diff --git a/mitogen-0.3.9/PKG-INFO b/mitogen-0.3.9/PKG-INFO new file mode 100644 index 0000000..e324817 --- /dev/null +++ b/mitogen-0.3.9/PKG-INFO @@ -0,0 +1,39 @@ +Metadata-Version: 2.1 +Name: mitogen +Version: 0.3.9 +Summary: Library for writing distributed self-replicating programs. 
+Home-page: https://github.com/mitogen-hq/mitogen/ +Author: David Wilson +License: New BSD +Classifier: Environment :: Console +Classifier: Framework :: Ansible +Classifier: Intended Audience :: System Administrators +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: POSIX +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Topic :: System :: Distributed Computing +Classifier: Topic :: System :: Systems Administration +Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.* +Description-Content-Type: text/markdown +License-File: LICENSE + +# Mitogen + +Please see the documentation. + +![](https://i.imgur.com/eBM6LhJ.gif) + +[![Total alerts](https://img.shields.io/lgtm/alerts/g/mitogen-hq/mitogen.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/mitogen-hq/mitogen/alerts/) + +[![Build Status](https://dev.azure.com/mitogen-hq/mitogen/_apis/build/status/mitogen-hq.mitogen?branchName=master)](https://dev.azure.com/mitogen-hq/mitogen/_build/latest?definitionId=1&branchName=master) diff --git a/mitogen-0.3.9/README.md b/mitogen-0.3.9/README.md new file mode 100644 index 0000000..0d4d1b3 --- /dev/null +++ b/mitogen-0.3.9/README.md @@ -0,0 +1,9 @@ +# Mitogen + +Please see the documentation. 
+ +![](https://i.imgur.com/eBM6LhJ.gif) + +[![Total alerts](https://img.shields.io/lgtm/alerts/g/mitogen-hq/mitogen.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/mitogen-hq/mitogen/alerts/) + +[![Build Status](https://dev.azure.com/mitogen-hq/mitogen/_apis/build/status/mitogen-hq.mitogen?branchName=master)](https://dev.azure.com/mitogen-hq/mitogen/_build/latest?definitionId=1&branchName=master) diff --git a/mitogen-0.3.9/ansible_mitogen/__init__.py b/mitogen-0.3.9/ansible_mitogen/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/mitogen-0.3.9/ansible_mitogen/__pycache__/__init__.cpython-310.pyc b/mitogen-0.3.9/ansible_mitogen/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000..109c61a Binary files /dev/null and b/mitogen-0.3.9/ansible_mitogen/__pycache__/__init__.cpython-310.pyc differ diff --git a/mitogen-0.3.9/ansible_mitogen/__pycache__/affinity.cpython-310.pyc b/mitogen-0.3.9/ansible_mitogen/__pycache__/affinity.cpython-310.pyc new file mode 100644 index 0000000..3a3095e Binary files /dev/null and b/mitogen-0.3.9/ansible_mitogen/__pycache__/affinity.cpython-310.pyc differ diff --git a/mitogen-0.3.9/ansible_mitogen/__pycache__/connection.cpython-310.pyc b/mitogen-0.3.9/ansible_mitogen/__pycache__/connection.cpython-310.pyc new file mode 100644 index 0000000..c77892c Binary files /dev/null and b/mitogen-0.3.9/ansible_mitogen/__pycache__/connection.cpython-310.pyc differ diff --git a/mitogen-0.3.9/ansible_mitogen/__pycache__/loaders.cpython-310.pyc b/mitogen-0.3.9/ansible_mitogen/__pycache__/loaders.cpython-310.pyc new file mode 100644 index 0000000..cb88f93 Binary files /dev/null and b/mitogen-0.3.9/ansible_mitogen/__pycache__/loaders.cpython-310.pyc differ diff --git a/mitogen-0.3.9/ansible_mitogen/__pycache__/logging.cpython-310.pyc b/mitogen-0.3.9/ansible_mitogen/__pycache__/logging.cpython-310.pyc new file mode 100644 index 0000000..4933db2 Binary files /dev/null and 
b/mitogen-0.3.9/ansible_mitogen/__pycache__/logging.cpython-310.pyc differ diff --git a/mitogen-0.3.9/ansible_mitogen/__pycache__/mixins.cpython-310.pyc b/mitogen-0.3.9/ansible_mitogen/__pycache__/mixins.cpython-310.pyc new file mode 100644 index 0000000..385b339 Binary files /dev/null and b/mitogen-0.3.9/ansible_mitogen/__pycache__/mixins.cpython-310.pyc differ diff --git a/mitogen-0.3.9/ansible_mitogen/__pycache__/module_finder.cpython-310.pyc b/mitogen-0.3.9/ansible_mitogen/__pycache__/module_finder.cpython-310.pyc new file mode 100644 index 0000000..d582354 Binary files /dev/null and b/mitogen-0.3.9/ansible_mitogen/__pycache__/module_finder.cpython-310.pyc differ diff --git a/mitogen-0.3.9/ansible_mitogen/__pycache__/parsing.cpython-310.pyc b/mitogen-0.3.9/ansible_mitogen/__pycache__/parsing.cpython-310.pyc new file mode 100644 index 0000000..43256c7 Binary files /dev/null and b/mitogen-0.3.9/ansible_mitogen/__pycache__/parsing.cpython-310.pyc differ diff --git a/mitogen-0.3.9/ansible_mitogen/__pycache__/planner.cpython-310.pyc b/mitogen-0.3.9/ansible_mitogen/__pycache__/planner.cpython-310.pyc new file mode 100644 index 0000000..2c36c82 Binary files /dev/null and b/mitogen-0.3.9/ansible_mitogen/__pycache__/planner.cpython-310.pyc differ diff --git a/mitogen-0.3.9/ansible_mitogen/__pycache__/process.cpython-310.pyc b/mitogen-0.3.9/ansible_mitogen/__pycache__/process.cpython-310.pyc new file mode 100644 index 0000000..93e7dff Binary files /dev/null and b/mitogen-0.3.9/ansible_mitogen/__pycache__/process.cpython-310.pyc differ diff --git a/mitogen-0.3.9/ansible_mitogen/__pycache__/runner.cpython-310.pyc b/mitogen-0.3.9/ansible_mitogen/__pycache__/runner.cpython-310.pyc new file mode 100644 index 0000000..9f69ca8 Binary files /dev/null and b/mitogen-0.3.9/ansible_mitogen/__pycache__/runner.cpython-310.pyc differ diff --git a/mitogen-0.3.9/ansible_mitogen/__pycache__/services.cpython-310.pyc b/mitogen-0.3.9/ansible_mitogen/__pycache__/services.cpython-310.pyc new 
file mode 100644 index 0000000..1fcb9b6 Binary files /dev/null and b/mitogen-0.3.9/ansible_mitogen/__pycache__/services.cpython-310.pyc differ diff --git a/mitogen-0.3.9/ansible_mitogen/__pycache__/strategy.cpython-310.pyc b/mitogen-0.3.9/ansible_mitogen/__pycache__/strategy.cpython-310.pyc new file mode 100644 index 0000000..e4bf582 Binary files /dev/null and b/mitogen-0.3.9/ansible_mitogen/__pycache__/strategy.cpython-310.pyc differ diff --git a/mitogen-0.3.9/ansible_mitogen/__pycache__/target.cpython-310.pyc b/mitogen-0.3.9/ansible_mitogen/__pycache__/target.cpython-310.pyc new file mode 100644 index 0000000..68d3c0e Binary files /dev/null and b/mitogen-0.3.9/ansible_mitogen/__pycache__/target.cpython-310.pyc differ diff --git a/mitogen-0.3.9/ansible_mitogen/__pycache__/transport_config.cpython-310.pyc b/mitogen-0.3.9/ansible_mitogen/__pycache__/transport_config.cpython-310.pyc new file mode 100644 index 0000000..9da3bac Binary files /dev/null and b/mitogen-0.3.9/ansible_mitogen/__pycache__/transport_config.cpython-310.pyc differ diff --git a/mitogen-0.3.9/ansible_mitogen/affinity.py b/mitogen-0.3.9/ansible_mitogen/affinity.py new file mode 100644 index 0000000..635ee7b --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/affinity.py @@ -0,0 +1,288 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. 
Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +""" +As Mitogen separates asynchronous IO out to a broker thread, communication +necessarily involves context switching and waking that thread. When application +threads and the broker share a CPU, this can be almost invisibly fast - around +25 microseconds for a full A->B->A round-trip. + +However when threads are scheduled on different CPUs, round-trip delays +regularly vary wildly, and easily into milliseconds. Many contributing factors +exist, not least scenarios like: + +1. A is preempted immediately after waking B, but before releasing the GIL. +2. B wakes from IO wait only to immediately enter futex wait. +3. A may wait 10ms or more for another timeslice, as the scheduler on its CPU + runs threads unrelated to its transaction (i.e. not B), wake only to release + its GIL, before entering IO sleep waiting for a reply from B, which cannot + exist yet. +4. B wakes, acquires GIL, performs work, and sends reply to A, causing it to + wake. B is preempted before releasing GIL. +5. 
A wakes from IO wait only to immediately enter futex wait. +6. B may wait 10ms or more for another timeslice, wake only to release its GIL, + before sleeping again. +7. A wakes, acquires GIL, finally receives reply. + +Per above if we are unlucky, on an even moderately busy machine it is possible +to lose milliseconds just in scheduling delay, and the effect is compounded +when pairs of threads in process A are communicating with pairs of threads in +process B using the same scheme, such as when Ansible WorkerProcess is +communicating with ContextService in the connection multiplexer. In the worst +case it could involve 4 threads working in lockstep spread across 4 busy CPUs. + +Since multithreading in Python is essentially useless except for waiting on IO +due to the presence of the GIL, at least in Ansible there is no good reason for +threads in the same process to run on distinct CPUs - they always operate in +lockstep due to the GIL, and are thus vulnerable to issues like above. + +Linux lacks any natural API to describe what we want, it only permits +individual threads to be constrained to run on specific CPUs, and for that +constraint to be inherited by new threads and forks of the constrained thread. + +This module therefore implements a CPU pinning policy for Ansible processes, +providing methods that should be called early in any new process, either to +rebalance which CPU it is pinned to, or in the case of subprocesses, to remove +the pinning entirely. It is likely to require ongoing tweaking, since pinning +necessarily involves preventing the scheduler from making load balancing +decisions. 
+""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import ctypes +import logging +import mmap +import multiprocessing +import os +import struct + +import mitogen.core +import mitogen.parent + + +LOG = logging.getLogger(__name__) + + +try: + _libc = ctypes.CDLL(None, use_errno=True) + _strerror = _libc.strerror + _strerror.restype = ctypes.c_char_p + _sem_init = _libc.sem_init + _sem_wait = _libc.sem_wait + _sem_post = _libc.sem_post + _sched_setaffinity = _libc.sched_setaffinity +except (OSError, AttributeError): + _libc = None + _strerror = None + _sem_init = None + _sem_wait = None + _sem_post = None + _sched_setaffinity = None + + +class sem_t(ctypes.Structure): + """ + Wrap sem_t to allow storing a lock in shared memory. + """ + _fields_ = [ + ('data', ctypes.c_uint8 * 128), + ] + + def init(self): + if _sem_init(self.data, 1, 1): + raise Exception(_strerror(ctypes.get_errno())) + + def acquire(self): + if _sem_wait(self.data): + raise Exception(_strerror(ctypes.get_errno())) + + def release(self): + if _sem_post(self.data): + raise Exception(_strerror(ctypes.get_errno())) + + +class State(ctypes.Structure): + """ + Contents of shared memory segment. This allows :meth:`Manager.assign` to be + called from any child, since affinity assignment must happen from within + the context of the new child process. + """ + _fields_ = [ + ('lock', sem_t), + ('counter', ctypes.c_uint8), + ] + + +class Policy(object): + """ + Process affinity policy. + """ + def assign_controller(self): + """ + Assign the Ansible top-level policy to this process. + """ + + def assign_muxprocess(self, index): + """ + Assign the MuxProcess policy to this process. + """ + + def assign_worker(self): + """ + Assign the WorkerProcess policy to this process. + """ + + def assign_subprocess(self): + """ + Assign the helper subprocess policy to this process. 
+ """ + +class FixedPolicy(Policy): + """ + :class:`Policy` for machines where the only control method available is + fixed CPU placement. The scheme here was tested on an otherwise idle 16 + thread machine. + + - The connection multiplexer is pinned to CPU 0. + - The Ansible top-level (strategy) is pinned to CPU 1. + - WorkerProcesses are pinned sequentually to 2..N, wrapping around when no + more CPUs exist. + - Children such as SSH may be scheduled on any CPU except 0/1. + + If the machine has less than 4 cores available, the top-level and workers + are pinned between CPU 2..N, i.e. no CPU is reserved for the top-level + process. + + This could at least be improved by having workers pinned to independent + cores, before reusing the second hyperthread of an existing core. + + A hook is installed that causes :meth:`reset` to run in the child of any + process created with :func:`mitogen.parent.popen`, ensuring CPU-intensive + children like SSH are not forced to share the same core as the (otherwise + potentially very busy) parent. + """ + def __init__(self, cpu_count=None): + #: For tests. 
+ self.cpu_count = cpu_count or multiprocessing.cpu_count() + self.mem = mmap.mmap(-1, 4096) + self.state = State.from_buffer(self.mem) + self.state.lock.init() + + if self.cpu_count < 2: + # uniprocessor + self._reserve_mux = False + self._reserve_controller = False + self._reserve_mask = 0 + self._reserve_shift = 0 + elif self.cpu_count < 4: + # small SMP + self._reserve_mux = True + self._reserve_controller = False + self._reserve_mask = 1 + self._reserve_shift = 1 + else: + # big SMP + self._reserve_mux = True + self._reserve_controller = True + self._reserve_mask = 3 + self._reserve_shift = 2 + + def _set_affinity(self, descr, mask): + if descr: + LOG.debug('CPU mask for %s: %#08x', descr, mask) + mitogen.parent._preexec_hook = self._clear + self._set_cpu_mask(mask) + + def _balance(self, descr): + self.state.lock.acquire() + try: + n = self.state.counter + self.state.counter += 1 + finally: + self.state.lock.release() + + self._set_cpu(descr, self._reserve_shift + ( + (n % (self.cpu_count - self._reserve_shift)) + )) + + def _set_cpu(self, descr, cpu): + self._set_affinity(descr, 1 << (cpu % self.cpu_count)) + + def _clear(self): + all_cpus = (1 << self.cpu_count) - 1 + self._set_affinity(None, all_cpus & ~self._reserve_mask) + + def assign_controller(self): + if self._reserve_controller: + self._set_cpu('Ansible top-level process', 1) + else: + self._balance('Ansible top-level process') + + def assign_muxprocess(self, index): + self._set_cpu('MuxProcess %d' % (index,), index) + + def assign_worker(self): + self._balance('WorkerProcess') + + def assign_subprocess(self): + self._clear() + + +class LinuxPolicy(FixedPolicy): + def _mask_to_bytes(self, mask): + """ + Convert the (type long) mask to a cpu_set_t. 
+ """ + chunks = [] + shiftmask = (2 ** 64) - 1 + for x in range(16): + chunks.append(struct.pack('>= 64 + return mitogen.core.b('').join(chunks) + + def _get_thread_ids(self): + try: + ents = os.listdir('/proc/self/task') + except OSError: + LOG.debug('cannot fetch thread IDs for current process') + return [os.getpid()] + + return [int(s) for s in ents if s.isdigit()] + + def _set_cpu_mask(self, mask): + s = self._mask_to_bytes(mask) + for tid in self._get_thread_ids(): + _sched_setaffinity(tid, len(s), s) + + +if _sched_setaffinity is not None: + policy = LinuxPolicy() +else: + policy = Policy() diff --git a/mitogen-0.3.9/ansible_mitogen/compat/__init__.py b/mitogen-0.3.9/ansible_mitogen/compat/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/mitogen-0.3.9/ansible_mitogen/connection.py b/mitogen-0.3.9/ansible_mitogen/connection.py new file mode 100644 index 0000000..6bdf11b --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/connection.py @@ -0,0 +1,1134 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from __future__ import absolute_import, division, print_function +from __future__ import unicode_literals +__metaclass__ = type + +import errno +import logging +import os +import pprint +import stat +import sys +import time + +import ansible.constants as C +import ansible.errors +import ansible.plugins.connection + +import mitogen.core + +import ansible_mitogen.mixins +import ansible_mitogen.parsing +import ansible_mitogen.process +import ansible_mitogen.services +import ansible_mitogen.target +import ansible_mitogen.transport_config +import ansible_mitogen.utils.unsafe + + +LOG = logging.getLogger(__name__) + +task_vars_msg = ( + 'could not recover task_vars. This means some connection ' + 'settings may erroneously be reset to their defaults. ' + 'Please report a bug if you encounter this message.' +) + + +def get_remote_name(spec): + """ + Return the value to use for the "remote_name" parameter. + """ + if spec.mitogen_mask_remote_name(): + return 'ansible' + return None + + +def optional_int(value): + """ + Convert `value` to an integer if it is not :data:`None`, otherwise return + :data:`None`. 
+ """ + try: + return int(value) + except (TypeError, ValueError): + return None + + +def convert_bool(obj): + if isinstance(obj, bool): + return obj + if str(obj).lower() in ('no', 'false', '0'): + return False + if str(obj).lower() not in ('yes', 'true', '1'): + raise ansible.errors.AnsibleConnectionFailure( + 'expected yes/no/true/false/0/1, got %r' % (obj,) + ) + return True + + +def default(value, default): + """ + Return `default` is `value` is :data:`None`, otherwise return `value`. + """ + if value is None: + return default + return value + + +def _connect_local(spec): + """ + Return ContextService arguments for a local connection. + """ + return { + 'method': 'local', + 'kwargs': { + 'python_path': spec.python_path(), + } + } + + +def _connect_ssh(spec): + """ + Return ContextService arguments for an SSH connection. + """ + if spec.host_key_checking(): + check_host_keys = 'enforce' + else: + check_host_keys = 'ignore' + + # #334: tilde-expand private_key_file to avoid implementation difference + # between Python and OpenSSH. 
+ private_key_file = spec.private_key_file() + if private_key_file is not None: + private_key_file = os.path.expanduser(private_key_file) + + return { + 'method': 'ssh', + 'kwargs': { + 'check_host_keys': check_host_keys, + 'hostname': spec.remote_addr(), + 'username': spec.remote_user(), + 'compression': convert_bool( + default(spec.mitogen_ssh_compression(), True) + ), + 'password': spec.password(), + 'port': spec.port(), + 'python_path': spec.python_path(), + 'identity_file': private_key_file, + 'identities_only': False, + 'ssh_path': spec.ssh_executable(), + 'connect_timeout': spec.ansible_ssh_timeout(), + 'ssh_args': spec.ssh_args(), + 'ssh_debug_level': spec.mitogen_ssh_debug_level(), + 'remote_name': get_remote_name(spec), + 'keepalive_count': ( + spec.mitogen_ssh_keepalive_count() or 10 + ), + 'keepalive_interval': ( + spec.mitogen_ssh_keepalive_interval() or 30 + ), + } + } + +def _connect_buildah(spec): + """ + Return ContextService arguments for a Buildah connection. + """ + return { + 'method': 'buildah', + 'kwargs': { + 'username': spec.remote_user(), + 'container': spec.remote_addr(), + 'python_path': spec.python_path(), + 'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(), + 'remote_name': get_remote_name(spec), + } + } + +def _connect_docker(spec): + """ + Return ContextService arguments for a Docker connection. + """ + return { + 'method': 'docker', + 'kwargs': { + 'username': spec.remote_user(), + 'container': spec.remote_addr(), + 'python_path': spec.python_path(rediscover_python=True), + 'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(), + 'remote_name': get_remote_name(spec), + } + } + + +def _connect_kubectl(spec): + """ + Return ContextService arguments for a Kubernetes connection. 
+ """ + return { + 'method': 'kubectl', + 'kwargs': { + 'pod': spec.remote_addr(), + 'python_path': spec.python_path(), + 'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(), + 'kubectl_path': spec.mitogen_kubectl_path(), + 'kubectl_args': spec.extra_args(), + 'remote_name': get_remote_name(spec), + } + } + + +def _connect_jail(spec): + """ + Return ContextService arguments for a FreeBSD jail connection. + """ + return { + 'method': 'jail', + 'kwargs': { + 'username': spec.remote_user(), + 'container': spec.remote_addr(), + 'python_path': spec.python_path(), + 'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(), + 'remote_name': get_remote_name(spec), + } + } + + +def _connect_lxc(spec): + """ + Return ContextService arguments for an LXC Classic container connection. + """ + return { + 'method': 'lxc', + 'kwargs': { + 'container': spec.remote_addr(), + 'python_path': spec.python_path(), + 'lxc_attach_path': spec.mitogen_lxc_attach_path(), + 'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(), + 'remote_name': get_remote_name(spec), + } + } + + +def _connect_lxd(spec): + """ + Return ContextService arguments for an LXD container connection. + """ + return { + 'method': 'lxd', + 'kwargs': { + 'container': spec.remote_addr(), + 'python_path': spec.python_path(), + 'lxc_path': spec.mitogen_lxc_path(), + 'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(), + 'remote_name': get_remote_name(spec), + } + } + + +def _connect_machinectl(spec): + """ + Return ContextService arguments for a machinectl connection. + """ + return _connect_setns(spec, kind='machinectl') + + +def _connect_podman(spec): + """ + Return ContextService arguments for a Docker connection. 
+ """ + return { + 'method': 'podman', + 'kwargs': { + 'username': spec.remote_user(), + 'container': spec.remote_addr(), + 'python_path': spec.python_path(rediscover_python=True), + 'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(), + 'remote_name': get_remote_name(spec), + } + } + +def _connect_setns(spec, kind=None): + """ + Return ContextService arguments for a mitogen_setns connection. + """ + return { + 'method': 'setns', + 'kwargs': { + 'container': spec.remote_addr(), + 'username': spec.remote_user(), + 'python_path': spec.python_path(), + 'kind': kind or spec.mitogen_kind(), + 'docker_path': spec.mitogen_docker_path(), + 'lxc_path': spec.mitogen_lxc_path(), + 'lxc_info_path': spec.mitogen_lxc_info_path(), + 'machinectl_path': spec.mitogen_machinectl_path(), + } + } + + +def _connect_su(spec): + """ + Return ContextService arguments for su as a become method. + """ + return { + 'method': 'su', + 'enable_lru': True, + 'kwargs': { + 'username': spec.become_user(), + 'password': spec.become_pass(), + 'python_path': spec.python_path(), + 'su_path': spec.become_exe(), + 'connect_timeout': spec.timeout(), + 'remote_name': get_remote_name(spec), + } + } + + +def _connect_sudo(spec): + """ + Return ContextService arguments for sudo as a become method. + """ + return { + 'method': 'sudo', + 'enable_lru': True, + 'kwargs': { + 'username': spec.become_user(), + 'password': spec.become_pass(), + 'python_path': spec.python_path(), + 'sudo_path': spec.become_exe(), + 'connect_timeout': spec.timeout(), + 'sudo_args': spec.sudo_args(), + 'remote_name': get_remote_name(spec), + } + } + + +def _connect_doas(spec): + """ + Return ContextService arguments for doas as a become method. 
+ """ + return { + 'method': 'doas', + 'enable_lru': True, + 'kwargs': { + 'username': spec.become_user(), + 'password': spec.become_pass(), + 'python_path': spec.python_path(), + 'doas_path': spec.become_exe(), + 'connect_timeout': spec.timeout(), + 'remote_name': get_remote_name(spec), + } + } + + +def _connect_mitogen_su(spec): + """ + Return ContextService arguments for su as a first class connection. + """ + return { + 'method': 'su', + 'kwargs': { + 'username': spec.remote_user(), + 'password': spec.password(), + 'python_path': spec.python_path(), + 'su_path': spec.become_exe(), + 'connect_timeout': spec.timeout(), + 'remote_name': get_remote_name(spec), + } + } + + +def _connect_mitogen_sudo(spec): + """ + Return ContextService arguments for sudo as a first class connection. + """ + return { + 'method': 'sudo', + 'kwargs': { + 'username': spec.remote_user(), + 'password': spec.password(), + 'python_path': spec.python_path(), + 'sudo_path': spec.become_exe(), + 'connect_timeout': spec.timeout(), + 'sudo_args': spec.sudo_args(), + 'remote_name': get_remote_name(spec), + } + } + + +def _connect_mitogen_doas(spec): + """ + Return ContextService arguments for doas as a first class connection. + """ + return { + 'method': 'doas', + 'kwargs': { + 'username': spec.remote_user(), + 'password': spec.password(), + 'python_path': spec.python_path(), + 'doas_path': spec.ansible_doas_exe(), + 'connect_timeout': spec.timeout(), + 'remote_name': get_remote_name(spec), + } + } + + +#: Mapping of connection method names to functions invoked as `func(spec)` +#: generating ContextService keyword arguments matching a connection +#: specification. 
+CONNECTION_METHOD = { + 'buildah': _connect_buildah, + 'docker': _connect_docker, + 'kubectl': _connect_kubectl, + 'jail': _connect_jail, + 'local': _connect_local, + 'lxc': _connect_lxc, + 'lxd': _connect_lxd, + 'machinectl': _connect_machinectl, + 'podman': _connect_podman, + 'setns': _connect_setns, + 'ssh': _connect_ssh, + 'smart': _connect_ssh, # issue #548. + 'su': _connect_su, + 'sudo': _connect_sudo, + 'doas': _connect_doas, + 'mitogen_su': _connect_mitogen_su, + 'mitogen_sudo': _connect_mitogen_sudo, + 'mitogen_doas': _connect_mitogen_doas, +} + + +class CallChain(mitogen.parent.CallChain): + """ + Extend :class:`mitogen.parent.CallChain` to additionally cause the + associated :class:`Connection` to be reset if a ChannelError occurs. + + This only catches failures that occur while a call is pending, it is a + stop-gap until a more general method is available to notice connection in + every situation. + """ + call_aborted_msg = ( + 'Mitogen was disconnected from the remote environment while a call ' + 'was in-progress. If you feel this is in error, please file a bug. ' + 'Original error was: %s' + ) + + def __init__(self, connection, context, pipelined=False): + super(CallChain, self).__init__(context, pipelined) + #: The connection to reset on CallError. + self._connection = connection + + def _rethrow(self, recv): + try: + return recv.get().unpickle() + except mitogen.core.ChannelError as e: + self._connection.reset() + raise ansible.errors.AnsibleConnectionFailure( + self.call_aborted_msg % (e,) + ) + + def call(self, func, *args, **kwargs): + """ + Like :meth:`mitogen.parent.CallChain.call`, but log timings. 
+ """ + t0 = time.time() + try: + recv = self.call_async(func, *args, **kwargs) + return self._rethrow(recv) + finally: + LOG.debug('Call took %d ms: %r', 1000 * (time.time() - t0), + mitogen.parent.CallSpec(func, args, kwargs)) + + +class Connection(ansible.plugins.connection.ConnectionBase): + #: The :class:`ansible_mitogen.process.Binding` representing the connection + #: multiplexer this connection's target is assigned to. :data:`None` when + #: disconnected. + binding = None + + #: mitogen.parent.Context for the target account on the target, possibly + #: reached via become. + context = None + + #: Context for the login account on the target. This is always the login + #: account, even when become=True. + login_context = None + + #: Only sudo, su, and doas are supported for now. + # Ansible ConnectionBase attribute, removed in Ansible >= 2.8 + become_methods = ['sudo', 'su', 'doas'] + + #: Dict containing init_child() return value as recorded at startup by + #: ContextService. Contains: + #: + #: fork_context: Context connected to the fork parent : process in the + #: target account. + #: home_dir: Target context's home directory. + #: good_temp_dir: A writeable directory where new temporary directories + #: can be created. + init_child_result = None + + #: A :class:`mitogen.parent.CallChain` for calls made to the target + #: account, to ensure subsequent calls fail with the original exception if + #: pipelined directory creation or file transfer fails. + chain = None + + # + # Note: any of the attributes below may be :data:`None` if the connection + # plugin was constructed directly by a non-cooperative action, such as in + # the case of the synchronize module. + # + + #: Set to task_vars by on_action_run(). + _task_vars = None + + #: Set by on_action_run() + delegate_to_hostname = None + + #: Set to '_loader.get_basedir()' by on_action_run(). 
Used by mitogen_local + #: to change the working directory to that of the current playbook, + #: matching vanilla Ansible behaviour. + loader_basedir = None + + # set by `_get_task_vars()` for interpreter discovery + _action = None + + def on_action_run(self, task_vars, delegate_to_hostname, loader_basedir): + """ + Invoked by ActionModuleMixin to indicate a new task is about to start + executing. We use the opportunity to grab relevant bits from the + task-specific data. + + :param dict task_vars: + Task variable dictionary. + :param str delegate_to_hostname: + :data:`None`, or the template-expanded inventory hostname this task + is being delegated to. A similar variable exists on PlayContext + when ``delegate_to:`` is active, however it is unexpanded. + :param str loader_basedir: + Loader base directory; see :attr:`loader_basedir`. + """ + self._task_vars = task_vars + self.delegate_to_hostname = delegate_to_hostname + self.loader_basedir = loader_basedir + self._put_connection() + + def _get_task_vars(self): + """ + More information is needed than normally provided to an Ansible + connection. For proxied connections, intermediary configuration must + be inferred, and for any connection the configured Python interpreter + must be known. + + There is no clean way to access this information that would not deviate + from the running Ansible version. The least invasive method known is to + reuse the running task's task_vars dict. + + This method walks the stack to find task_vars of the Action plugin's + run(), or if no Action is present, from Strategy's _execute_meta(), as + in the case of 'meta: reset_connection'. The stack is walked in + addition to subclassing Action.run()/on_action_run(), as it is possible + for new connections to be constructed in addition to the preconstructed + connection passed into any running action. 
+ """ + if self._task_vars is not None: + # check for if self._action has already been set or not + # there are some cases where the ansible executor passes in task_vars + # so we don't walk the stack to find them + # TODO: is there a better way to get the ActionModuleMixin object? + # ansible python discovery needs it to run discover_interpreter() + if not isinstance(self._action, ansible_mitogen.mixins.ActionModuleMixin): + f = sys._getframe() + while f: + if f.f_code.co_name == 'run': + f_self = f.f_locals.get('self') + if isinstance(f_self, ansible_mitogen.mixins.ActionModuleMixin): + self._action = f_self + break + elif f.f_code.co_name == '_execute_meta': + break + f = f.f_back + + return self._task_vars + + f = sys._getframe() + while f: + if f.f_code.co_name == 'run': + f_locals = f.f_locals + f_self = f_locals.get('self') + if isinstance(f_self, ansible_mitogen.mixins.ActionModuleMixin): + # backref for python interpreter discovery, should be safe because _get_task_vars + # is always called before running interpreter discovery + self._action = f_self + task_vars = f_locals.get('task_vars') + if task_vars: + LOG.debug('recovered task_vars from Action') + return task_vars + elif f.f_code.co_name == '_execute_meta': + f_all_vars = f.f_locals.get('all_vars') + if isinstance(f_all_vars, dict): + LOG.debug('recovered task_vars from meta:') + return f_all_vars + + f = f.f_back + + raise ansible.errors.AnsibleConnectionFailure(task_vars_msg) + + def get_host_vars(self, inventory_hostname): + """ + Fetch the HostVars for a host. + + :returns: + Variables dictionary or :data:`None`. + :raises ansible.errors.AnsibleConnectionFailure: + Task vars unavailable. 
+ """ + task_vars = self._get_task_vars() + hostvars = task_vars.get('hostvars') + if hostvars: + return hostvars.get(inventory_hostname) + + raise ansible.errors.AnsibleConnectionFailure(task_vars_msg) + + def get_task_var(self, key, default=None): + """ + Fetch the value of a task variable related to connection configuration, + or, if delegate_to is active, fetch the same variable via HostVars for + the delegated-to machine. + + When running with delegate_to, Ansible tasks have variables associated + with the original machine, not the delegated-to machine, therefore it + does not make sense to extract connection-related configuration for the + delegated-to machine from them. + """ + def _fetch_task_var(task_vars, key): + """ + Special helper func in case vars can be templated + """ + SPECIAL_TASK_VARS = [ + 'ansible_python_interpreter' + ] + if key in task_vars: + val = task_vars[key] + if '{' in str(val) and key in SPECIAL_TASK_VARS: + # template every time rather than storing in a cache + # in case a different template value is used in a different task + val = self.templar.template( + val, + preserve_trailing_newlines=True, + escape_backslashes=False + ) + return val + + task_vars = self._get_task_vars() + if self.delegate_to_hostname is None: + return _fetch_task_var(task_vars, key) + else: + delegated_vars = task_vars['ansible_delegated_vars'] + if self.delegate_to_hostname in delegated_vars: + task_vars = delegated_vars[self.delegate_to_hostname] + return _fetch_task_var(task_vars, key) + + return default + + @property + def homedir(self): + self._connect() + return self.init_child_result['home_dir'] + + def get_binding(self): + """ + Return the :class:`ansible_mitogen.process.Binding` representing the + process that hosts the physical connection and services (context + establishment, file transfer, ..) for our desired target. 
+ """ + assert self.binding is not None + return self.binding + + @property + def connected(self): + """ + Ansible connection plugin property. Used by ansible-connection command. + """ + return self.context is not None + + def _spec_from_via(self, proxied_inventory_name, via_spec): + """ + Produce a dict connection specifiction given a string `via_spec`, of + the form `[[become_method:]become_user@]inventory_hostname`. + """ + become_user, _, inventory_name = via_spec.rpartition('@') + become_method, _, become_user = become_user.rpartition(':') + + # must use __contains__ to avoid a TypeError for a missing host on + # Ansible 2.3. + via_vars = self.get_host_vars(inventory_name) + if via_vars is None: + raise ansible.errors.AnsibleConnectionFailure( + self.unknown_via_msg % ( + via_spec, + proxied_inventory_name, + ) + ) + + return ansible_mitogen.transport_config.MitogenViaSpec( + inventory_name=inventory_name, + play_context=self._play_context, + host_vars=dict(via_vars), # TODO: make it lazy + task_vars=self._get_task_vars(), # needed for interpreter discovery in parse_python_path + action=self._action, + become_method=become_method or None, + become_user=become_user or None, + ) + + unknown_via_msg = 'mitogen_via=%s of %s specifies an unknown hostname' + via_cycle_msg = 'mitogen_via=%s of %s creates a cycle (%s)' + + def _stack_from_spec(self, spec, stack=(), seen_names=()): + """ + Return a tuple of ContextService parameter dictionaries corresponding + to the connection described by `spec`, and any connection referenced by + its `mitogen_via` or `become` fields. Each element is a dict of the + form:: + + { + # Optional. If present and `True`, this hop is elegible for + # interpreter recycling. + "enable_lru": True, + # mitogen.master.Router method name. + "method": "ssh", + # mitogen.master.Router method kwargs. + "kwargs": { + "hostname": "..." + } + } + + :param ansible_mitogen.transport_config.Spec spec: + Connection specification. 
+ :param tuple stack: + Stack elements from parent call (used for recursion). + :param tuple seen_names: + Inventory hostnames from parent call (cycle detection). + :returns: + Tuple `(stack, seen_names)`. + """ + if spec.inventory_name() in seen_names: + raise ansible.errors.AnsibleConnectionFailure( + self.via_cycle_msg % ( + spec.mitogen_via(), + spec.inventory_name(), + ' -> '.join(reversed( + seen_names + (spec.inventory_name(),) + )), + ) + ) + + if spec.mitogen_via(): + stack = self._stack_from_spec( + self._spec_from_via(spec.inventory_name(), spec.mitogen_via()), + stack=stack, + seen_names=seen_names + (spec.inventory_name(),), + ) + + stack += (CONNECTION_METHOD[spec.transport()](spec),) + if spec.become() and ((spec.become_user() != spec.remote_user()) or + C.BECOME_ALLOW_SAME_USER): + stack += (CONNECTION_METHOD[spec.become_method()](spec),) + + return stack + + def _build_stack(self): + """ + Construct a list of dictionaries representing the connection + configuration between the controller and the target. This is + additionally used by the integration tests "mitogen_get_stack" action + to fetch the would-be connection configuration. + """ + spec = ansible_mitogen.transport_config.PlayContextSpec( + connection=self, + play_context=self._play_context, + transport=self.transport, + inventory_name=self.get_task_var('inventory_hostname'), + ) + stack = self._stack_from_spec(spec) + return spec.inventory_name(), stack + + def _connect_stack(self, stack): + """ + Pass `stack` to ContextService, requesting a copy of the context object + representing the last tuple element. If no connection exists yet, + ContextService will recursively establish it before returning it or + throwing an error. + + See :meth:`ansible_mitogen.services.ContextService.get` docstring for + description of the returned dictionary. 
+ """ + try: + dct = mitogen.service.call( + call_context=self.binding.get_service_context(), + service_name='ansible_mitogen.services.ContextService', + method_name='get', + stack=ansible_mitogen.utils.unsafe.cast(list(stack)), + ) + except mitogen.core.CallError: + LOG.warning('Connection failed; stack configuration was:\n%s', + pprint.pformat(stack)) + raise + + if dct['msg']: + if dct['method_name'] in self.become_methods: + raise ansible.errors.AnsibleModuleError(dct['msg']) + raise ansible.errors.AnsibleConnectionFailure(dct['msg']) + + self.context = dct['context'] + self.chain = CallChain(self, self.context, pipelined=True) + if self._play_context.become: + self.login_context = dct['via'] + else: + self.login_context = self.context + + self.init_child_result = dct['init_child_result'] + + def get_good_temp_dir(self): + """ + Return the 'good temporary directory' as discovered by + :func:`ansible_mitogen.target.init_child` immediately after + ContextService constructed the target context. + """ + self._connect() + return self.init_child_result['good_temp_dir'] + + def _connect(self): + """ + Establish a connection to the master process's UNIX listener socket, + constructing a mitogen.master.Router to communicate with the master, + and a mitogen.parent.Context to represent it. + + Depending on the original transport we should emulate, trigger one of + the _connect_*() service calls defined above to cause the master + process to establish the real connection on our behalf, or return a + reference to the existing one. + + Ansible connection plugin method. + """ + # In some Ansible connection plugins this method returns self. + # However nothing I've found uses it, it's not even assigned. 
+ if self.connected: + return + + inventory_name, stack = self._build_stack() + worker_model = ansible_mitogen.process.get_worker_model() + self.binding = worker_model.get_binding( + ansible_mitogen.utils.unsafe.cast(inventory_name) + ) + self._connect_stack(stack) + + def _put_connection(self): + """ + Forget everything we know about the connected context. This function + cannot be called _reset() since that name is used as a public API by + Ansible 2.4 wait_for_connection plug-in. + """ + if not self.context: + return + + self.chain.reset() + mitogen.service.call( + call_context=self.binding.get_service_context(), + service_name='ansible_mitogen.services.ContextService', + method_name='put', + context=self.context + ) + + self.context = None + self.login_context = None + self.init_child_result = None + self.chain = None + + def close(self): + """ + Arrange for the mitogen.master.Router running in the worker to + gracefully shut down, and wait for shutdown to complete. Safe to call + multiple times. + + Ansible connection plugin method. + """ + self._put_connection() + if self.binding: + self.binding.close() + self.binding = None + + reset_compat_msg = ( + 'Mitogen only supports "reset_connection" on Ansible 2.5.6 or later' + ) + + def reset(self): + """ + Explicitly terminate the connection to the remote host. This discards + any local state we hold for the connection, returns the Connection to + the 'disconnected' state, and informs ContextService the connection is + bad somehow, and should be shut down and discarded. + + Ansible connection plugin method. 
+ """ + if self._play_context.remote_addr is None: + # <2.5.6 incorrectly populate PlayContext for reset_connection + # https://github.com/ansible/ansible/issues/27520 + raise ansible.errors.AnsibleConnectionFailure( + self.reset_compat_msg + ) + + # Strategy's _execute_meta doesn't have an action obj but we'll need one for + # running interpreter_discovery + # will create a new temporary action obj for this purpose + self._action = ansible_mitogen.mixins.ActionModuleMixin( + task=0, + connection=self, + play_context=self._play_context, + loader=0, + templar=0, + shared_loader_obj=0 + ) + + # Clear out state in case we were ever connected. + self.close() + + inventory_name, stack = self._build_stack() + if self._play_context.become: + stack = stack[:-1] + + worker_model = ansible_mitogen.process.get_worker_model() + binding = worker_model.get_binding(inventory_name) + try: + mitogen.service.call( + call_context=binding.get_service_context(), + service_name='ansible_mitogen.services.ContextService', + method_name='reset', + stack=ansible_mitogen.utils.unsafe.cast(list(stack)), + ) + finally: + binding.close() + + # Compatibility with Ansible 2.4 wait_for_connection plug-in. + _reset = reset + + def get_chain(self, use_login=False, use_fork=False): + """ + Return the :class:`mitogen.parent.CallChain` to use for executing + function calls. + + :param bool use_login: + If :data:`True`, always return the chain for the login account + rather than any active become user. + :param bool use_fork: + If :data:`True`, return the chain for the fork parent. + :returns mitogen.parent.CallChain: + """ + self._connect() + if use_login: + return self.login_context.default_call_chain + # See FORK_SUPPORTED comments in target.py. + if use_fork and self.init_child_result['fork_context'] is not None: + return self.init_child_result['fork_context'].default_call_chain + return self.chain + + def spawn_isolated_child(self): + """ + Fork or launch a new child off the target context. 
+ + :returns: + mitogen.core.Context of the new child. + """ + return self.get_chain(use_fork=True).call( + ansible_mitogen.target.spawn_isolated_child + ) + + def get_extra_args(self): + """ + Overridden by connections/mitogen_kubectl.py to a list of additional + arguments for the command. + """ + # TODO: maybe use this for SSH too. + return [] + + def get_default_cwd(self): + """ + Overridden by connections/mitogen_local.py to emulate behaviour of CWD + being fixed to that of ActionBase._loader.get_basedir(). + """ + return None + + def get_default_env(self): + """ + Overridden by connections/mitogen_local.py to emulate behaviour of + WorkProcess environment inherited from WorkerProcess. + """ + return None + + def exec_command(self, cmd, in_data='', sudoable=True, mitogen_chdir=None): + """ + Implement exec_command() by calling the corresponding + ansible_mitogen.target function in the target. + + :param str cmd: + Shell command to execute. + :param bytes in_data: + Data to supply on ``stdin`` of the process. + :returns: + (return code, stdout bytes, stderr bytes) + + Ansible connection plugin method. + """ + emulate_tty = (not in_data and sudoable) + rc, stdout, stderr = self.get_chain().call( + ansible_mitogen.target.exec_command, + cmd=ansible_mitogen.utils.unsafe.cast(cmd), + in_data=ansible_mitogen.utils.unsafe.cast(in_data), + chdir=mitogen_chdir or self.get_default_cwd(), + emulate_tty=emulate_tty, + ) + + stderr += b'Shared connection to %s closed.%s' % ( + self._play_context.remote_addr.encode(), + (b'\r\n' if emulate_tty else b'\n'), + ) + return rc, stdout, stderr + + def fetch_file(self, in_path, out_path): + """ + Implement fetch_file() by calling the corresponding + ansible_mitogen.target function in the target. + + :param str in_path: + Remote filesystem path to read. + :param str out_path: + Local filesystem path to write. + + Ansible connection plugin method. 
+ """ + self._connect() + ansible_mitogen.target.transfer_file( + context=self.context, + # in_path may be AnsibleUnicode + in_path=ansible_mitogen.utils.unsafe.cast(in_path), + out_path=out_path + ) + + def put_data(self, out_path, data, mode=None, utimes=None): + """ + Implement put_file() by caling the corresponding ansible_mitogen.target + function in the target, transferring small files inline. This is + pipelined and will return immediately; failed transfers are reported as + exceptions in subsequent functon calls. + + :param str out_path: + Remote filesystem path to write. + :param byte data: + File contents to put. + """ + self.get_chain().call_no_reply( + ansible_mitogen.target.write_path, + ansible_mitogen.utils.unsafe.cast(out_path), + mitogen.core.Blob(data), + mode=mode, + utimes=utimes, + ) + + #: Maximum size of a small file before switching to streaming + #: transfer. This should really be the same as + #: mitogen.services.FileService.IO_SIZE, however the message format has + #: slightly more overhead, so just randomly subtract 4KiB. + SMALL_FILE_LIMIT = mitogen.core.CHUNK_SIZE - 4096 + + def _throw_io_error(self, e, path): + if e.args[0] == errno.ENOENT: + s = 'file or module does not exist: ' + path + raise ansible.errors.AnsibleFileNotFound(s) + + def put_file(self, in_path, out_path): + """ + Implement put_file() by streamily transferring the file via + FileService. + + :param str in_path: + Local filesystem path to read. + :param str out_path: + Remote filesystem path to write. + + Ansible connection plugin method. + """ + try: + st = os.stat(in_path) + except OSError as e: + self._throw_io_error(e, in_path) + raise + + if not stat.S_ISREG(st.st_mode): + raise IOError('%r is not a regular file.' % (in_path,)) + + # If the file is sufficiently small, just ship it in the argument list + # rather than introducing an extra RTT for the child to request it from + # FileService. 
+ if st.st_size <= self.SMALL_FILE_LIMIT: + try: + fp = open(in_path, 'rb') + try: + s = fp.read(self.SMALL_FILE_LIMIT + 1) + finally: + fp.close() + except OSError as e: + self._throw_io_error(e, in_path) + raise + + # Ensure did not grow during read. + if len(s) == st.st_size: + return self.put_data(out_path, s, mode=st.st_mode, + utimes=(st.st_atime, st.st_mtime)) + + self._connect() + mitogen.service.call( + call_context=self.binding.get_service_context(), + service_name='mitogen.service.FileService', + method_name='register', + path=ansible_mitogen.utils.unsafe.cast(in_path) + ) + + # For now this must remain synchronous, as the action plug-in may have + # passed us a temporary file to transfer. A future FileService could + # maintain an LRU list of open file descriptors to keep the temporary + # file alive, but that requires more work. + self.get_chain().call( + ansible_mitogen.target.transfer_file, + context=self.binding.get_child_service_context(), + in_path=in_path, + out_path=out_path + ) diff --git a/mitogen-0.3.9/ansible_mitogen/loaders.py b/mitogen-0.3.9/ansible_mitogen/loaders.py new file mode 100644 index 0000000..9d9876a --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/loaders.py @@ -0,0 +1,103 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +""" +Stable names for PluginLoader instances across Ansible versions. +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import ansible.errors + +import ansible_mitogen.utils + +__all__ = [ + 'action_loader', + 'become_loader', + 'connection_loader', + 'module_loader', + 'module_utils_loader', + 'shell_loader', + 'strategy_loader', +] + + +ANSIBLE_VERSION_MIN = (2, 10) +ANSIBLE_VERSION_MAX = (2, 17) + +NEW_VERSION_MSG = ( + "Your Ansible version (%s) is too recent. The most recent version\n" + "supported by Mitogen for Ansible is %s.x. Please check the Mitogen\n" + "release notes to see if a new version is available, otherwise\n" + "subscribe to the corresponding GitHub issue to be notified when\n" + "support becomes available.\n" + "\n" + " https://mitogen.rtfd.io/en/latest/changelog.html\n" + " https://github.com/mitogen-hq/mitogen/issues/\n" +) +OLD_VERSION_MSG = ( + "Your version of Ansible (%s) is too old. The oldest version supported by " + "Mitogen for Ansible is %s." +) + + +def assert_supported_release(): + """ + Throw AnsibleError with a descriptive message in case of being loaded into + an unsupported Ansible release. 
+ """ + v = ansible_mitogen.utils.ansible_version + if v[:2] < ANSIBLE_VERSION_MIN: + raise ansible.errors.AnsibleError( + OLD_VERSION_MSG % (v, ANSIBLE_VERSION_MIN) + ) + + if v[:2] > ANSIBLE_VERSION_MAX: + raise ansible.errors.AnsibleError( + NEW_VERSION_MSG % (v, ANSIBLE_VERSION_MAX) + ) + + +# this is the first file our strategy plugins import, so we need to check this here +# in prior Ansible versions, connection_loader.get_with_context didn't exist, so if a user +# is trying to load an old Ansible version, we'll fail and error gracefully +assert_supported_release() + + +from ansible.plugins.loader import action_loader +from ansible.plugins.loader import become_loader +from ansible.plugins.loader import connection_loader +from ansible.plugins.loader import module_loader +from ansible.plugins.loader import module_utils_loader +from ansible.plugins.loader import shell_loader +from ansible.plugins.loader import strategy_loader + +# These are original, unwrapped implementations +action_loader__get = action_loader.get +connection_loader__get = connection_loader.get_with_context diff --git a/mitogen-0.3.9/ansible_mitogen/logging.py b/mitogen-0.3.9/ansible_mitogen/logging.py new file mode 100644 index 0000000..40b2b33 --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/logging.py @@ -0,0 +1,130 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. 
Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import logging +import os + +import mitogen.core +import mitogen.utils + +try: + from __main__ import display +except ImportError: + import ansible.utils.display + display = ansible.utils.display.Display() + + +#: The process name set via :func:`set_process_name`. +_process_name = None + +#: The PID of the process that last called :func:`set_process_name`, so its +#: value can be ignored in unknown fork children. +_process_pid = None + + +def set_process_name(name): + """ + Set a name to adorn log messages with. + """ + global _process_name + _process_name = name + + global _process_pid + _process_pid = os.getpid() + + +class Handler(logging.Handler): + """ + Use Mitogen's log format, but send the result to a Display method. 
+ """ + def __init__(self, normal_method): + logging.Handler.__init__(self) + self.formatter = mitogen.utils.log_get_formatter() + self.normal_method = normal_method + + #: Set of target loggers that produce warnings and errors that spam the + #: console needlessly. Their log level is forced to INFO. A better strategy + #: may simply be to bury all target logs in DEBUG output, but not by + #: overriding their log level as done here. + NOISY_LOGGERS = frozenset([ + 'dnf', # issue #272; warns when a package is already installed. + 'boto', # issue #541; normal boto retry logic can cause ERROR logs. + ]) + + def emit(self, record): + mitogen_name = getattr(record, 'mitogen_name', '') + if mitogen_name == 'stderr': + record.levelno = logging.ERROR + if mitogen_name in self.NOISY_LOGGERS and record.levelno >= logging.WARNING: + record.levelno = logging.DEBUG + + if _process_pid == os.getpid(): + process_name = _process_name + else: + process_name = '?' + + s = '[%-4s %d] %s' % (process_name, os.getpid(), self.format(record)) + if record.levelno >= logging.ERROR: + display.error(s, wrap_text=False) + elif record.levelno >= logging.WARNING: + display.warning(s, formatted=True) + else: + self.normal_method(s) + + +def setup(): + """ + Install handlers for Mitogen loggers to redirect them into the Ansible + display framework. Ansible installs its own logging framework handlers when + C.DEFAULT_LOG_PATH is set, therefore disable propagation for our handlers. 
+ """ + l_mitogen = logging.getLogger('mitogen') + l_mitogen_io = logging.getLogger('mitogen.io') + l_ansible_mitogen = logging.getLogger('ansible_mitogen') + l_operon = logging.getLogger('operon') + + for logger in l_mitogen, l_mitogen_io, l_ansible_mitogen, l_operon: + logger.handlers = [Handler(display.vvv)] + logger.propagate = False + + if display.verbosity > 2: + l_ansible_mitogen.setLevel(logging.DEBUG) + l_mitogen.setLevel(logging.DEBUG) + else: + # Mitogen copies the active log level into new children, allowing them + # to filter tiny messages before they hit the network, and therefore + # before they wake the IO loop. Explicitly setting INFO saves ~4% + # running against just the local machine. + l_mitogen.setLevel(logging.ERROR) + l_ansible_mitogen.setLevel(logging.ERROR) + + if display.verbosity > 3: + l_mitogen_io.setLevel(logging.DEBUG) diff --git a/mitogen-0.3.9/ansible_mitogen/mixins.py b/mitogen-0.3.9/ansible_mitogen/mixins.py new file mode 100644 index 0000000..0ba41aa --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/mixins.py @@ -0,0 +1,533 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import logging +import os +import pwd +import random +import traceback + +try: + from shlex import quote as shlex_quote +except ImportError: + from pipes import quote as shlex_quote + +from ansible.module_utils._text import to_bytes +from ansible.parsing.utils.jsonify import jsonify + +import ansible +import ansible.constants +import ansible.plugins +import ansible.plugins.action + +import mitogen.core +import mitogen.select + +import ansible_mitogen.connection +import ansible_mitogen.planner +import ansible_mitogen.target +import ansible_mitogen.utils +import ansible_mitogen.utils.unsafe + +from ansible.module_utils._text import to_text + +try: + from ansible.utils.unsafe_proxy import wrap_var +except ImportError: + from ansible.vars.unsafe_proxy import wrap_var + +try: + # ansible 2.8 moved remove_internal_keys to the clean module + from ansible.vars.clean import remove_internal_keys +except ImportError: + try: + from ansible.vars.manager import remove_internal_keys + except ImportError: + # ansible 2.3.3 has remove_internal_keys as a protected func on the action class + # we'll fallback to 
calling self._remove_internal_keys in this case + remove_internal_keys = lambda a: "Not found" + + +LOG = logging.getLogger(__name__) + + +class ActionModuleMixin(ansible.plugins.action.ActionBase): + """ + The Mitogen-patched PluginLoader dynamically mixes this into every action + class that Ansible attempts to load. It exists to override all the + assumptions built into the base action class that should really belong in + some middle layer, or at least in the connection layer. + + Functionality is defined here for: + + * Capturing the final set of task variables and giving Connection a chance + to update its idea of the correct execution environment, before any + attempt is made to call a Connection method. While it's not expected for + the interpreter to change on a per-task basis, Ansible permits this, and + so it must be supported. + + * Overriding lots of methods that try to call out to shell for mundane + reasons, such as copying files around, changing file permissions, + creating temporary directories and suchlike. + + * Short-circuiting any use of Ansiballz or related code for executing a + module remotely using shell commands and SSH. + + * Short-circuiting most of the logic in dealing with the fact that Ansible + always runs become: tasks across at least the SSH user account and the + destination user account, and handling the security permission issues + that crop up due to this. Mitogen always runs a task completely within + the target user account, so it's not a problem for us. + """ + def __init__(self, task, connection, *args, **kwargs): + """ + Verify the received connection is really a Mitogen connection. If not, + transmute this instance back into the original unadorned base class. + + This allows running the Mitogen strategy in mixed-target playbooks, + where some targets use SSH while others use WinRM or some fancier UNIX + connection plug-in. 
That's because when the Mitogen strategy is active, + ActionModuleMixin is unconditionally mixed into any action module that + is instantiated, and there is no direct way for the monkey-patch to + know what kind of connection will be used upfront. + """ + super(ActionModuleMixin, self).__init__(task, connection, *args, **kwargs) + if not isinstance(connection, ansible_mitogen.connection.Connection): + _, self.__class__ = type(self).__bases__ + + # required for python interpreter discovery + connection.templar = self._templar + self._finding_python_interpreter = False + self._rediscovered_python = False + # redeclaring interpreter discovery vars here in case running ansible < 2.8.0 + self._discovered_interpreter_key = None + self._discovered_interpreter = False + self._discovery_deprecation_warnings = [] + self._discovery_warnings = [] + + def run(self, tmp=None, task_vars=None): + """ + Override run() to notify Connection of task-specific data, so it has a + chance to know e.g. the Python interpreter in use. + """ + self._connection.on_action_run( + task_vars=task_vars, + delegate_to_hostname=self._task.delegate_to, + loader_basedir=self._loader.get_basedir(), + ) + return super(ActionModuleMixin, self).run(tmp, task_vars) + + COMMAND_RESULT = { + 'rc': 0, + 'stdout': '', + 'stdout_lines': [], + 'stderr': '' + } + + def fake_shell(self, func, stdout=False): + """ + Execute a function and decorate its return value in the style of + _low_level_execute_command(). This produces a return value that looks + like some shell command was run, when really func() was implemented + entirely in Python. + + If the function raises :py:class:`mitogen.core.CallError`, this will be + translated into a failed shell command with a non-zero exit status. + + :param func: + Function invoked as `func()`. + :returns: + See :py:attr:`COMMAND_RESULT`. 
+ """ + dct = self.COMMAND_RESULT.copy() + try: + rc = func() + if stdout: + dct['stdout'] = repr(rc) + except mitogen.core.CallError: + LOG.exception('While emulating a shell command') + dct['rc'] = 1 + dct['stderr'] = traceback.format_exc() + + return dct + + def _remote_file_exists(self, path): + """ + Determine if `path` exists by directly invoking os.path.exists() in the + target user account. + """ + LOG.debug('_remote_file_exists(%r)', path) + return self._connection.get_chain().call( + ansible_mitogen.target.file_exists, + ansible_mitogen.utils.unsafe.cast(path) + ) + + def _configure_module(self, module_name, module_args, task_vars=None): + """ + Mitogen does not use the Ansiballz framework. This call should never + happen when ActionMixin is active, so crash if it does. + """ + assert False, "_configure_module() should never be called." + + def _is_pipelining_enabled(self, module_style, wrap_async=False): + """ + Mitogen does not use SSH pipelining. This call should never happen when + ActionMixin is active, so crash if it does. + """ + assert False, "_is_pipelining_enabled() should never be called." + + def _generate_tmp_path(self): + return os.path.join( + self._connection.get_good_temp_dir(), + 'ansible_mitogen_action_%016x' % ( + random.getrandbits(8*8), + ) + ) + + def _make_tmp_path(self, remote_user=None): + """ + Create a temporary subdirectory as a child of the temporary directory + managed by the remote interpreter. + """ + LOG.debug('_make_tmp_path(remote_user=%r)', remote_user) + path = self._generate_tmp_path() + LOG.debug('Temporary directory: %r', path) + self._connection.get_chain().call_no_reply(os.mkdir, path) + self._connection._shell.tmpdir = path + return path + + def _remove_tmp_path(self, tmp_path): + """ + Replace the base implementation's invocation of rm -rf, replacing it + with a pipelined call to :func:`ansible_mitogen.target.prune_tree`. 
+ """ + LOG.debug('_remove_tmp_path(%r)', tmp_path) + if tmp_path is None and ansible_mitogen.utils.ansible_version[:2] >= (2, 6): + tmp_path = self._connection._shell.tmpdir # 06f73ad578d + if tmp_path is not None: + self._connection.get_chain().call_no_reply( + ansible_mitogen.target.prune_tree, + tmp_path, + ) + self._connection._shell.tmpdir = None + + def _transfer_data(self, remote_path, data): + """ + Used by the base _execute_module(), and in <2.4 also by the template + action module, and probably others. + """ + if isinstance(data, dict): + data = jsonify(data) + if not isinstance(data, bytes): + data = to_bytes(data, errors='surrogate_or_strict') + + LOG.debug('_transfer_data(%r, %s ..%d bytes)', + remote_path, type(data), len(data)) + self._connection.put_data(remote_path, data) + return remote_path + + #: Actions listed here cause :func:`_fixup_perms2` to avoid a needless + #: roundtrip, as they modify file modes separately afterwards. This is due + #: to the method prototype having a default of `execute=True`. + FIXUP_PERMS_RED_HERRING = set(['copy']) + + def _fixup_perms2(self, remote_paths, remote_user=None, execute=True): + """ + Mitogen always executes ActionBase helper methods in the context of the + target user account, so it is never necessary to modify permissions + except to ensure the execute bit is set if requested. + """ + LOG.debug('_fixup_perms2(%r, remote_user=%r, execute=%r)', + remote_paths, remote_user, execute) + if execute and self._task.action not in self.FIXUP_PERMS_RED_HERRING: + return self._remote_chmod(remote_paths, mode='u+x') + return self.COMMAND_RESULT.copy() + + def _remote_chmod(self, paths, mode, sudoable=False): + """ + Issue an asynchronous set_file_mode() call for every path in `paths`, + then format the resulting return value list with fake_shell(). 
+ """ + LOG.debug('_remote_chmod(%r, mode=%r, sudoable=%r)', + paths, mode, sudoable) + return self.fake_shell(lambda: mitogen.select.Select.all( + self._connection.get_chain().call_async( + ansible_mitogen.target.set_file_mode, path, mode + ) + for path in paths + )) + + def _remote_chown(self, paths, user, sudoable=False): + """ + Issue an asynchronous os.chown() call for every path in `paths`, then + format the resulting return value list with fake_shell(). + """ + LOG.debug('_remote_chown(%r, user=%r, sudoable=%r)', + paths, user, sudoable) + ent = self._connection.get_chain().call(pwd.getpwnam, user) + return self.fake_shell(lambda: mitogen.select.Select.all( + self._connection.get_chain().call_async( + os.chown, path, ent.pw_uid, ent.pw_gid + ) + for path in paths + )) + + def _remote_expand_user(self, path, sudoable=True): + """ + Replace the base implementation's attempt to emulate + os.path.expanduser() with an actual call to os.path.expanduser(). + + :param bool sudoable: + If :data:`True`, indicate unqualified tilde ("~" with no username) + should be evaluated in the context of the login account, not any + become_user. + """ + LOG.debug('_remote_expand_user(%r, sudoable=%r)', path, sudoable) + if not path.startswith('~'): + # /home/foo -> /home/foo + return path + if sudoable or not self._play_context.become: + if path == '~': + # ~ -> /home/dmw + return self._connection.homedir + if path.startswith('~/'): + # ~/.ansible -> /home/dmw/.ansible + return os.path.join(self._connection.homedir, path[2:]) + # ~root/.ansible -> /root/.ansible + return self._connection.get_chain(use_login=(not sudoable)).call( + os.path.expanduser, + ansible_mitogen.utils.unsafe.cast(path), + ) + + def get_task_timeout_secs(self): + """ + Return the task "async:" value, portable across 2.4-2.5. 
+ """ + try: + return self._task.async_val + except AttributeError: + return getattr(self._task, 'async') + + def _set_temp_file_args(self, module_args, wrap_async): + # Ansible>2.5 module_utils reuses the action's temporary directory if + # one exists. Older versions error if this key is present. + if ansible_mitogen.utils.ansible_version[:2] >= (2, 5): + if wrap_async: + # Sharing is not possible with async tasks, as in that case, + # the directory must outlive the action plug-in. + module_args['_ansible_tmpdir'] = None + else: + module_args['_ansible_tmpdir'] = self._connection._shell.tmpdir + + # If _ansible_tmpdir is unset, Ansible>2.6 module_utils will use + # _ansible_remote_tmp as the location to create the module's temporary + # directory. Older versions error if this key is present. + if ansible_mitogen.utils.ansible_version[:2] >= (2, 6): + module_args['_ansible_remote_tmp'] = ( + self._connection.get_good_temp_dir() + ) + + def _execute_module(self, module_name=None, module_args=None, tmp=None, + task_vars=None, persist_files=False, + delete_remote_tmp=True, wrap_async=False, + ignore_unknown_opts=False, + ): + """ + Collect up a module's execution environment then use it to invoke + target.run_module() or helpers.run_module_async() in the target + context. 
+ """ + if module_name is None: + module_name = self._task.action + if module_args is None: + module_args = self._task.args + if task_vars is None: + task_vars = {} + + if ansible_mitogen.utils.ansible_version[:2] >= (2, 17): + self._update_module_args( + module_name, module_args, task_vars, + ignore_unknown_opts=ignore_unknown_opts, + ) + else: + self._update_module_args(module_name, module_args, task_vars) + env = {} + self._compute_environment_string(env) + self._set_temp_file_args(module_args, wrap_async) + + # there's a case where if a task shuts down the node and then immediately calls + # wait_for_connection, the `ping` test from Ansible won't pass because we lost connection + # clearing out context forces a reconnect + # see https://github.com/dw/mitogen/issues/655 and Ansible's `wait_for_connection` module for more info + if module_name == 'ansible.legacy.ping' and type(self).__name__ == 'wait_for_connection': + self._connection.context = None + + self._connection._connect() + result = ansible_mitogen.planner.invoke( + ansible_mitogen.planner.Invocation( + action=self, + connection=self._connection, + module_name=ansible_mitogen.utils.unsafe.cast(mitogen.core.to_text(module_name)), + module_args=ansible_mitogen.utils.unsafe.cast(module_args), + task_vars=task_vars, + templar=self._templar, + env=ansible_mitogen.utils.unsafe.cast(env), + wrap_async=wrap_async, + timeout_secs=self.get_task_timeout_secs(), + ) + ) + + if tmp and delete_remote_tmp and ansible_mitogen.utils.ansible_version[:2] < (2, 5): + # Built-in actions expected tmpdir to be cleaned up automatically + # on _execute_module(). 
+ self._remove_tmp_path(tmp) + + # prevents things like discovered_interpreter_* or ansible_discovered_interpreter_* from being set + # handle ansible 2.3.3 that has remove_internal_keys in a different place + check = remove_internal_keys(result) + if check == 'Not found': + self._remove_internal_keys(result) + + # taken from _execute_module of ansible 2.8.6 + # propagate interpreter discovery results back to the controller + if self._discovered_interpreter_key: + if result.get('ansible_facts') is None: + result['ansible_facts'] = {} + + # only cache discovered_interpreter if we're not running a rediscovery + # rediscovery happens in places like docker connections that could have different + # python interpreters than the main host + if not self._rediscovered_python: + result['ansible_facts'][self._discovered_interpreter_key] = self._discovered_interpreter + + if self._discovery_warnings: + if result.get('warnings') is None: + result['warnings'] = [] + result['warnings'].extend(self._discovery_warnings) + + if self._discovery_deprecation_warnings: + if result.get('deprecations') is None: + result['deprecations'] = [] + result['deprecations'].extend(self._discovery_deprecation_warnings) + + return wrap_var(result) + + def _postprocess_response(self, result): + """ + Apply fixups mimicking ActionBase._execute_module(); this is copied + verbatim from action/__init__.py, the guts of _parse_returned_data are + garbage and should be removed or reimplemented once tests exist. + + :param dict result: + Dictionary with format:: + + { + "rc": int, + "stdout": "stdout data", + "stderr": "stderr data" + } + """ + data = self._parse_returned_data(result) + + # Cutpasted from the base implementation. 
+ if 'stdout' in data and 'stdout_lines' not in data: + data['stdout_lines'] = (data['stdout'] or u'').splitlines() + if 'stderr' in data and 'stderr_lines' not in data: + data['stderr_lines'] = (data['stderr'] or u'').splitlines() + + return data + + def _low_level_execute_command(self, cmd, sudoable=True, in_data=None, + executable=None, + encoding_errors='surrogate_then_replace', + chdir=None): + """ + Override the base implementation by simply calling + target.exec_command() in the target context. + """ + LOG.debug('_low_level_execute_command(%r, in_data=%r, exe=%r, dir=%r)', + cmd, type(in_data), executable, chdir) + + if executable is None: # executable defaults to False + executable = self._play_context.executable + if executable: + cmd = executable + ' -c ' + shlex_quote(cmd) + + # TODO: HACK: if finding python interpreter then we need to keep + # calling exec_command until we run into the right python we'll use + # chicken-and-egg issue, mitogen needs a python to run low_level_execute_command + # which is required by Ansible's discover_interpreter function + if self._finding_python_interpreter: + possible_pythons = [ + '/usr/bin/python', + 'python3', + 'python3.7', + 'python3.6', + 'python3.5', + 'python2.7', + 'python2.6', + '/usr/libexec/platform-python', + '/usr/bin/python3', + 'python' + ] + else: + # not used, just adding a filler value + possible_pythons = ['python'] + + def _run_cmd(): + return self._connection.exec_command( + cmd=cmd, + in_data=in_data, + sudoable=sudoable, + mitogen_chdir=chdir, + ) + + for possible_python in possible_pythons: + try: + self._possible_python_interpreter = possible_python + rc, stdout, stderr = _run_cmd() + # TODO: what exception is thrown? 
+ except: + # we've reached the last python attempted and failed + # TODO: could use enumerate(), need to check which version of python first had it though + if possible_python == 'python': + raise + else: + continue + + stdout_text = to_text(stdout, errors=encoding_errors) + + return { + 'rc': rc, + 'stdout': stdout_text, + 'stdout_lines': stdout_text.splitlines(), + 'stderr': stderr, + } diff --git a/mitogen-0.3.9/ansible_mitogen/module_finder.py b/mitogen-0.3.9/ansible_mitogen/module_finder.py new file mode 100644 index 0000000..a187083 --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/module_finder.py @@ -0,0 +1,281 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from __future__ import absolute_import, division, print_function +from __future__ import unicode_literals +__metaclass__ = type + +import collections +import logging +import os +import re +import sys + +try: + # Python >= 3.4, PEP 451 ModuleSpec API + import importlib.machinery + import importlib.util +except ImportError: + # Python < 3.4, PEP 302 Import Hooks + import imp + +import mitogen.master + + +LOG = logging.getLogger(__name__) +PREFIX = 'ansible.module_utils.' + + +# Analog of `importlib.machinery.ModuleSpec` or `pkgutil.ModuleInfo`. +# name Unqualified name of the module. +# path Filesystem path of the module. +# kind One of the constants in `imp`, as returned in `imp.find_module()` +# parent `ansible_mitogen.module_finder.Module` of parent package (if any). +Module = collections.namedtuple('Module', 'name path kind parent') + + +def get_fullname(module): + """ + Reconstruct a Module's canonical path by recursing through its parents. + """ + bits = [str(module.name)] + while module.parent: + bits.append(str(module.parent.name)) + module = module.parent + return '.'.join(reversed(bits)) + + +def get_code(module): + """ + Compile and return a Module's code object. + """ + fp = open(module.path, 'rb') + try: + return compile(fp.read(), str(module.name), 'exec') + finally: + fp.close() + + +def is_pkg(module): + """ + Return :data:`True` if a Module represents a package. 
+ """ + return module.kind == imp.PKG_DIRECTORY + + +def find(name, path=(), parent=None): + """ + Return a Module instance describing the first matching module found on the + search path. + + :param str name: + Module name. + :param list path: + List of directory names to search for the module. + :param Module parent: + Optional module parent. + """ + assert isinstance(path, tuple) + head, _, tail = name.partition('.') + try: + tup = imp.find_module(head, list(path)) + except ImportError: + return parent + + fp, modpath, (suffix, mode, kind) = tup + if fp: + fp.close() + + if parent and modpath == parent.path: + # 'from timeout import timeout', where 'timeout' is a function but also + # the name of the module being imported. + return None + + if kind == imp.PKG_DIRECTORY: + modpath = os.path.join(modpath, '__init__.py') + + module = Module(head, modpath, kind, parent) + # TODO: this code is entirely wrong on Python 3.x, but works well enough + # for Ansible. We need a new find_child() that only looks in the package + # directory, never falling back to the parent search path. + if tail and kind == imp.PKG_DIRECTORY: + return find_relative(module, tail, path) + return module + + +def find_relative(parent, name, path=()): + if parent.kind == imp.PKG_DIRECTORY: + path = (os.path.dirname(parent.path),) + path + return find(name, path, parent=parent) + + +def scan_fromlist(code): + """Return an iterator of (level, name) for explicit imports in a code + object. + + Not all names identify a module. `from os import name, path` generates + `(0, 'os.name'), (0, 'os.path')`, but `os.name` is usually a string. 
+ + >>> src = 'import a; import b.c; from d.e import f; from g import h, i\\n' + >>> code = compile(src, '', 'exec') + >>> list(scan_fromlist(code)) + [(0, 'a'), (0, 'b.c'), (0, 'd.e.f'), (0, 'g.h'), (0, 'g.i')] + """ + for level, modname_s, fromlist in mitogen.master.scan_code_imports(code): + for name in fromlist: + yield level, str('%s.%s' % (modname_s, name)) + if not fromlist: + yield level, modname_s + + +def walk_imports(code, prefix=None): + """Return an iterator of names for implicit parent imports & explicit + imports in a code object. + + If a prefix is provided, then only children of that prefix are included. + Not all names identify a module. `from os import name, path` generates + `'os', 'os.name', 'os.path'`, but `os.name` is usually a string. + + >>> source = 'import a; import b; import b.c; from b.d import e, f\\n' + >>> code = compile(source, '', 'exec') + >>> list(walk_imports(code)) + ['a', 'b', 'b', 'b.c', 'b', 'b.d', 'b.d.e', 'b.d.f'] + >>> list(walk_imports(code, prefix='b')) + ['b.c', 'b.d', 'b.d.e', 'b.d.f'] + """ + if prefix is None: + prefix = '' + pattern = re.compile(r'(^|\.)(\w+)') + start = len(prefix) + for _, name, fromlist in mitogen.master.scan_code_imports(code): + if not name.startswith(prefix): + continue + for match in pattern.finditer(name, start): + yield name[:match.end()] + for leaf in fromlist: + yield str('%s.%s' % (name, leaf)) + + +def scan(module_name, module_path, search_path): + # type: (str, str, list[str]) -> list[(str, str, bool)] + """Return a list of (name, path, is_package) for ansible.module_utils + imports used by an Ansible module. 
+ """ + log = LOG.getChild('scan') + log.debug('%r, %r, %r', module_name, module_path, search_path) + + if sys.version_info >= (3, 4): + result = _scan_importlib_find_spec( + module_name, module_path, search_path, + ) + log.debug('_scan_importlib_find_spec %r', result) + else: + result = _scan_imp_find_module(module_name, module_path, search_path) + log.debug('_scan_imp_find_module %r', result) + return result + + +def _scan_importlib_find_spec(module_name, module_path, search_path): + # type: (str, str, list[str]) -> list[(str, str, bool)] + module = importlib.machinery.ModuleSpec( + module_name, loader=None, origin=module_path, + ) + prefix = importlib.machinery.ModuleSpec( + PREFIX.rstrip('.'), loader=None, + ) + prefix.submodule_search_locations = search_path + queue = collections.deque([module]) + specs = {prefix.name: prefix} + while queue: + spec = queue.popleft() + if spec.origin is None: + continue + try: + with open(spec.origin, 'rb') as f: + code = compile(f.read(), spec.name, 'exec') + except Exception as exc: + raise ValueError((exc, module, spec, specs)) + + for name in walk_imports(code, prefix.name): + if name in specs: + continue + + parent_name = name.rpartition('.')[0] + parent = specs[parent_name] + if parent is None or not parent.submodule_search_locations: + specs[name] = None + continue + + child = importlib.util._find_spec( + name, parent.submodule_search_locations, + ) + if child is None or child.origin is None: + specs[name] = None + continue + + specs[name] = child + queue.append(child) + + del specs[prefix.name] + return sorted( + (spec.name, spec.origin, spec.submodule_search_locations is not None) + for spec in specs.values() if spec is not None + ) + + +def _scan_imp_find_module(module_name, module_path, search_path): + # type: (str, str, list[str]) -> list[(str, str, bool)] + module = Module(module_name, module_path, imp.PY_SOURCE, None) + stack = [module] + seen = set() + + while stack: + module = stack.pop(0) + for level, fromname 
in scan_fromlist(get_code(module)): + if not fromname.startswith(PREFIX): + continue + + imported = find(fromname[len(PREFIX):], search_path) + if imported is None or imported in seen: + continue + + seen.add(imported) + stack.append(imported) + parent = imported.parent + while parent: + fullname = get_fullname(parent) + module = Module(fullname, parent.path, parent.kind, None) + if module not in seen: + seen.add(module) + stack.append(module) + parent = parent.parent + + return sorted( + (PREFIX + get_fullname(module), module.path, is_pkg(module)) + for module in seen + ) diff --git a/mitogen-0.3.9/ansible_mitogen/parsing.py b/mitogen-0.3.9/ansible_mitogen/parsing.py new file mode 100644 index 0000000..2ca5031 --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/parsing.py @@ -0,0 +1,77 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
__metaclass__ = type

import mitogen.core


def parse_script_interpreter(source):
    """
    Parse the script interpreter portion of a UNIX hashbang using the rules
    Linux uses.

    :param str source: String like "/usr/bin/env python".

    :returns:
        Tuple of `(interpreter, arg)`, where `interpreter` is the script
        interpreter and `arg` is its sole argument if present, otherwise
        :py:data:`None`.
    """
    # Find terminating newline. Assume last byte of binprm_buf if absent.
    nl = source.find(b'\n', 0, 128)
    if nl == -1:
        nl = min(128, len(source))

    # Split once on the first run of whitespace. If no whitespace exists,
    # bits just contains the interpreter filename.
    bits = source[0:nl].strip().split(None, 1)
    interpreter = mitogen.core.to_text(bits[0])
    if len(bits) == 1:
        return interpreter, None
    return interpreter, mitogen.core.to_text(bits[1])


def parse_hashbang(source):
    """
    Parse a UNIX "hashbang line" using the syntax supported by Linux.

    :param str source: String like "#!/usr/bin/env python".

    :returns:
        Tuple of `(interpreter, arg)`, where `interpreter` is the script
        interpreter and `arg` is its sole argument if present, otherwise
        :py:data:`None`.
    """
    # Linux requires first 2 bytes with no whitespace, pretty sure it's the
    # same everywhere. See binfmt_script.c.
    if not source.startswith(b'#!'):
        return None, None

    return parse_script_interpreter(source[2:])

# ---------------------------------------------------------------------------
# mitogen-0.3.9/ansible_mitogen/planner.py
# ---------------------------------------------------------------------------
# Copyright 2019, David Wilson
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
#    this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from this
#    software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
+ +""" +Classes to detect each case from [0] and prepare arguments necessary for the +corresponding Runner class within the target, including preloading requisite +files/modules known missing. + +[0] "Ansible Module Architecture", developing_program_flow_modules.html +""" + +from __future__ import absolute_import, division, print_function +from __future__ import unicode_literals +__metaclass__ = type + +import json +import logging +import os +import random +import re + +import ansible.collections.list +import ansible.errors +import ansible.executor.module_common + +import mitogen.core +import mitogen.select + +import ansible_mitogen.loaders +import ansible_mitogen.parsing +import ansible_mitogen.target +import ansible_mitogen.utils.unsafe + + +LOG = logging.getLogger(__name__) +NO_METHOD_MSG = 'Mitogen: no invocation method found for: ' +NO_INTERPRETER_MSG = 'module (%s) is missing interpreter line' +# NOTE: Ansible 2.10 no longer has a `.` at the end of NO_MODULE_MSG error +NO_MODULE_MSG = 'The module %s was not found in configured module paths' + +_planner_by_path = {} + + +class Invocation(object): + """ + Collect up a module's execution environment then use it to invoke + target.run_module() or helpers.run_module_async() in the target context. + """ + def __init__(self, action, connection, module_name, module_args, + task_vars, templar, env, wrap_async, timeout_secs): + #: ActionBase instance invoking the module. Required to access some + #: output postprocessing methods that don't belong in ActionBase at + #: all. + self.action = action + #: Ansible connection to use to contact the target. Must be an + #: ansible_mitogen connection. + self.connection = connection + #: Name of the module ('command', 'shell', etc.) to execute. + self.module_name = module_name + #: Final module arguments. + self.module_args = module_args + #: Task variables, needed to extract ansible_*_interpreter. 
+ self.task_vars = task_vars + #: Templar, needed to extract ansible_*_interpreter. + self.templar = templar + #: Final module environment. + self.env = env + #: Boolean, if :py:data:`True`, launch the module asynchronously. + self.wrap_async = wrap_async + #: Integer, if >0, limit the time an asynchronous job may run for. + self.timeout_secs = timeout_secs + #: Initially ``None``, but set by :func:`invoke`. The path on the + #: master to the module's implementation file. + self.module_path = None + #: Initially ``None``, but set by :func:`invoke`. The raw source or + #: binary contents of the module. + self._module_source = None + #: Initially ``{}``, but set by :func:`invoke`. Optional source to send + #: to :func:`propagate_paths_and_modules` to fix Python3.5 relative import errors + self._overridden_sources = {} + #: Initially ``set()``, but set by :func:`invoke`. Optional source paths to send + #: to :func:`propagate_paths_and_modules` to handle loading source dependencies from + #: places outside of the main source path, such as collections + self._extra_sys_paths = set() + + def get_module_source(self): + if self._module_source is None: + self._module_source = read_file(self.module_path) + return self._module_source + + def __repr__(self): + return 'Invocation(module_name=%s)' % (self.module_name,) + + +class Planner(object): + """ + A Planner receives a module name and the contents of its implementation + file, indicates whether or not it understands how to run the module, and + exports a method to run the module. + """ + def __init__(self, invocation): + self._inv = invocation + + @classmethod + def detect(cls, path, source): + """ + Return true if the supplied `invocation` matches the module type + implemented by this planner. + """ + raise NotImplementedError() + + def should_fork(self): + """ + Asynchronous tasks must always be forked. 
+ """ + return self._inv.wrap_async + + def get_push_files(self): + """ + Return a list of files that should be propagated to the target context + using PushFileService. The default implementation pushes nothing. + """ + return [] + + def get_module_deps(self): + """ + Return a list of the Python module names imported by the module. + """ + return [] + + def get_kwargs(self, **kwargs): + """ + If :meth:`detect` returned :data:`True`, plan for the module's + execution, including granting access to or delivering any files to it + that are known to be absent, and finally return a dict:: + + { + # Name of the class from runners.py that implements the + # target-side execution of this module type. + "runner_name": "...", + + # Remaining keys are passed to the constructor of the class + # named by `runner_name`. + } + """ + binding = self._inv.connection.get_binding() + + new = dict((mitogen.core.UnicodeType(k), kwargs[k]) + for k in kwargs) + new.setdefault('good_temp_dir', + self._inv.connection.get_good_temp_dir()) + new.setdefault('cwd', self._inv.connection.get_default_cwd()) + new.setdefault('extra_env', self._inv.connection.get_default_env()) + new.setdefault('emulate_tty', True) + new.setdefault('service_context', binding.get_child_service_context()) + return new + + def __repr__(self): + return '%s()' % (type(self).__name__,) + + +class BinaryPlanner(Planner): + """ + Binary modules take their arguments and will return data to Ansible in the + same way as want JSON modules. 
+ """ + runner_name = 'BinaryRunner' + + @classmethod + def detect(cls, path, source): + return ansible.executor.module_common._is_binary(source) + + def get_push_files(self): + return [mitogen.core.to_text(self._inv.module_path)] + + def get_kwargs(self, **kwargs): + return super(BinaryPlanner, self).get_kwargs( + runner_name=self.runner_name, + module=self._inv.module_name, + path=self._inv.module_path, + json_args=json.dumps(self._inv.module_args), + env=self._inv.env, + **kwargs + ) + + +class ScriptPlanner(BinaryPlanner): + """ + Common functionality for script module planners -- handle interpreter + detection and rewrite. + """ + def _rewrite_interpreter(self, path): + """ + Given the interpreter path (from the script's hashbang line), return + the desired interpreter path. This tries, in order + + 1. Look up & render the `ansible_*_interpreter` variable, if set + 2. Look up the `discovered_interpreter_*` fact, if present + 3. The unmodified path from the hashbang line. + + :param str path: + Absolute path to original interpreter (e.g. '/usr/bin/python'). + + :returns: + Shell fragment prefix used to execute the script via "/bin/sh -c". + While `ansible_*_interpreter` documentation suggests shell isn't + involved here, the vanilla implementation uses it and that use is + exploited in common playbooks. 
+ """ + interpreter_name = os.path.basename(path).strip() + key = u'ansible_%s_interpreter' % interpreter_name + try: + template = self._inv.task_vars[key] + except KeyError: + pass + else: + configured_interpreter = self._inv.templar.template(template) + return ansible_mitogen.utils.unsafe.cast(configured_interpreter) + + key = u'discovered_interpreter_%s' % interpreter_name + try: + discovered_interpreter = self._inv.task_vars['ansible_facts'][key] + except KeyError: + pass + else: + return ansible_mitogen.utils.unsafe.cast(discovered_interpreter) + + return path + + def _get_interpreter(self): + path, arg = ansible_mitogen.parsing.parse_hashbang( + self._inv.get_module_source() + ) + if path is None: + raise ansible.errors.AnsibleError(NO_INTERPRETER_MSG % ( + self._inv.module_name, + )) + + fragment = self._rewrite_interpreter(path) + if arg: + fragment += ' ' + arg + + is_python = path.startswith('python') + return fragment, is_python + + def get_kwargs(self, **kwargs): + interpreter_fragment, is_python = self._get_interpreter() + return super(ScriptPlanner, self).get_kwargs( + interpreter_fragment=interpreter_fragment, + is_python=is_python, + **kwargs + ) + + +class JsonArgsPlanner(ScriptPlanner): + """ + Script that has its interpreter directive and the task arguments + substituted into its source as a JSON string. + """ + runner_name = 'JsonArgsRunner' + + @classmethod + def detect(cls, path, source): + return ansible.executor.module_common.REPLACER_JSONARGS in source + + +class WantJsonPlanner(ScriptPlanner): + """ + If a module has the string WANT_JSON in it anywhere, Ansible treats it as a + non-native module that accepts a filename as its only command line + parameter. The filename is for a temporary file containing a JSON string + containing the module's parameters. The module needs to open the file, read + and parse the parameters, operate on the data, and print its return data as + a JSON encoded dictionary to stdout before exiting. 
+ + These types of modules are self-contained entities. As of Ansible 2.1, + Ansible only modifies them to change a shebang line if present. + """ + runner_name = 'WantJsonRunner' + + @classmethod + def detect(cls, path, source): + return b'WANT_JSON' in source + + +class NewStylePlanner(ScriptPlanner): + """ + The Ansiballz framework differs from module replacer in that it uses real + Python imports of things in ansible/module_utils instead of merely + preprocessing the module. + """ + runner_name = 'NewStyleRunner' + MARKER = re.compile(br'from ansible(?:_collections|\.module_utils)\.') + + @classmethod + def detect(cls, path, source): + return cls.MARKER.search(source) is not None + + def _get_interpreter(self): + return None, None + + def get_push_files(self): + return super(NewStylePlanner, self).get_push_files() + [ + mitogen.core.to_text(path) + for fullname, path, is_pkg in self.get_module_map()['custom'] + ] + + def get_module_deps(self): + return self.get_module_map()['builtin'] + + #: Module names appearing in this set always require forking, usually due + #: to some terminal leakage that cannot be worked around in any sane + #: manner. + ALWAYS_FORK_MODULES = frozenset([ + 'dnf', # issue #280; py-dnf/hawkey need therapy + 'firewalld', # issue #570: ansible module_utils caches dbus conn + 'ansible.legacy.dnf', # issue #776 + 'ansible.builtin.dnf', # issue #832 + ]) + + def should_fork(self): + """ + In addition to asynchronous tasks, new-style modules should be forked + if: + + * the user specifies mitogen_task_isolation=fork, or + * the new-style module has a custom module search path, or + * the module is known to leak like a sieve. 
+ """ + return ( + super(NewStylePlanner, self).should_fork() or + (self._inv.task_vars.get('mitogen_task_isolation') == 'fork') or + (self._inv.module_name in self.ALWAYS_FORK_MODULES) or + (len(self.get_module_map()['custom']) > 0) + ) + + def get_search_path(self): + return tuple( + path + for path in ansible_mitogen.loaders.module_utils_loader._get_paths( + subdirs=False + ) + ) + + _module_map = None + + def get_module_map(self): + if self._module_map is None: + binding = self._inv.connection.get_binding() + self._module_map = mitogen.service.call( + call_context=binding.get_service_context(), + service_name='ansible_mitogen.services.ModuleDepService', + method_name='scan', + + module_name='ansible_module_%s' % (self._inv.module_name,), + module_path=self._inv.module_path, + search_path=self.get_search_path(), + builtin_path=ansible.executor.module_common._MODULE_UTILS_PATH, + context=self._inv.connection.context, + ) + return self._module_map + + def get_kwargs(self): + return super(NewStylePlanner, self).get_kwargs( + module_map=self.get_module_map(), + py_module_name=py_modname_from_path( + self._inv.module_name, + self._inv.module_path, + ), + ) + + +class ReplacerPlanner(NewStylePlanner): + """ + The Module Replacer framework is the original framework implementing + new-style modules. It is essentially a preprocessor (like the C + Preprocessor for those familiar with that programming language). It does + straight substitutions of specific substring patterns in the module file. + There are two types of substitutions. + + * Replacements that only happen in the module file. These are public + replacement strings that modules can utilize to get helpful boilerplate + or access to arguments. + + "from ansible.module_utils.MOD_LIB_NAME import *" is replaced with the + contents of the ansible/module_utils/MOD_LIB_NAME.py. These should only + be used with new-style Python modules. 
+ + "#<>" is equivalent to + "from ansible.module_utils.basic import *" and should also only apply to + new-style Python modules. + + "# POWERSHELL_COMMON" substitutes the contents of + "ansible/module_utils/powershell.ps1". It should only be used with + new-style Powershell modules. + """ + runner_name = 'ReplacerRunner' + + @classmethod + def detect(cls, path, source): + return ansible.executor.module_common.REPLACER in source + + +class OldStylePlanner(ScriptPlanner): + runner_name = 'OldStyleRunner' + + @classmethod + def detect(cls, path, source): + # Everything else. + return True + + +_planners = [ + BinaryPlanner, + # ReplacerPlanner, + NewStylePlanner, + JsonArgsPlanner, + WantJsonPlanner, + OldStylePlanner, +] + + +def py_modname_from_path(name, path): + """ + Fetch the logical name of a new-style module as it might appear in + :data:`sys.modules` of the target's Python interpreter. + + * Since Ansible 2.9, modules appearing within a package have the original + package hierarchy approximated on the target, enabling relative imports + to function correctly. For example, "ansible.modules.system.setup". + """ + try: + return ansible.executor.module_common._get_ansible_module_fqn(path) + except AttributeError: + pass + except ValueError: + pass + + return 'ansible.modules.' 
+ name + + +def read_file(path): + fd = os.open(path, os.O_RDONLY) + try: + bits = [] + chunk = True + while True: + chunk = os.read(fd, 65536) + if not chunk: + break + bits.append(chunk) + finally: + os.close(fd) + + return mitogen.core.b('').join(bits) + + +def _propagate_deps(invocation, planner, context): + binding = invocation.connection.get_binding() + mitogen.service.call( + call_context=binding.get_service_context(), + service_name='mitogen.service.PushFileService', + method_name='propagate_paths_and_modules', + + context=context, + paths=planner.get_push_files(), + # modules=planner.get_module_deps(), TODO + overridden_sources=invocation._overridden_sources, + # needs to be a list because can't unpickle() a set() + extra_sys_paths=list(invocation._extra_sys_paths), + ) + + +def _invoke_async_task(invocation, planner): + job_id = '%016x' % random.randint(0, 2**64) + context = invocation.connection.spawn_isolated_child() + _propagate_deps(invocation, planner, context) + + with mitogen.core.Receiver(context.router) as started_recv: + call_recv = context.call_async( + ansible_mitogen.target.run_module_async, + job_id=job_id, + timeout_secs=invocation.timeout_secs, + started_sender=started_recv.to_sender(), + kwargs=planner.get_kwargs(), + ) + + # Wait for run_module_async() to crash, or for AsyncRunner to indicate + # the job file has been written. + for msg in mitogen.select.Select([started_recv, call_recv]): + if msg.receiver is call_recv: + # It can only be an exception. + raise msg.unpickle() + break + + return { + 'stdout': json.dumps({ + # modules/utilities/logic/async_wrapper.py::_run_module(). 
+ 'changed': True, + 'started': 1, + 'finished': 0, + 'ansible_job_id': job_id, + }) + } + + +def _invoke_isolated_task(invocation, planner): + context = invocation.connection.spawn_isolated_child() + _propagate_deps(invocation, planner, context) + try: + return context.call( + ansible_mitogen.target.run_module, + kwargs=planner.get_kwargs(), + ) + finally: + context.shutdown() + + +def _get_planner(invocation, source): + for klass in _planners: + if klass.detect(invocation.module_path, source): + LOG.debug( + '%r accepted %r (filename %r)', + klass, invocation.module_name, invocation.module_path, + ) + return klass + LOG.debug('%r rejected %r', klass, invocation.module_name) + raise ansible.errors.AnsibleError(NO_METHOD_MSG + repr(invocation)) + + +def _fix_py35(invocation, module_source): + """ + super edge case with a relative import error in Python 3.5.1-3.5.3 + in Ansible's setup module when using Mitogen + https://github.com/dw/mitogen/issues/672#issuecomment-636408833 + We replace a relative import in the setup module with the actual full file path + This works in vanilla Ansible but not in Mitogen otherwise + """ + if invocation.module_name in {'ansible.builtin.setup', 'ansible.legacy.setup', 'setup'} and \ + invocation.module_path not in invocation._overridden_sources: + # in-memory replacement of setup module's relative import + # would check for just python3.5 and run this then but we don't know the + # target python at this time yet + # NOTE: another ansible 2.10-specific fix: `from ..module_utils` used to be `from ...module_utils` + module_source = module_source.replace( + b"from ..module_utils.basic import AnsibleModule", + b"from ansible.module_utils.basic import AnsibleModule" + ) + invocation._overridden_sources[invocation.module_path] = module_source + + +def _load_collections(invocation): + """ + Special loader that ensures that `ansible_collections` exist as a module path for import + Goes through all collection path possibilities and stores 
paths to installed collections + Stores them on the current invocation to later be passed to the master service + """ + for collection_path in ansible.collections.list.list_collection_dirs(): + invocation._extra_sys_paths.add(collection_path.decode('utf-8')) + + +def invoke(invocation): + """ + Find a Planner subclass corresponding to `invocation` and use it to invoke + the module. + + :param Invocation invocation: + :returns: + Module return dict. + :raises ansible.errors.AnsibleError: + Unrecognized/unsupported module type. + """ + path = ansible_mitogen.loaders.module_loader.find_plugin( + invocation.module_name, + '', + ) + if path is None: + raise ansible.errors.AnsibleError(NO_MODULE_MSG % ( + invocation.module_name, + )) + + invocation.module_path = mitogen.core.to_text(path) + if invocation.module_path not in _planner_by_path: + if 'ansible_collections' in invocation.module_path: + _load_collections(invocation) + + module_source = invocation.get_module_source() + _fix_py35(invocation, module_source) + _planner_by_path[invocation.module_path] = _get_planner( + invocation, + module_source + ) + + planner = _planner_by_path[invocation.module_path](invocation) + if invocation.wrap_async: + response = _invoke_async_task(invocation, planner) + elif planner.should_fork(): + response = _invoke_isolated_task(invocation, planner) + else: + _propagate_deps(invocation, planner, invocation.connection.context) + response = invocation.connection.get_chain().call( + ansible_mitogen.target.run_module, + kwargs=planner.get_kwargs(), + ) + + return invocation.action._postprocess_response(response) diff --git a/mitogen-0.3.9/ansible_mitogen/plugins/__init__.py b/mitogen-0.3.9/ansible_mitogen/plugins/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/mitogen-0.3.9/ansible_mitogen/plugins/action/__init__.py b/mitogen-0.3.9/ansible_mitogen/plugins/action/__init__.py new file mode 100644 index 0000000..e69de29 diff --git 
a/mitogen-0.3.9/ansible_mitogen/plugins/action/mitogen_fetch.py b/mitogen-0.3.9/ansible_mitogen/plugins/action/mitogen_fetch.py new file mode 100644 index 0000000..c1ef190 --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/plugins/action/mitogen_fetch.py @@ -0,0 +1,207 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import base64 +from ansible.errors import AnsibleError, AnsibleActionFail, AnsibleActionSkip +from ansible.module_utils.common.text.converters import to_bytes, to_text +from ansible.module_utils.six import string_types +from ansible.module_utils.parsing.convert_bool import boolean +from ansible.plugins.action import ActionBase +from ansible.utils.display import Display +from ansible.utils.hashing import checksum, checksum_s, md5, secure_hash +from ansible.utils.path import makedirs_safe, is_subpath + +display = Display() + + +class ActionModule(ActionBase): + + def run(self, tmp=None, task_vars=None): + ''' handler for fetch operations ''' + if task_vars is None: + task_vars = dict() + + result = super(ActionModule, self).run(tmp, task_vars) + del tmp # tmp no longer has any effect + + try: + if self._play_context.check_mode: + raise AnsibleActionSkip('check mode not (yet) supported for this module') + + source = self._task.args.get('src', 
None) + original_dest = dest = self._task.args.get('dest', None) + flat = boolean(self._task.args.get('flat'), strict=False) + fail_on_missing = boolean(self._task.args.get('fail_on_missing', True), strict=False) + validate_checksum = boolean(self._task.args.get('validate_checksum', True), strict=False) + + msg = '' + # validate source and dest are strings FIXME: use basic.py and module specs + if not isinstance(source, string_types): + msg = "Invalid type supplied for source option, it must be a string" + + if not isinstance(dest, string_types): + msg = "Invalid type supplied for dest option, it must be a string" + + if source is None or dest is None: + msg = "src and dest are required" + + if msg: + raise AnsibleActionFail(msg) + + source = self._connection._shell.join_path(source) + source = self._remote_expand_user(source) + + remote_stat = {} + remote_checksum = None + if True: + # Get checksum for the remote file even using become. Mitogen doesn't need slurp. + # Follow symlinks because fetch always follows symlinks + try: + remote_stat = self._execute_remote_stat(source, all_vars=task_vars, follow=True) + except AnsibleError as ae: + result['changed'] = False + result['file'] = source + if fail_on_missing: + result['failed'] = True + result['msg'] = to_text(ae) + else: + result['msg'] = "%s, ignored" % to_text(ae, errors='surrogate_or_replace') + + return result + + remote_checksum = remote_stat.get('checksum') + if remote_stat.get('exists'): + if remote_stat.get('isdir'): + result['failed'] = True + result['changed'] = False + result['msg'] = "remote file is a directory, fetch cannot work on directories" + + # Historically, these don't fail because you may want to transfer + # a log file that possibly MAY exist but keep going to fetch other + # log files. Today, this is better achieved by adding + # ignore_errors or failed_when to the task. 
Control the behaviour + # via fail_when_missing + if not fail_on_missing: + result['msg'] += ", not transferring, ignored" + del result['changed'] + del result['failed'] + + return result + + # use slurp if permissions are lacking or privilege escalation is needed + remote_data = None + if remote_checksum in (None, '1', ''): + slurpres = self._execute_module(module_name='ansible.legacy.slurp', module_args=dict(src=source), task_vars=task_vars) + if slurpres.get('failed'): + if not fail_on_missing: + result['file'] = source + result['changed'] = False + else: + result.update(slurpres) + + if 'not found' in slurpres.get('msg', ''): + result['msg'] = "the remote file does not exist, not transferring, ignored" + elif slurpres.get('msg', '').startswith('source is a directory'): + result['msg'] = "remote file is a directory, fetch cannot work on directories" + + return result + else: + if slurpres['encoding'] == 'base64': + remote_data = base64.b64decode(slurpres['content']) + if remote_data is not None: + remote_checksum = checksum_s(remote_data) + + # calculate the destination name + if os.path.sep not in self._connection._shell.join_path('a', ''): + source = self._connection._shell._unquote(source) + source_local = source.replace('\\', '/') + else: + source_local = source + + # ensure we only use file name, avoid relative paths + if not is_subpath(dest, original_dest): + # TODO: ? 
dest = os.path.expanduser(dest.replace(('../',''))) + raise AnsibleActionFail("Detected directory traversal, expected to be contained in '%s' but got '%s'" % (original_dest, dest)) + + if flat: + if os.path.isdir(to_bytes(dest, errors='surrogate_or_strict')) and not dest.endswith(os.sep): + raise AnsibleActionFail("dest is an existing directory, use a trailing slash if you want to fetch src into that directory") + if dest.endswith(os.sep): + # if the path ends with "/", we'll use the source filename as the + # destination filename + base = os.path.basename(source_local) + dest = os.path.join(dest, base) + if not dest.startswith("/"): + # if dest does not start with "/", we'll assume a relative path + dest = self._loader.path_dwim(dest) + else: + # files are saved in dest dir, with a subdir for each host, then the filename + if 'inventory_hostname' in task_vars: + target_name = task_vars['inventory_hostname'] + else: + target_name = self._play_context.remote_addr + dest = "%s/%s/%s" % (self._loader.path_dwim(dest), target_name, source_local) + + dest = os.path.normpath(dest) + + # calculate checksum for the local file + local_checksum = checksum(dest) + + if remote_checksum != local_checksum: + # create the containing directories, if needed + makedirs_safe(os.path.dirname(dest)) + + # fetch the file and check for changes + if remote_data is None: + self._connection.fetch_file(source, dest) + else: + try: + f = open(to_bytes(dest, errors='surrogate_or_strict'), 'wb') + f.write(remote_data) + f.close() + except (IOError, OSError) as e: + raise AnsibleActionFail("Failed to fetch the file: %s" % e) + new_checksum = secure_hash(dest) + # For backwards compatibility. 
We'll return None on FIPS enabled systems + try: + new_md5 = md5(dest) + except ValueError: + new_md5 = None + + if validate_checksum and new_checksum != remote_checksum: + result.update(dict(failed=True, md5sum=new_md5, + msg="checksum mismatch", file=source, dest=dest, remote_md5sum=None, + checksum=new_checksum, remote_checksum=remote_checksum)) + else: + result.update({'changed': True, 'md5sum': new_md5, 'dest': dest, + 'remote_md5sum': None, 'checksum': new_checksum, + 'remote_checksum': remote_checksum}) + else: + # For backwards compatibility. We'll return None on FIPS enabled systems + try: + local_md5 = md5(dest) + except ValueError: + local_md5 = None + result.update(dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum)) + + finally: + self._remove_tmp_path(self._connection._shell.tmpdir) + + return result diff --git a/mitogen-0.3.9/ansible_mitogen/plugins/action/mitogen_get_stack.py b/mitogen-0.3.9/ansible_mitogen/plugins/action/mitogen_get_stack.py new file mode 100644 index 0000000..a8634e5 --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/plugins/action/mitogen_get_stack.py @@ -0,0 +1,58 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +""" +Fetch the connection configuration stack that would be used to connect to a +target, without actually connecting to it. +""" + +from __future__ import absolute_import, division, print_function +from __future__ import unicode_literals +__metaclass__ = type + +import ansible_mitogen.connection + +from ansible.plugins.action import ActionBase + + +class ActionModule(ActionBase): + def run(self, tmp=None, task_vars=None): + if not isinstance(self._connection, + ansible_mitogen.connection.Connection): + return { + 'skipped': True, + } + + _, stack = self._connection._build_stack() + return { + 'changed': True, + 'result': stack, + '_ansible_verbose_always': True, + # for ansible < 2.8, we'll default to /usr/bin/python like before + 'discovered_interpreter': self._connection._action._discovered_interpreter + } diff --git a/mitogen-0.3.9/ansible_mitogen/plugins/connection/__init__.py b/mitogen-0.3.9/ansible_mitogen/plugins/connection/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/mitogen-0.3.9/ansible_mitogen/plugins/connection/__pycache__/mitogen_local.cpython-310.pyc b/mitogen-0.3.9/ansible_mitogen/plugins/connection/__pycache__/mitogen_local.cpython-310.pyc new 
file mode 100644 index 0000000..9e9ebd4 Binary files /dev/null and b/mitogen-0.3.9/ansible_mitogen/plugins/connection/__pycache__/mitogen_local.cpython-310.pyc differ diff --git a/mitogen-0.3.9/ansible_mitogen/plugins/connection/__pycache__/mitogen_ssh.cpython-310.pyc b/mitogen-0.3.9/ansible_mitogen/plugins/connection/__pycache__/mitogen_ssh.cpython-310.pyc new file mode 100644 index 0000000..3624f8a Binary files /dev/null and b/mitogen-0.3.9/ansible_mitogen/plugins/connection/__pycache__/mitogen_ssh.cpython-310.pyc differ diff --git a/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_buildah.py b/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_buildah.py new file mode 100644 index 0000000..10ab6b4 --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_buildah.py @@ -0,0 +1,46 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import os.path +import sys + +try: + import ansible_mitogen +except ImportError: + base_dir = os.path.dirname(__file__) + sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..'))) + del base_dir + +import ansible_mitogen.connection + + +class Connection(ansible_mitogen.connection.Connection): + transport = 'buildah' diff --git a/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_doas.py b/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_doas.py new file mode 100644 index 0000000..963ec59 --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_doas.py @@ -0,0 +1,46 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import os.path +import sys + +try: + import ansible_mitogen.connection +except ImportError: + base_dir = os.path.dirname(__file__) + sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..'))) + del base_dir + +import ansible_mitogen.connection + + +class Connection(ansible_mitogen.connection.Connection): + transport = 'mitogen_doas' diff --git a/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_docker.py b/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_docker.py new file mode 100644 index 0000000..e0dd212 --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_docker.py @@ -0,0 +1,53 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. 
Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import os.path +import sys + +try: + import ansible_mitogen +except ImportError: + base_dir = os.path.dirname(__file__) + sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..'))) + del base_dir + +import ansible_mitogen.connection + + +class Connection(ansible_mitogen.connection.Connection): + transport = 'docker' + + @property + def docker_cmd(self): + """ + Ansible 2.3 synchronize module wants to know how we run Docker. 
+ """ + return 'docker' diff --git a/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_jail.py b/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_jail.py new file mode 100644 index 0000000..a432e17 --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_jail.py @@ -0,0 +1,46 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import os.path +import sys + +try: + import ansible_mitogen +except ImportError: + base_dir = os.path.dirname(__file__) + sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..'))) + del base_dir + +import ansible_mitogen.connection + + +class Connection(ansible_mitogen.connection.Connection): + transport = 'jail' diff --git a/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_kubectl.py b/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_kubectl.py new file mode 100644 index 0000000..bae4160 --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_kubectl.py @@ -0,0 +1,82 @@ +# coding: utf-8 +# Copyright 2018, Yannig Perré +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import os.path +import sys + +import ansible.errors + +try: + import ansible_mitogen +except ImportError: + base_dir = os.path.dirname(__file__) + sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..'))) + del base_dir + +import ansible_mitogen.connection +import ansible_mitogen.loaders + + +_get_result = ansible_mitogen.loaders.connection_loader__get( + 'kubectl', + class_only=True, +) + + +class Connection(ansible_mitogen.connection.Connection): + transport = 'kubectl' + + not_supported_msg = ( + 'The "mitogen_kubectl" plug-in requires a version of Ansible ' + 'that ships with the "kubectl" connection plug-in.' 
+ ) + + def __init__(self, *args, **kwargs): + if not _get_result: + raise ansible.errors.AnsibleConnectionFailure(self.not_supported_msg) + super(Connection, self).__init__(*args, **kwargs) + + def get_extra_args(self): + try: + # Ansible < 2.10, _get_result is the connection class + connection_options = _get_result.connection_options + except AttributeError: + # Ansible >= 2.10, _get_result is a get_with_context_result + connection_options = _get_result.object.connection_options + parameters = [] + for key in connection_options: + task_var_name = 'ansible_%s' % key + task_var = self.get_task_var(task_var_name) + if task_var is not None: + parameters += [connection_options[key], task_var] + + return parameters diff --git a/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_local.py b/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_local.py new file mode 100644 index 0000000..6ff8673 --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_local.py @@ -0,0 +1,88 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import os.path +import sys + +try: + import ansible_mitogen.connection +except ImportError: + base_dir = os.path.dirname(__file__) + sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..'))) + del base_dir + +import ansible_mitogen.connection +import ansible_mitogen.process + + +if sys.version_info > (3,): + viewkeys = dict.keys +elif sys.version_info > (2, 7): + viewkeys = dict.viewkeys +else: + viewkeys = lambda dct: set(dct) + + +def dict_diff(old, new): + """ + Return a dict representing the differences between the dicts `old` and + `new`. Deleted keys appear as a key with the value :data:`None`, added and + changed keys appear as a key with the new value. 
+ """ + old_keys = viewkeys(old) + new_keys = viewkeys(dict(new)) + out = {} + for key in new_keys - old_keys: + out[key] = new[key] + for key in old_keys - new_keys: + out[key] = None + for key in old_keys & new_keys: + if old[key] != new[key]: + out[key] = new[key] + return out + + +class Connection(ansible_mitogen.connection.Connection): + transport = 'local' + + def get_default_cwd(self): + # https://github.com/ansible/ansible/issues/14489 + return self.loader_basedir + + def get_default_env(self): + """ + Vanilla Ansible local commands execute with an environment inherited + from WorkerProcess, we must emulate that. + """ + return dict_diff( + old=ansible_mitogen.process.MuxProcess.cls_original_env, + new=os.environ, + ) diff --git a/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_lxc.py b/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_lxc.py new file mode 100644 index 0000000..8850b3d --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_lxc.py @@ -0,0 +1,46 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import os.path +import sys + +try: + import ansible_mitogen +except ImportError: + base_dir = os.path.dirname(__file__) + sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..'))) + del base_dir + +import ansible_mitogen.connection + + +class Connection(ansible_mitogen.connection.Connection): + transport = 'lxc' diff --git a/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_lxd.py b/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_lxd.py new file mode 100644 index 0000000..25370ef --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_lxd.py @@ -0,0 +1,46 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. 
Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import os.path +import sys + +try: + import ansible_mitogen +except ImportError: + base_dir = os.path.dirname(__file__) + sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..'))) + del base_dir + +import ansible_mitogen.connection + + +class Connection(ansible_mitogen.connection.Connection): + transport = 'lxd' diff --git a/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_machinectl.py b/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_machinectl.py new file mode 100644 index 0000000..1f53879 --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_machinectl.py @@ -0,0 +1,46 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import os.path +import sys + +try: + import ansible_mitogen.connection +except ImportError: + base_dir = os.path.dirname(__file__) + sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..'))) + del base_dir + +import ansible_mitogen.connection + + +class Connection(ansible_mitogen.connection.Connection): + transport = 'machinectl' diff --git a/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_podman.py b/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_podman.py new file mode 100644 index 0000000..e423aac --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_podman.py @@ -0,0 +1,46 @@ +# Copyright 2022, Mitogen contributors +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3.
Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import os.path +import sys + +try: + import ansible_mitogen +except ImportError: + base_dir = os.path.dirname(__file__) + sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..'))) + del base_dir + +import ansible_mitogen.connection + + +class Connection(ansible_mitogen.connection.Connection): + transport = 'podman' diff --git a/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_setns.py b/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_setns.py new file mode 100644 index 0000000..4d70892 --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_setns.py @@ -0,0 +1,46 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. 
Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import os.path +import sys + +try: + import ansible_mitogen.connection +except ImportError: + base_dir = os.path.dirname(__file__) + sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..'))) + del base_dir + +import ansible_mitogen.connection + + +class Connection(ansible_mitogen.connection.Connection): + transport = 'setns' diff --git a/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_ssh.py b/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_ssh.py new file mode 100644 index 0000000..75f2d42 --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_ssh.py @@ -0,0 +1,87 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import os.path +import sys + +DOCUMENTATION = """ + author: David Wilson + connection: mitogen_ssh + short_description: Connect over SSH via Mitogen + description: + - This connects using an OpenSSH client controlled by the Mitogen for + Ansible extension. It accepts every option the vanilla ssh plugin + accepts. + version_added: "2.5" + options: + ssh_args: + type: str + vars: + - name: ssh_args + - name: ansible_ssh_args + - name: ansible_mitogen_ssh_args + ssh_common_args: + type: str + vars: + - name: ssh_args + - name: ansible_ssh_common_args + - name: ansible_mitogen_ssh_common_args + ssh_extra_args: + type: str + vars: + - name: ssh_args + - name: ansible_ssh_extra_args + - name: ansible_mitogen_ssh_extra_args +""" + +try: + import ansible_mitogen +except ImportError: + base_dir = os.path.dirname(__file__) + sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..'))) + del base_dir + +import ansible_mitogen.connection +import ansible_mitogen.loaders + + +class Connection(ansible_mitogen.connection.Connection): + transport = 'ssh' + vanilla_class = ansible_mitogen.loaders.connection_loader__get( + 'ssh', + class_only=True, + ) + + @staticmethod + def _create_control_path(*args, **kwargs): + """Forward _create_control_path() to the implementation in ssh.py.""" + # https://github.com/dw/mitogen/issues/342 + return 
Connection.vanilla_class._create_control_path(*args, **kwargs) diff --git a/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_su.py b/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_su.py new file mode 100644 index 0000000..9395cc5 --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_su.py @@ -0,0 +1,46 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import os.path +import sys + +try: + import ansible_mitogen.connection +except ImportError: + base_dir = os.path.dirname(__file__) + sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..'))) + del base_dir + +import ansible_mitogen.connection + + +class Connection(ansible_mitogen.connection.Connection): + transport = 'mitogen_su' diff --git a/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_sudo.py b/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_sudo.py new file mode 100644 index 0000000..ef54942 --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/plugins/connection/mitogen_sudo.py @@ -0,0 +1,46 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import os.path +import sys + +try: + import ansible_mitogen.connection +except ImportError: + base_dir = os.path.dirname(__file__) + sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..'))) + del base_dir + +import ansible_mitogen.connection + + +class Connection(ansible_mitogen.connection.Connection): + transport = 'mitogen_sudo' diff --git a/mitogen-0.3.9/ansible_mitogen/plugins/strategy/__init__.py b/mitogen-0.3.9/ansible_mitogen/plugins/strategy/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/mitogen-0.3.9/ansible_mitogen/plugins/strategy/__pycache__/mitogen_linear.cpython-310.pyc b/mitogen-0.3.9/ansible_mitogen/plugins/strategy/__pycache__/mitogen_linear.cpython-310.pyc new file mode 100644 index 0000000..d8a8e2b Binary files /dev/null and b/mitogen-0.3.9/ansible_mitogen/plugins/strategy/__pycache__/mitogen_linear.cpython-310.pyc differ diff --git a/mitogen-0.3.9/ansible_mitogen/plugins/strategy/mitogen.py b/mitogen-0.3.9/ansible_mitogen/plugins/strategy/mitogen.py new file mode 100644 index 0000000..abbe767 --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/plugins/strategy/mitogen.py @@ -0,0 +1,63 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. 
Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import os.path +import sys + +# +# This is not the real Strategy implementation module, it simply exists as a +# proxy to the real module, which is loaded using Python's regular import +# mechanism, to prevent Ansible's PluginLoader from making up a fake name that +# results in ansible_mitogen plugin modules being loaded twice: once by +# PluginLoader with a name like "ansible.plugins.strategy.mitogen", which is +# stuffed into sys.modules even though attempting to import it will trigger an +# ImportError, and once under its canonical name, "ansible_mitogen.strategy". +# +# Therefore we have a proxy module that imports it under the real name, and +# sets up the duff PluginLoader-imported module to just contain objects from +# the real module, so duplicate types don't exist in memory, and things like +# debuggers and isinstance() work predictably. +# + +BASE_DIR = os.path.abspath( + os.path.join(os.path.dirname(__file__), '../../..') +) + +if BASE_DIR not in sys.path: + sys.path.insert(0, BASE_DIR) + +import ansible_mitogen.strategy +import ansible.plugins.strategy.linear + + +class StrategyModule(ansible_mitogen.strategy.StrategyMixin, + ansible.plugins.strategy.linear.StrategyModule): + pass diff --git a/mitogen-0.3.9/ansible_mitogen/plugins/strategy/mitogen_free.py b/mitogen-0.3.9/ansible_mitogen/plugins/strategy/mitogen_free.py new file mode 100644 index 0000000..4f4e1f8 --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/plugins/strategy/mitogen_free.py @@ -0,0 +1,64 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. 
Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import os.path +import sys + +# +# This is not the real Strategy implementation module, it simply exists as a +# proxy to the real module, which is loaded using Python's regular import +# mechanism, to prevent Ansible's PluginLoader from making up a fake name that +# results in ansible_mitogen plugin modules being loaded twice: once by +# PluginLoader with a name like "ansible.plugins.strategy.mitogen", which is +# stuffed into sys.modules even though attempting to import it will trigger an +# ImportError, and once under its canonical name, "ansible_mitogen.strategy". 
+# +# Therefore we have a proxy module that imports it under the real name, and +# sets up the duff PluginLoader-imported module to just contain objects from +# the real module, so duplicate types don't exist in memory, and things like +# debuggers and isinstance() work predictably. +# + +BASE_DIR = os.path.abspath( + os.path.join(os.path.dirname(__file__), '../../..') +) + +if BASE_DIR not in sys.path: + sys.path.insert(0, BASE_DIR) + +import ansible_mitogen.loaders +import ansible_mitogen.strategy + + +Base = ansible_mitogen.loaders.strategy_loader.get('free', class_only=True) + +class StrategyModule(ansible_mitogen.strategy.StrategyMixin, Base): + pass diff --git a/mitogen-0.3.9/ansible_mitogen/plugins/strategy/mitogen_host_pinned.py b/mitogen-0.3.9/ansible_mitogen/plugins/strategy/mitogen_host_pinned.py new file mode 100644 index 0000000..c3396c5 --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/plugins/strategy/mitogen_host_pinned.py @@ -0,0 +1,69 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import os.path +import sys + +# +# This is not the real Strategy implementation module, it simply exists as a +# proxy to the real module, which is loaded using Python's regular import +# mechanism, to prevent Ansible's PluginLoader from making up a fake name that +# results in ansible_mitogen plugin modules being loaded twice: once by +# PluginLoader with a name like "ansible.plugins.strategy.mitogen", which is +# stuffed into sys.modules even though attempting to import it will trigger an +# ImportError, and once under its canonical name, "ansible_mitogen.strategy". +# +# Therefore we have a proxy module that imports it under the real name, and +# sets up the duff PluginLoader-imported module to just contain objects from +# the real module, so duplicate types don't exist in memory, and things like +# debuggers and isinstance() work predictably. +# + +BASE_DIR = os.path.abspath( + os.path.join(os.path.dirname(__file__), '../../..') +) + +if BASE_DIR not in sys.path: + sys.path.insert(0, BASE_DIR) + +import ansible_mitogen.loaders +import ansible_mitogen.strategy + + +Base = ansible_mitogen.loaders.strategy_loader.get('host_pinned', class_only=True) + +if Base is None: + raise ImportError( + 'The host_pinned strategy is only available in Ansible 2.7 or newer.' 
+ ) + +class StrategyModule(ansible_mitogen.strategy.StrategyMixin, Base): + pass diff --git a/mitogen-0.3.9/ansible_mitogen/plugins/strategy/mitogen_linear.py b/mitogen-0.3.9/ansible_mitogen/plugins/strategy/mitogen_linear.py new file mode 100644 index 0000000..b1b03ae --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/plugins/strategy/mitogen_linear.py @@ -0,0 +1,64 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import os.path +import sys + +# +# This is not the real Strategy implementation module, it simply exists as a +# proxy to the real module, which is loaded using Python's regular import +# mechanism, to prevent Ansible's PluginLoader from making up a fake name that +# results in ansible_mitogen plugin modules being loaded twice: once by +# PluginLoader with a name like "ansible.plugins.strategy.mitogen", which is +# stuffed into sys.modules even though attempting to import it will trigger an +# ImportError, and once under its canonical name, "ansible_mitogen.strategy". +# +# Therefore we have a proxy module that imports it under the real name, and +# sets up the duff PluginLoader-imported module to just contain objects from +# the real module, so duplicate types don't exist in memory, and things like +# debuggers and isinstance() work predictably. +# + +BASE_DIR = os.path.abspath( + os.path.join(os.path.dirname(__file__), '../../..') +) + +if BASE_DIR not in sys.path: + sys.path.insert(0, BASE_DIR) + +import ansible_mitogen.loaders +import ansible_mitogen.strategy + + +Base = ansible_mitogen.loaders.strategy_loader.get('linear', class_only=True) + +class StrategyModule(ansible_mitogen.strategy.StrategyMixin, Base): + pass diff --git a/mitogen-0.3.9/ansible_mitogen/process.py b/mitogen-0.3.9/ansible_mitogen/process.py new file mode 100644 index 0000000..3a41a43 --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/process.py @@ -0,0 +1,710 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. 
Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import atexit +import logging +import multiprocessing +import os +import resource +import socket +import signal +import sys + +try: + import faulthandler +except ImportError: + faulthandler = None + +try: + import setproctitle +except ImportError: + setproctitle = None + +import mitogen +import mitogen.core +import mitogen.debug +import mitogen.fork +import mitogen.master +import mitogen.parent +import mitogen.service +import mitogen.unix +import mitogen.utils + +import ansible +import ansible.constants as C +import ansible.errors +import ansible_mitogen.logging +import ansible_mitogen.services + +from mitogen.core import b +import ansible_mitogen.affinity + + +LOG = logging.getLogger(__name__) + +ANSIBLE_PKG_OVERRIDE = ( + u"__version__ = %r\n" + u"__author__ = %r\n" +) + +MAX_MESSAGE_SIZE = 4096 * 1048576 + +worker_model_msg = ( + 'Mitogen connection types may only be instantiated when one of the ' + '"mitogen_*" or "operon_*" strategies are active.' +) + +shutting_down_msg = ( + 'The task worker cannot connect. Ansible may be shutting down, or ' + 'the maximum open files limit may have been exceeded. If this occurs ' + 'midway through a run, please retry after increasing the open file ' + 'limit (ulimit -n). Original error: %s' +) + + +#: The worker model as configured by the currently running strategy. This is +#: managed via :func:`get_worker_model` / :func:`set_worker_model` functions by +#: :class:`StrategyMixin`. +_worker_model = None + + +#: A copy of the sole :class:`ClassicWorkerModel` that ever exists during a +#: classic run, as return by :func:`get_classic_worker_model`. +_classic_worker_model = None + + +def set_worker_model(model): + """ + To remove process model-wiring from + :class:`ansible_mitogen.connection.Connection`, it is necessary to track + some idea of the configured execution environment outside the connection + plug-in. 
+ + That is what :func:`set_worker_model` and :func:`get_worker_model` are for. + """ + global _worker_model + assert model is None or _worker_model is None + _worker_model = model + + +def get_worker_model(): + """ + Return the :class:`WorkerModel` currently configured by the running + strategy. + """ + if _worker_model is None: + raise ansible.errors.AnsibleConnectionFailure(worker_model_msg) + return _worker_model + + +def get_classic_worker_model(**kwargs): + """ + Return the single :class:`ClassicWorkerModel` instance, constructing it if + necessary. + """ + global _classic_worker_model + assert _classic_worker_model is None or (not kwargs), \ + "ClassicWorkerModel kwargs supplied but model already constructed" + + if _classic_worker_model is None: + _classic_worker_model = ClassicWorkerModel(**kwargs) + return _classic_worker_model + + +def getenv_int(key, default=0): + """ + Get an integer-valued environment variable `key`, if it exists and parses + as an integer, otherwise return `default`. + """ + try: + return int(os.environ.get(key, str(default))) + except ValueError: + return default + + +def save_pid(name): + """ + When debugging and profiling, it is very annoying to poke through the + process list to discover the currently running Ansible and MuxProcess IDs, + especially when trying to catch an issue during early startup. So here, if + a magic environment variable set, stash them in hidden files in the CWD:: + + alias muxpid="cat .ansible-mux.pid" + alias anspid="cat .ansible-controller.pid" + + gdb -p $(muxpid) + perf top -p $(anspid) + """ + if os.environ.get('MITOGEN_SAVE_PIDS'): + with open('.ansible-%s.pid' % (name,), 'w') as fp: + fp.write(str(os.getpid())) + + +def setup_pool(pool): + """ + Configure a connection multiplexer's :class:`mitogen.service.Pool` with + services accessed by clients and WorkerProcesses. 
+ """ + pool.add(mitogen.service.FileService(router=pool.router)) + pool.add(mitogen.service.PushFileService(router=pool.router)) + pool.add(ansible_mitogen.services.ContextService(router=pool.router)) + pool.add(ansible_mitogen.services.ModuleDepService(pool.router)) + LOG.debug('Service pool configured: size=%d', pool.size) + + +def _setup_responder(responder): + """ + Configure :class:`mitogen.master.ModuleResponder` to only permit + certain packages, and to generate custom responses for certain modules. + """ + responder.whitelist_prefix('ansible') + responder.whitelist_prefix('ansible_mitogen') + + # Ansible 2.3 is compatible with Python 2.4 targets, however + # ansible/__init__.py is not. Instead, executor/module_common.py writes + # out a 2.4-compatible namespace package for unknown reasons. So we + # copy it here. + responder.add_source_override( + fullname='ansible', + path=ansible.__file__, + source=(ANSIBLE_PKG_OVERRIDE % ( + ansible.__version__, + ansible.__author__, + )).encode(), + is_pkg=True, + ) + + +def increase_open_file_limit(): + """ + #549: in order to reduce the possibility of hitting an open files limit, + increase :data:`resource.RLIMIT_NOFILE` from its soft limit to its hard + limit, if they differ. + + It is common that a low soft limit is configured by default, where the hard + limit is much higher. + """ + soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE) + if hard == resource.RLIM_INFINITY: + hard_s = '(infinity)' + # cap in case of O(RLIMIT_NOFILE) algorithm in some subprocess. + hard = 524288 + else: + hard_s = str(hard) + + LOG.debug('inherited open file limits: soft=%d hard=%s', soft, hard_s) + if soft >= hard: + LOG.debug('max open files already set to hard limit: %d', hard) + return + + # OS X is limited by kern.maxfilesperproc sysctl, rather than the + # advertised unlimited hard RLIMIT_NOFILE. Just hard-wire known defaults + # for that sysctl, to avoid the mess of querying it. 
+ for value in (hard, 10240): + try: + resource.setrlimit(resource.RLIMIT_NOFILE, (value, hard)) + LOG.debug('raised soft open file limit from %d to %d', soft, value) + break + except ValueError as e: + LOG.debug('could not raise soft open file limit from %d to %d: %s', + soft, value, e) + + +def common_setup(enable_affinity=True, _init_logging=True): + save_pid('controller') + ansible_mitogen.logging.set_process_name('top') + + if _init_logging: + ansible_mitogen.logging.setup() + + if enable_affinity: + ansible_mitogen.affinity.policy.assign_controller() + + mitogen.utils.setup_gil() + if faulthandler is not None: + faulthandler.enable() + + MuxProcess.profiling = getenv_int('MITOGEN_PROFILING') > 0 + if MuxProcess.profiling: + mitogen.core.enable_profiling() + + MuxProcess.cls_original_env = dict(os.environ) + increase_open_file_limit() + + +def get_cpu_count(default=None): + """ + Get the multiplexer CPU count from the MITOGEN_CPU_COUNT environment + variable, returning `default` if one isn't set, or is out of range. + + :param int default: + Default CPU, or :data:`None` to use all available CPUs. + """ + max_cpus = multiprocessing.cpu_count() + if default is None: + default = max_cpus + + cpu_count = getenv_int('MITOGEN_CPU_COUNT', default=default) + if cpu_count < 1 or cpu_count > max_cpus: + cpu_count = default + + return cpu_count + + +class Broker(mitogen.master.Broker): + """ + WorkerProcess maintains fewer file descriptors, therefore does not need + the exuberant syscall expense of EpollPoller, so override it and restore + the poll() poller. + """ + poller_class = mitogen.parent.POLLER_LIGHTWEIGHT + + +class Binding(object): + """ + Represent a bound connection for a particular inventory hostname. When + operating in sharded mode, the actual MuxProcess implementing a connection + varies according to the target machine. Depending on the particular + implementation, this class represents a binding to the correct MuxProcess. 
+ """ + def get_child_service_context(self): + """ + Return the :class:`mitogen.core.Context` to which children should + direct requests for services such as FileService, or :data:`None` for + the local process. + + This can be different from :meth:`get_service_context` where MuxProcess + and WorkerProcess are combined, and it is discovered a task is + delegated after being assigned to its initial worker for the original + un-delegated hostname. In that case, connection management and + expensive services like file transfer must be implemented by the + MuxProcess connected to the target, rather than routed to the + MuxProcess responsible for executing the task. + """ + raise NotImplementedError() + + def get_service_context(self): + """ + Return the :class:`mitogen.core.Context` to which this process should + direct ContextService requests, or :data:`None` for the local process. + """ + raise NotImplementedError() + + def close(self): + """ + Finalize any associated resources. + """ + raise NotImplementedError() + + +class WorkerModel(object): + """ + Interface used by StrategyMixin to manage various Mitogen services, by + default running in one or more connection multiplexer subprocesses spawned + off the top-level Ansible process. + """ + def on_strategy_start(self): + """ + Called prior to strategy start in the top-level process. Responsible + for preparing any worker/connection multiplexer state. + """ + raise NotImplementedError() + + def on_strategy_complete(self): + """ + Called after strategy completion in the top-level process. Must place + Ansible back in a "compatible" state where any other strategy plug-in + may execute. + """ + raise NotImplementedError() + + def get_binding(self, inventory_name): + """ + Return a :class:`Binding` to access Mitogen services for + `inventory_name`. Usually called from worker processes, but may also be + called from top-level process to handle "meta: reset_connection". 
The parent chooses + UNIX socket paths
+ + :param bool _init_logging: + For testing, if :data:`False`, don't initialize logging. + """ + # #573: The process ID that installed the :mod:`atexit` handler. If + # some unknown Ansible plug-in forks the Ansible top-level process and + # later performs a graceful Python exit, it may try to wait for child + # PIDs it never owned, causing a crash. We want to avoid that. + self._pid = os.getpid() + + common_setup(_init_logging=_init_logging) + + self.parent_sock, self.child_sock = socket.socketpair() + mitogen.core.set_cloexec(self.parent_sock.fileno()) + mitogen.core.set_cloexec(self.child_sock.fileno()) + + self._muxes = [ + MuxProcess(self, index) + for index in range(get_cpu_count(default=1)) + ] + for mux in self._muxes: + mux.start() + + atexit.register(self._on_process_exit) + self.child_sock.close() + self.child_sock = None + + def _listener_for_name(self, name): + """ + Given an inventory hostname, return the UNIX listener that should + communicate with it. This is a simple hash of the inventory name. + """ + mux = self._muxes[abs(hash(name)) % len(self._muxes)] + LOG.debug('will use multiplexer %d (%s) to connect to "%s"', + mux.index, mux.path, name) + return mux.path + + def _reconnect(self, path): + if self.router is not None: + # Router can just be overwritten, but the previous parent + # connection must explicitly be removed from the broker first. + self.router.disconnect(self.parent) + self.parent = None + self.router = None + + try: + self.router, self.parent = mitogen.unix.connect( + path=path, + broker=self.broker, + ) + except mitogen.unix.ConnectError as e: + # This is not AnsibleConnectionFailure since we want to break + # with_items loops. + raise ansible.errors.AnsibleError(shutting_down_msg % (e,)) + + self.router.max_message_size = MAX_MESSAGE_SIZE + self.listener_path = path + + def _on_process_exit(self): + """ + This is an :mod:`atexit` handler installed in the top-level process. 
+ + Shut the write end of `sock`, causing the receive side of the socket in + every :class:`MuxProcess` to return 0-byte reads, and causing their + main threads to wake and initiate shutdown. After shutting the socket + down, wait on each child to finish exiting. + + This is done using :mod:`atexit` since Ansible lacks any better hook to + run code during exit, and unless some synchronization exists with + MuxProcess, debug logs may appear on the user's terminal *after* the + prompt has been printed. + """ + if self._pid != os.getpid(): + return + + try: + self.parent_sock.shutdown(socket.SHUT_WR) + except socket.error: + # Already closed. This is possible when tests are running. + LOG.debug('_on_process_exit: ignoring duplicate call') + return + + mitogen.core.io_op(self.parent_sock.recv, 1) + self.parent_sock.close() + + for mux in self._muxes: + _, status = os.waitpid(mux.pid, 0) + status = mitogen.fork._convert_exit_status(status) + LOG.debug('multiplexer %d PID %d %s', mux.index, mux.pid, + mitogen.parent.returncode_to_str(status)) + + def _test_reset(self): + """ + Used to clean up in unit tests. + """ + self.on_binding_close() + self._on_process_exit() + set_worker_model(None) + + global _classic_worker_model + _classic_worker_model = None + + def on_strategy_start(self): + """ + See WorkerModel.on_strategy_start(). + """ + + def on_strategy_complete(self): + """ + See WorkerModel.on_strategy_complete(). + """ + + def get_binding(self, inventory_name): + """ + See WorkerModel.get_binding(). 
+ """ + if self.broker is None: + self.broker = Broker() + + path = self._listener_for_name(inventory_name) + if path != self.listener_path: + self._reconnect(path) + + return ClassicBinding(self) + + def on_binding_close(self): + if not self.broker: + return + + self.broker.shutdown() + self.broker.join() + self.router = None + self.broker = None + self.parent = None + self.listener_path = None + + # #420: Ansible executes "meta" actions in the top-level process, + # meaning "reset_connection" will cause :class:`mitogen.core.Latch` FDs + # to be cached and erroneously shared by children on subsequent + # WorkerProcess forks. To handle that, call on_fork() to ensure any + # shared state is discarded. + # #490: only attempt to clean up when it's known that some resources + # exist to cleanup, otherwise later __del__ double-call to close() due + # to GC at random moment may obliterate an unrelated Connection's + # related resources. + mitogen.fork.on_fork() + + +class MuxProcess(object): + """ + Implement a subprocess forked from the Ansible top-level, as a safe place + to contain the Mitogen IO multiplexer thread, keeping its use of the + logging package (and the logging package's heavy use of locks) far away + from os.fork(), which is used continuously by the multiprocessing package + in the top-level process. + + The problem with running the multiplexer in that process is that should the + multiplexer thread be in the process of emitting a log entry (and holding + its lock) at the point of fork, in the child, the first attempt to log any + log entry using the same handler will deadlock the child, as in the memory + image the child received, the lock will always be marked held. + + See https://bugs.python.org/issue6721 for a thorough description of the + class of problems this worker is intended to avoid. + """ + #: A copy of :data:`os.environ` at the time the multiplexer process was + #: started. 
 It's used by mitogen_local.py to find changes made to the
+    #: top-level environment (e.g. vars plugins -- issue #297) that must be
+    #: applied to locally executed commands and modules.
+    cls_original_env = None
+
+    def __init__(self, model, index):
+        #: :class:`ClassicWorkerModel` instance we were created by.
+        self.model = model
+        #: MuxProcess CPU index.
+        self.index = index
+        #: Individual path of this process.
+        self.path = mitogen.unix.make_socket_path()
+
+    def start(self):
+        self.pid = os.fork()
+        if self.pid:
+            # Wait for child to boot before continuing.
+            mitogen.core.io_op(self.model.parent_sock.recv, 1)
+            return
+
+        ansible_mitogen.logging.set_process_name('mux:' + str(self.index))
+        if setproctitle:
+            setproctitle.setproctitle('mitogen mux:%s (%s)' % (
+                self.index,
+                os.path.basename(self.path),
+            ))
+
+        self.model.parent_sock.close()
+        self.model.parent_sock = None
+        try:
+            try:
+                self.worker_main()
+            except Exception:
+                LOG.exception('worker_main() crashed')
+        finally:
+            sys.exit()
+
+    def worker_main(self):
+        """
+        The main function of the mux process: setup the Mitogen broker thread
+        and ansible_mitogen services, then sleep waiting for the socket
+        connected to the parent to be closed (indicating the parent has died).
+        """
+        save_pid('mux')
+
+        # #623: MuxProcess ignores SIGINT because it wants to live until every
+        # Ansible worker process has been cleaned up by
+        # TaskQueueManager.cleanup(), otherwise harmless yet scary warnings
+        # about being unable to connect to MuxProcess could be printed.
+        signal.signal(signal.SIGINT, signal.SIG_IGN)
+        ansible_mitogen.logging.set_process_name('mux')
+        ansible_mitogen.affinity.policy.assign_muxprocess(self.index)
+
+        self._setup_master()
+        self._setup_services()
+
+        try:
+            # Let the parent know our listening socket is ready.
+            mitogen.core.io_op(self.model.child_sock.send, b('1'))
+            # Block until the socket is closed, which happens on parent exit.
+ mitogen.core.io_op(self.model.child_sock.recv, 1) + finally: + self.broker.shutdown() + self.broker.join() + + # Test frameworks living somewhere higher on the stack of the + # original parent process may try to catch sys.exit(), so do a C + # level exit instead. + os._exit(0) + + def _enable_router_debug(self): + if 'MITOGEN_ROUTER_DEBUG' in os.environ: + self.router.enable_debug() + + def _enable_stack_dumps(self): + secs = getenv_int('MITOGEN_DUMP_THREAD_STACKS', default=0) + if secs: + mitogen.debug.dump_to_logger(secs=secs) + + def _setup_master(self): + """ + Construct a Router, Broker, and mitogen.unix listener + """ + self.broker = mitogen.master.Broker(install_watcher=False) + self.router = mitogen.master.Router( + broker=self.broker, + max_message_size=MAX_MESSAGE_SIZE, + ) + _setup_responder(self.router.responder) + mitogen.core.listen(self.broker, 'shutdown', self._on_broker_shutdown) + mitogen.core.listen(self.broker, 'exit', self._on_broker_exit) + self.listener = mitogen.unix.Listener.build_stream( + router=self.router, + path=self.path, + backlog=C.DEFAULT_FORKS, + ) + self._enable_router_debug() + self._enable_stack_dumps() + + def _setup_services(self): + """ + Construct a ContextService and a thread to service requests for it + arriving from worker processes. + """ + self.pool = mitogen.service.Pool( + router=self.router, + size=getenv_int('MITOGEN_POOL_SIZE', default=32), + ) + setup_pool(self.pool) + + def _on_broker_shutdown(self): + """ + Respond to broker shutdown by shutting down the pool. Do not join on it + yet, since that would block the broker thread which then cannot clean + up pending handlers and connections, which is required for the threads + to exit gracefully. + """ + self.pool.stop(join=False) + + def _on_broker_exit(self): + """ + Respond to the broker thread about to exit by finally joining on the + pool. 
This is safe since pools only block in connection attempts, and + connection attempts fail with CancelledError when broker shutdown + begins. + """ + self.pool.join() diff --git a/mitogen-0.3.9/ansible_mitogen/runner.py b/mitogen-0.3.9/ansible_mitogen/runner.py new file mode 100644 index 0000000..8da1b67 --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/runner.py @@ -0,0 +1,1101 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+ +# !mitogen: minify_safe + +""" +These classes implement execution for each style of Ansible module. They are +instantiated in the target context by way of target.py::run_module(). + +Each class in here has a corresponding Planner class in planners.py that knows +how to build arguments for it, preseed related data, etc. +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import atexit +import json +import os +import re +import shlex +import shutil +import sys +import tempfile +import traceback +import types + +import mitogen.core +import ansible_mitogen.target # TODO: circular import +from mitogen.core import b +from mitogen.core import bytes_partition +from mitogen.core import str_rpartition +from mitogen.core import to_text + +try: + import ctypes +except ImportError: + # Python 2.4 + ctypes = None + +try: + # Python >= 3.4, PEP 451 ModuleSpec API + import importlib.machinery + import importlib.util +except ImportError: + # Python < 3.4, PEP 302 Import Hooks + import imp + +try: + # Cannot use cStringIO as it does not support Unicode. + from StringIO import StringIO +except ImportError: + from io import StringIO + +try: + from shlex import quote as shlex_quote +except ImportError: + from pipes import quote as shlex_quote + +# Absolute imports for <2.5. +logging = __import__('logging') + + +# Prevent accidental import of an Ansible module from hanging on stdin read. +import ansible.module_utils.basic +ansible.module_utils.basic._ANSIBLE_ARGS = '{}' + +# For tasks that modify /etc/resolv.conf, non-Debian derivative glibcs cache +# resolv.conf at startup and never implicitly reload it. Cope with that via an +# explicit call to res_init() on each task invocation. BSD-alikes export it +# directly, Linux #defines it as "__res_init". 
+libc__res_init = None +if ctypes: + libc = ctypes.CDLL(None) + for symbol in 'res_init', '__res_init': + try: + libc__res_init = getattr(libc, symbol) + except AttributeError: + pass + +iteritems = getattr(dict, 'iteritems', dict.items) +LOG = logging.getLogger(__name__) + + +def shlex_split_b(s): + """ + Use shlex.split() to split characters in some single-byte encoding, without + knowing what that encoding is. The input is bytes, the output is a list of + bytes. + """ + assert isinstance(s, mitogen.core.BytesType) + if mitogen.core.PY3: + return [ + t.encode('latin1') + for t in shlex.split(s.decode('latin1'), comments=True) + ] + + return [t for t in shlex.split(s, comments=True)] + + +class TempFileWatcher(object): + """ + Since Ansible 2.7.0, lineinfile leaks file descriptors returned by + :func:`tempfile.mkstemp` (ansible/ansible#57327). Handle this and all + similar cases by recording descriptors produced by mkstemp during module + execution, and cleaning up any leaked descriptors on completion. + """ + def __init__(self): + self._real_mkstemp = tempfile.mkstemp + # (fd, st.st_dev, st.st_ino) + self._fd_dev_inode = [] + tempfile.mkstemp = self._wrap_mkstemp + + def _wrap_mkstemp(self, *args, **kwargs): + fd, path = self._real_mkstemp(*args, **kwargs) + st = os.fstat(fd) + self._fd_dev_inode.append((fd, st.st_dev, st.st_ino)) + return fd, path + + def revert(self): + tempfile.mkstemp = self._real_mkstemp + for tup in self._fd_dev_inode: + self._revert_one(*tup) + + def _revert_one(self, fd, st_dev, st_ino): + try: + st = os.fstat(fd) + except OSError: + # FD no longer exists. + return + + if not (st.st_dev == st_dev and st.st_ino == st_ino): + # FD reused. 

+            return
+
+        LOG.info("a tempfile.mkstemp() FD was leaked during the last task")
+        os.close(fd)
+
+
+class EnvironmentFileWatcher(object):
+    """
+    Usually Ansible edits to /etc/environment and ~/.pam_environment are
+    reflected in subsequent tasks if become:true or SSH multiplexing is
+    disabled, due to sudo and/or SSH reinvoking pam_env. Rather than emulate
+    existing semantics, do our best to ensure edits are always reflected.
+
+    This can't perfectly replicate the existing behaviour, but it can safely
+    update and remove keys that appear to originate in `path`, and that do not
+    conflict with any existing environment key inherited from elsewhere.
+
+    A more robust future approach may simply be to arrange for the persistent
+    interpreter to restart when a change is detected.
+    """
+    # We know nothing about the character set of /etc/environment or the
+    # process environment.
+    environ = getattr(os, 'environb', os.environ)
+
+    def __init__(self, path):
+        self.path = os.path.expanduser(path)
+        #: Inode data at time of last check.
+        self._st = self._stat()
+        #: List of inherited keys appearing to originate from this file.
+ self._keys = [ + key for key, value in self._load() + if value == self.environ.get(key) + ] + LOG.debug('%r installed; existing keys: %r', self, self._keys) + + def __repr__(self): + return 'EnvironmentFileWatcher(%r)' % (self.path,) + + def _stat(self): + try: + return os.stat(self.path) + except OSError: + return None + + def _load(self): + try: + fp = open(self.path, 'rb') + try: + return list(self._parse(fp)) + finally: + fp.close() + except IOError: + return [] + + def _parse(self, fp): + """ + linux-pam-1.3.1/modules/pam_env/pam_env.c#L207 + """ + for line in fp: + # ' #export foo=some var ' -> ['#export', 'foo=some var '] + bits = shlex_split_b(line) + if (not bits) or bits[0].startswith(b('#')): + continue + + if bits[0] == b('export'): + bits.pop(0) + + key, sep, value = bytes_partition(b(' ').join(bits), b('=')) + if key and sep: + yield key, value + + def _on_file_changed(self): + LOG.debug('%r: file changed, reloading', self) + for key, value in self._load(): + if key in self.environ: + LOG.debug('%r: existing key %r=%r exists, not setting %r', + self, key, self.environ[key], value) + else: + LOG.debug('%r: setting key %r to %r', self, key, value) + self._keys.append(key) + self.environ[key] = value + + def _remove_existing(self): + """ + When a change is detected, remove keys that existed in the old file. + """ + for key in self._keys: + if key in self.environ: + LOG.debug('%r: removing old key %r', self, key) + del self.environ[key] + self._keys = [] + + def check(self): + """ + Compare the :func:`os.stat` for the pam_env style environmnt file + `path` with the previous result `old_st`, which may be :data:`None` if + the previous stat attempt failed. Reload its contents if the file has + changed or appeared since last attempt. + + :returns: + New :func:`os.stat` result. The new call to :func:`reload_env` should + pass it as the value of `old_st`. 
+ """ + st = self._stat() + if self._st == st: + return + + self._st = st + self._remove_existing() + + if st is None: + LOG.debug('%r: file has disappeared', self) + else: + self._on_file_changed() + +_pam_env_watcher = EnvironmentFileWatcher('~/.pam_environment') +_etc_env_watcher = EnvironmentFileWatcher('/etc/environment') + + +def utf8(s): + """ + Coerce an object to bytes if it is Unicode. + """ + if isinstance(s, mitogen.core.UnicodeType): + s = s.encode('utf-8') + return s + + +def reopen_readonly(fp): + """ + Replace the file descriptor belonging to the file object `fp` with one + open on the same file (`fp.name`), but opened with :py:data:`os.O_RDONLY`. + This enables temporary files to be executed on Linux, which usually throws + ``ETXTBUSY`` if any writeable handle exists pointing to a file passed to + `execve()`. + """ + fd = os.open(fp.name, os.O_RDONLY) + os.dup2(fd, fp.fileno()) + os.close(fd) + + +class Runner(object): + """ + Ansible module runner. After instantiation (with kwargs supplied by the + corresponding Planner), `.run()` is invoked, upon which `setup()`, + `_run()`, and `revert()` are invoked, with the return value of `_run()` + returned by `run()`. + + Subclasses may override `_run`()` and extend `setup()` and `revert()`. + + :param str module: + Name of the module to execute, e.g. "shell" + :param mitogen.core.Context service_context: + Context to which we should direct FileService calls. For now, always + the connection multiplexer process on the controller. + :param str json_args: + Ansible module arguments. A mixture of user and internal keys created + by :meth:`ansible.plugins.action.ActionBase._execute_module`. + + This is passed as a string rather than a dict in order to mimic the + implicit bytes/str conversion behaviour of a 2.x controller running + against a 3.x target. 
+ :param str good_temp_dir: + The writeable temporary directory for this user account reported by + :func:`ansible_mitogen.target.init_child` passed via the controller. + This is specified explicitly to remain compatible with Ansible<2.5, and + for forked tasks where init_child never runs. + :param dict env: + Additional environment variables to set during the run. Keys with + :data:`None` are unset if present. + :param str cwd: + If not :data:`None`, change to this directory before executing. + :param mitogen.core.ExternalContext econtext: + When `detach` is :data:`True`, a reference to the ExternalContext the + runner is executing in. + :param bool detach: + When :data:`True`, indicate the runner should detach the context from + its parent after setup has completed successfully. + """ + def __init__(self, module, service_context, json_args, good_temp_dir, + extra_env=None, cwd=None, env=None, econtext=None, + detach=False): + self.module = module + self.service_context = service_context + self.econtext = econtext + self.detach = detach + self.args = json.loads(mitogen.core.to_text(json_args)) + self.good_temp_dir = good_temp_dir + self.extra_env = extra_env + self.env = env + self.cwd = cwd + #: If not :data:`None`, :meth:`get_temp_dir` had to create a temporary + #: directory for this run, because we're in an asynchronous task, or + #: because the originating action did not create a directory. 
+ self._temp_dir = None + + def get_temp_dir(self): + path = self.args.get('_ansible_tmpdir') + if path is not None: + return path + + if self._temp_dir is None: + self._temp_dir = tempfile.mkdtemp( + prefix='ansible_mitogen_runner_', + dir=self.good_temp_dir, + ) + + return self._temp_dir + + def revert_temp_dir(self): + if self._temp_dir is not None: + ansible_mitogen.target.prune_tree(self._temp_dir) + self._temp_dir = None + + def setup(self): + """ + Prepare for running a module, including fetching necessary dependencies + from the parent, as :meth:`run` may detach prior to beginning + execution. The base implementation simply prepares the environment. + """ + self._setup_cwd() + self._setup_environ() + + def _setup_cwd(self): + """ + For situations like sudo to a non-privileged account, CWD could be + $HOME of the old account, which could have mode go=, which means it is + impossible to restore the old directory, so don't even try. + """ + if self.cwd: + os.chdir(self.cwd) + + def _setup_environ(self): + """ + Apply changes from /etc/environment files before creating a + TemporaryEnvironment to snapshot environment state prior to module run. + """ + _pam_env_watcher.check() + _etc_env_watcher.check() + env = dict(self.extra_env or {}) + if self.env: + env.update(self.env) + self._env = TemporaryEnvironment(env) + + def _revert_cwd(self): + """ + #591: make a best-effort attempt to return to :attr:`good_temp_dir`. + """ + try: + os.chdir(self.good_temp_dir) + except OSError: + LOG.debug('%r: could not restore CWD to %r', + self, self.good_temp_dir) + + def revert(self): + """ + Revert any changes made to the process after running a module. The base + implementation simply restores the original environment. + """ + self._revert_cwd() + self._env.revert() + self.revert_temp_dir() + + def _run(self): + """ + The _run() method is expected to return a dictionary in the form of + ActionBase._low_level_execute_command() output, i.e. 
having:: + + { + "rc": int, + "stdout": "stdout data", + "stderr": "stderr data" + } + """ + raise NotImplementedError() + + def run(self): + """ + Set up the process environment in preparation for running an Ansible + module. This monkey-patches the Ansible libraries in various places to + prevent it from trying to kill the process on completion, and to + prevent it from reading sys.stdin. + + :returns: + Module result dictionary. + """ + self.setup() + if self.detach: + self.econtext.detach() + + try: + return self._run() + finally: + self.revert() + + +class AtExitWrapper(object): + """ + issue #397, #454: Newer Ansibles use :func:`atexit.register` to trigger + tmpdir cleanup when AnsibleModule.tmpdir is responsible for creating its + own temporary directory, however with Mitogen processes are preserved + across tasks, meaning cleanup must happen earlier. + + Patch :func:`atexit.register`, catching :func:`shutil.rmtree` calls so they + can be executed on task completion, rather than on process shutdown. + """ + # Wrapped in a dict to avoid instance method decoration. + original = { + 'register': atexit.register + } + + def __init__(self): + assert atexit.register == self.original['register'], \ + "AtExitWrapper installed twice." + atexit.register = self._atexit__register + self.deferred = [] + + def revert(self): + """ + Restore the original :func:`atexit.register`. + """ + assert atexit.register == self._atexit__register, \ + "AtExitWrapper not installed." + atexit.register = self.original['register'] + + def run_callbacks(self): + while self.deferred: + func, targs, kwargs = self.deferred.pop() + try: + func(*targs, **kwargs) + except Exception: + LOG.exception('While running atexit callbacks') + + def _atexit__register(self, func, *targs, **kwargs): + """ + Intercept :func:`atexit.register` calls, diverting any to + :func:`shutil.rmtree` into a private list. 

+        """
+        if func == shutil.rmtree:
+            self.deferred.append((func, targs, kwargs))
+            return
+
+        self.original['register'](func, *targs, **kwargs)
+
+
+class ModuleUtilsImporter(object):
+    """
+    :param list module_utils:
+        List of `(fullname, path, is_pkg)` tuples.
+    """
+    def __init__(self, context, module_utils):
+        self._context = context
+        self._by_fullname = dict(
+            (fullname, (path, is_pkg))
+            for fullname, path, is_pkg in module_utils
+        )
+        self._loaded = set()
+        sys.meta_path.insert(0, self)
+
+    def revert(self):
+        sys.meta_path.remove(self)
+        for fullname in self._loaded:
+            sys.modules.pop(fullname, None)
+
+    def find_module(self, fullname, path=None):
+        """
+        Return a loader for the module with fullname, if we will load it.
+
+        Implements importlib.abc.MetaPathFinder.find_module().
+        Deprecated in Python 3.4+, replaced by find_spec().
+        Raises ImportWarning in Python 3.10+. Removed in Python 3.12.
+        """
+        if fullname in self._by_fullname:
+            return self
+
+    def find_spec(self, fullname, path, target=None):
+        """
+        Return a `ModuleSpec` for module with `fullname` if we will load it.
+        Otherwise return `None`.
+
+        Implements importlib.abc.MetaPathFinder.find_spec(). Python 3.4+.
+        """
+        if fullname.endswith('.'):
+            return None
+
+        try:
+            module_path, is_package = self._by_fullname[fullname]
+        except KeyError:
+            LOG.debug('Skipping %s: not present', fullname)
+            return None
+
+        LOG.debug('Handling %s', fullname)
+        origin = 'master:%s' % (module_path,)
+        return importlib.machinery.ModuleSpec(
+            fullname, loader=self, origin=origin, is_package=is_package,
+        )
+
+    def create_module(self, spec):
+        """
+        Return a module object for the given ModuleSpec.
+
+        Implements PEP-451 importlib.abc.Loader API introduced in Python 3.4.
+        Unlike Loader.load_module() this shouldn't populate sys.modules or
+        set module attributes. Both are done by Python.
+ """ + module = types.ModuleType(spec.name) + # FIXME create_module() shouldn't initialise module attributes + module.__file__ = spec.origin + return module + + def exec_module(self, module): + """ + Execute the module to initialise it. Don't return anything. + + Implements PEP-451 importlib.abc.Loader API, introduced in Python 3.4. + """ + spec = module.__spec__ + path, _ = self._by_fullname[spec.name] + source = ansible_mitogen.target.get_small_file(self._context, path) + code = compile(source, path, 'exec', 0, 1) + exec(code, module.__dict__) + self._loaded.add(spec.name) + + def load_module(self, fullname): + """ + Return the loaded module specified by fullname. + + Implements PEP 302 importlib.abc.Loader.load_module(). + Deprecated in Python 3.4+, replaced by create_module() & exec_module(). + """ + path, is_pkg = self._by_fullname[fullname] + source = ansible_mitogen.target.get_small_file(self._context, path) + code = compile(source, path, 'exec', 0, 1) + mod = sys.modules.setdefault(fullname, imp.new_module(fullname)) + mod.__file__ = "master:%s" % (path,) + mod.__loader__ = self + if is_pkg: + mod.__path__ = [] + mod.__package__ = str(fullname) + else: + mod.__package__ = str(str_rpartition(to_text(fullname), '.')[0]) + exec(code, mod.__dict__) + self._loaded.add(fullname) + return mod + + +class TemporaryEnvironment(object): + """ + Apply environment changes from `env` until :meth:`revert` is called. Values + in the dict may be :data:`None` to indicate the relevant key should be + deleted. + """ + def __init__(self, env=None): + self.original = dict(os.environ) + self.env = env or {} + for key, value in iteritems(self.env): + key = mitogen.core.to_text(key) + value = mitogen.core.to_text(value) + if value is None: + os.environ.pop(key, None) + else: + os.environ[key] = str(value) + + def revert(self): + """ + Revert changes made by the module to the process environment. This must + always run, as some modules (e.g. 
git.py) set variables like GIT_SSH + that must be cleared out between runs. + """ + os.environ.clear() + os.environ.update(self.original) + + +class TemporaryArgv(object): + def __init__(self, argv): + self.original = sys.argv[:] + sys.argv[:] = map(str, argv) + + def revert(self): + sys.argv[:] = self.original + + +class NewStyleStdio(object): + """ + Patch ansible.module_utils.basic argument globals. + """ + def __init__(self, args, temp_dir): + self.temp_dir = temp_dir + self.original_stdout = sys.stdout + self.original_stderr = sys.stderr + self.original_stdin = sys.stdin + sys.stdout = StringIO() + sys.stderr = StringIO() + encoded = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + ansible.module_utils.basic._ANSIBLE_ARGS = utf8(encoded) + sys.stdin = StringIO(mitogen.core.to_text(encoded)) + + self.original_get_path = getattr(ansible.module_utils.basic, + 'get_module_path', None) + ansible.module_utils.basic.get_module_path = self._get_path + + def _get_path(self): + return self.temp_dir + + def revert(self): + ansible.module_utils.basic.get_module_path = self.original_get_path + sys.stdout = self.original_stdout + sys.stderr = self.original_stderr + sys.stdin = self.original_stdin + ansible.module_utils.basic._ANSIBLE_ARGS = '{}' + + +class ProgramRunner(Runner): + """ + Base class for runners that run external programs. + + :param str path: + Absolute path to the program file on the master, as it can be retrieved + via :class:`mitogen.service.FileService`. + :param bool emulate_tty: + If :data:`True`, execute the program with `stdout` and `stderr` merged + into a single pipe, emulating Ansible behaviour when an SSH TTY is in + use. + """ + def __init__(self, path, emulate_tty=None, **kwargs): + super(ProgramRunner, self).__init__(**kwargs) + self.emulate_tty = emulate_tty + self.path = path + + def setup(self): + super(ProgramRunner, self).setup() + self._setup_program() + + def _get_program_filename(self): + """ + Return the filename used for program on disk. 
Ansible uses the original + filename for non-Ansiballz runs, and "ansible_module_+filename for + Ansiballz runs. + """ + return os.path.basename(self.path) + + program_fp = None + + def _setup_program(self): + """ + Create a temporary file containing the program code. The code is + fetched via :meth:`_get_program`. + """ + filename = self._get_program_filename() + path = os.path.join(self.get_temp_dir(), filename) + self.program_fp = open(path, 'wb') + self.program_fp.write(self._get_program()) + self.program_fp.flush() + os.chmod(self.program_fp.name, int('0700', 8)) + reopen_readonly(self.program_fp) + + def _get_program(self): + """ + Fetch the module binary from the master if necessary. + """ + return ansible_mitogen.target.get_small_file( + context=self.service_context, + path=self.path, + ) + + def _get_program_args(self): + """ + Return any arguments to pass to the program. + """ + return [] + + def revert(self): + """ + Delete the temporary program file. + """ + if self.program_fp: + self.program_fp.close() + super(ProgramRunner, self).revert() + + def _get_argv(self): + """ + Return the final argument vector used to execute the program. 
+ """ + return [ + self.args.get('_ansible_shell_executable', '/bin/sh'), + '-c', + self._get_shell_fragment(), + ] + + def _get_shell_fragment(self): + return "%s %s" % ( + shlex_quote(self.program_fp.name), + ' '.join(map(shlex_quote, self._get_program_args())), + ) + + def _run(self): + try: + rc, stdout, stderr = ansible_mitogen.target.exec_args( + args=self._get_argv(), + emulate_tty=self.emulate_tty, + ) + except Exception: + LOG.exception('While running %s', self._get_argv()) + e = sys.exc_info()[1] + return { + u'rc': 1, + u'stdout': u'', + u'stderr': u'%s: %s' % (type(e), e), + } + + return { + u'rc': rc, + u'stdout': mitogen.core.to_text(stdout), + u'stderr': mitogen.core.to_text(stderr), + } + + +class ArgsFileRunner(Runner): + def setup(self): + super(ArgsFileRunner, self).setup() + self._setup_args() + + def _setup_args(self): + """ + Create a temporary file containing the module's arguments. The + arguments are formatted via :meth:`_get_args`. + """ + self.args_fp = tempfile.NamedTemporaryFile( + prefix='ansible_mitogen', + suffix='-args', + dir=self.get_temp_dir(), + ) + self.args_fp.write(utf8(self._get_args_contents())) + self.args_fp.flush() + reopen_readonly(self.program_fp) + + def _get_args_contents(self): + """ + Return the module arguments formatted as JSON. + """ + return json.dumps(self.args) + + def _get_program_args(self): + return [self.args_fp.name] + + def revert(self): + """ + Delete the temporary argument file. 
+ """ + self.args_fp.close() + super(ArgsFileRunner, self).revert() + + +class BinaryRunner(ArgsFileRunner, ProgramRunner): + pass + + +class ScriptRunner(ProgramRunner): + def __init__(self, interpreter_fragment, is_python, **kwargs): + super(ScriptRunner, self).__init__(**kwargs) + self.interpreter_fragment = interpreter_fragment + self.is_python = is_python + + b_ENCODING_STRING = b('# -*- coding: utf-8 -*-') + + def _get_program(self): + return self._rewrite_source( + super(ScriptRunner, self)._get_program() + ) + + def _get_argv(self): + return [ + self.args.get('_ansible_shell_executable', '/bin/sh'), + '-c', + self._get_shell_fragment(), + ] + + def _get_shell_fragment(self): + """ + Scripts are eligible for having their hashbang line rewritten, and to + be executed via /bin/sh using the ansible_*_interpreter value used as a + shell fragment prefixing to the invocation. + """ + return "%s %s %s" % ( + self.interpreter_fragment, + shlex_quote(self.program_fp.name), + ' '.join(map(shlex_quote, self._get_program_args())), + ) + + def _rewrite_source(self, s): + """ + Mutate the source according to the per-task parameters. + """ + # While Ansible rewrites the #! using ansible_*_interpreter, it is + # never actually used to execute the script, instead it is a shell + # fragment consumed by shell/__init__.py::build_module_command(). + new = [b('#!') + utf8(self.interpreter_fragment)] + if self.is_python: + new.append(self.b_ENCODING_STRING) + + _, _, rest = bytes_partition(s, b('\n')) + new.append(rest) + return b('\n').join(new) + + +class NewStyleRunner(ScriptRunner): + """ + Execute a new-style Ansible module, where Module Replacer-related tricks + aren't required. + """ + #: path => new-style module bytecode. 
+ _code_by_path = {} + + def __init__(self, module_map, py_module_name, **kwargs): + super(NewStyleRunner, self).__init__(**kwargs) + self.module_map = module_map + self.py_module_name = py_module_name + + def _setup_imports(self): + """ + Ensure the local importer and PushFileService has everything for the + Ansible module before setup() completes, but before detach() is called + in an asynchronous task. + + The master automatically streams modules towards us concurrent to the + runner invocation, however there is no public API to synchronize on the + completion of those preloads. Instead simply reuse the importer's + synchronization mechanism by importing everything the module will need + prior to detaching. + """ + # I think "custom" means "found in custom module_utils search path", + # e.g. playbook relative dir, ~/.ansible/..., Ansible collection. + for fullname, _, _ in self.module_map['custom']: + mitogen.core.import_module(fullname) + + # I think "builtin" means "part of ansible/ansible-base/ansible-core", + # as opposed to Python builtin modules such as sys. + for fullname in self.module_map['builtin']: + try: + mitogen.core.import_module(fullname) + except ImportError as exc: + # #590: Ansible 2.8 module_utils.distro is a package that + # replaces itself in sys.modules with a non-package during + # import. Prior to replacement, it is a real package containing + # a '_distro' submodule which is used on 2.x. Given a 2.x + # controller and 3.x target, the import hook never needs to run + # again before this replacement occurs, and 'distro' is + # replaced with a module from the stdlib. In this case as this + # loop progresses to the next entry and attempts to preload + # 'distro._distro', the import mechanism will fail. So here we + # silently ignore any failure for it. + if fullname == 'ansible.module_utils.distro._distro': + continue + + # ansible.module_utils.compat.selinux raises ImportError if it + # can't load libselinux.so. 
The importer would usually catch + # this & skip selinux operations. We don't care about selinux, + # we're using import to get a copy of the module. + if (fullname == 'ansible.module_utils.compat.selinux' + and exc.msg == 'unable to load libselinux.so'): + continue + + raise + + def _setup_excepthook(self): + """ + Starting with Ansible 2.6, some modules (file.py) install a + sys.excepthook and never clean it up. So we must preserve the original + excepthook and restore it after the run completes. + """ + self.original_excepthook = sys.excepthook + + def setup(self): + super(NewStyleRunner, self).setup() + + self._stdio = NewStyleStdio(self.args, self.get_temp_dir()) + # It is possible that not supplying the script filename will break some + # module, but this has never been a bug report. Instead act like an + # interpreter that had its script piped on stdin. + self._argv = TemporaryArgv(['']) + self._temp_watcher = TempFileWatcher() + self._importer = ModuleUtilsImporter( + context=self.service_context, + module_utils=self.module_map['custom'], + ) + self._setup_imports() + self._setup_excepthook() + self.atexit_wrapper = AtExitWrapper() + if libc__res_init: + libc__res_init() + + def _revert_excepthook(self): + sys.excepthook = self.original_excepthook + + def revert(self): + self.atexit_wrapper.revert() + self._temp_watcher.revert() + self._argv.revert() + self._stdio.revert() + self._revert_excepthook() + super(NewStyleRunner, self).revert() + + def _get_program_filename(self): + """ + See ProgramRunner._get_program_filename(). + """ + return 'ansible_module_' + os.path.basename(self.path) + + def _setup_args(self): + pass + + # issue #555: in old times it was considered good form to reload sys and + # change the default encoding. This hack was removed from Ansible long ago, + # but not before permeating into many third party modules. 
+ PREHISTORIC_HACK_RE = re.compile( + b(r'reload\s*\(\s*sys\s*\)\s*' + r'sys\s*\.\s*setdefaultencoding\([^)]+\)') + ) + + def _setup_program(self): + source = ansible_mitogen.target.get_small_file( + context=self.service_context, + path=self.path, + ) + self.source = self.PREHISTORIC_HACK_RE.sub(b(''), source) + + def _get_code(self): + try: + return self._code_by_path[self.path] + except KeyError: + return self._code_by_path.setdefault(self.path, compile( + # Py2.4 doesn't support kwargs. + self.source, # source + "master:" + self.path, # filename + 'exec', # mode + 0, # flags + True, # dont_inherit + )) + + if mitogen.core.PY3: + main_module_name = '__main__' + else: + main_module_name = b('__main__') + + def _handle_magic_exception(self, mod, exc): + """ + Beginning with Ansible >2.6, some modules (file.py) install a + sys.excepthook which is a closure over AnsibleModule, redirecting the + magical exception to AnsibleModule.fail_json(). + + For extra special needs bonus points, the class is not defined in + module_utils, but is defined in the module itself, meaning there is no + type for isinstance() that outlasts the invocation. + """ + klass = getattr(mod, 'AnsibleModuleError', None) + if klass and isinstance(exc, klass): + mod.module.fail_json(**exc.results) + + def _run_code(self, code, mod): + try: + if mitogen.core.PY3: + exec(code, vars(mod)) + else: + exec('exec code in vars(mod)') + except Exception: + self._handle_magic_exception(mod, sys.exc_info()[1]) + raise + + def _get_module_package(self): + """ + Since Ansible 2.9 __package__ must be set in accordance with an + approximation of the original package hierarchy, so that relative + imports function correctly. 
+ """ + pkg, sep, modname = str_rpartition(self.py_module_name, '.') + if not sep: + return None + if mitogen.core.PY3: + return pkg + return pkg.encode() + + def _run(self): + mod = types.ModuleType(self.main_module_name) + mod.__package__ = self._get_module_package() + # Some Ansible modules use __file__ to find the Ansiballz temporary + # directory. We must provide some temporary path in __file__, but we + # don't want to pointlessly write the module to disk when it never + # actually needs to exist. So just pass the filename as it would exist. + mod.__file__ = os.path.join( + self.get_temp_dir(), + 'ansible_module_' + os.path.basename(self.path), + ) + + code = self._get_code() + rc = 2 + try: + try: + self._run_code(code, mod) + except SystemExit: + exc = sys.exc_info()[1] + rc = exc.args[0] + except Exception: + # This writes to stderr by default. + traceback.print_exc() + rc = 1 + + finally: + self.atexit_wrapper.run_callbacks() + + return { + u'rc': rc, + u'stdout': mitogen.core.to_text(sys.stdout.getvalue()), + u'stderr': mitogen.core.to_text(sys.stderr.getvalue()), + } + + +class JsonArgsRunner(ScriptRunner): + JSON_ARGS = b('<>') + + def _get_args_contents(self): + return json.dumps(self.args).encode() + + def _rewrite_source(self, s): + return ( + super(JsonArgsRunner, self)._rewrite_source(s) + .replace(self.JSON_ARGS, self._get_args_contents()) + ) + + +class WantJsonRunner(ArgsFileRunner, ScriptRunner): + pass + + +class OldStyleRunner(ArgsFileRunner, ScriptRunner): + def _get_args_contents(self): + """ + Mimic the argument formatting behaviour of + ActionBase._execute_module(). 
+ """ + return ' '.join( + '%s=%s' % (key, shlex_quote(str(self.args[key]))) + for key in self.args + ) + ' ' # Bug-for-bug :( diff --git a/mitogen-0.3.9/ansible_mitogen/services.py b/mitogen-0.3.9/ansible_mitogen/services.py new file mode 100644 index 0000000..3e9de65 --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/services.py @@ -0,0 +1,565 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+ +# !mitogen: minify_safe + +""" +Classes in this file define Mitogen 'services' that run (initially) within the +connection multiplexer process that is forked off the top-level controller +process. + +Once a worker process connects to a multiplexer process +(Connection._connect()), it communicates with these services to establish new +connections, grant access to files by children, and register for notification +when a child has completed a job. +""" + +from __future__ import absolute_import, division, print_function +from __future__ import unicode_literals +__metaclass__ = type + +import logging +import os +import sys +import threading + +import ansible.constants + +import mitogen.core +import mitogen.service +import ansible_mitogen.loaders +import ansible_mitogen.module_finder +import ansible_mitogen.target +import ansible_mitogen.utils.unsafe + + +LOG = logging.getLogger(__name__) + +# Force load of plugin to ensure ConfigManager has definitions loaded. Done +# during module import to ensure a single-threaded environment; PluginLoader +# is not thread-safe. +ansible_mitogen.loaders.shell_loader.get('sh') + + +if sys.version_info[0] == 3: + def reraise(tp, value, tb): + if value is None: + value = tp() + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value +else: + exec( + "def reraise(tp, value, tb=None):\n" + " raise tp, value, tb\n" + ) + + +def _get_candidate_temp_dirs(): + try: + # >=2.5 + options = ansible.constants.config.get_plugin_options('shell', 'sh') + remote_tmp = options.get('remote_tmp') or ansible.constants.DEFAULT_REMOTE_TMP + system_tmpdirs = options.get('system_tmpdirs', ('/var/tmp', '/tmp')) + except AttributeError: + # 2.3 + remote_tmp = ansible.constants.DEFAULT_REMOTE_TMP + system_tmpdirs = ('/var/tmp', '/tmp') + + return ansible_mitogen.utils.unsafe.cast([remote_tmp] + list(system_tmpdirs)) + + +def key_from_dict(**kwargs): + """ + Return a unique string representation of a dict as quickly as possible. 
+    Used to generate deduplication keys from a request.
+    """
+    out = []
+    stack = [kwargs]
+    while stack:
+        obj = stack.pop()
+        if isinstance(obj, dict):
+            stack.extend(sorted(obj.items()))
+        elif isinstance(obj, (list, tuple)):
+            stack.extend(obj)
+        else:
+            out.append(str(obj))
+    return ''.join(out)
+
+
+class Error(Exception):
+    pass
+
+
+class ContextService(mitogen.service.Service):
+    """
+    Used by workers to fetch the single Context instance corresponding to a
+    connection configuration, creating the matching connection if it does not
+    exist.
+
+    For connection methods and their parameters, see:
+        https://mitogen.readthedocs.io/en/latest/api.html#context-factories
+
+    This concentrates connections in the top-level process, which may become a
+    bottleneck. The bottleneck can be removed using per-CPU connection
+    processes and arranging for the worker to select one according to a hash of
+    the connection parameters (sharding).
+    """
+    max_interpreters = int(os.getenv('MITOGEN_MAX_INTERPRETERS', '20'))
+
+    def __init__(self, *args, **kwargs):
+        super(ContextService, self).__init__(*args, **kwargs)
+        self._lock = threading.Lock()
+        #: Records the :meth:`get` result dict for successful calls, returned
+        #: for identical subsequent calls. Keyed by :meth:`key_from_dict`.
+        self._response_by_key = {}
+        #: List of :class:`mitogen.core.Latch` awaiting the result for a
+        #: particular key.
+        self._latches_by_key = {}
+        #: Mapping of :class:`mitogen.core.Context` -> reference count. Each
+        #: call to :meth:`get` increases this by one. Calls to :meth:`put`
+        #: decrease it by one.
+        self._refs_by_context = {}
+        #: List of contexts in creation order by via= parameter. When
+        #: :attr:`max_interpreters` is reached, the most recently used context
+        #: is destroyed to make room for any additional context.
+        self._lru_by_via = {}
+        #: :func:`key_from_dict` result by Context.
+ self._key_by_context = {} + #: Mapping of Context -> parent Context + self._via_by_context = {} + + @mitogen.service.expose(mitogen.service.AllowParents()) + @mitogen.service.arg_spec({ + 'stack': list, + }) + def reset(self, stack): + """ + Return a reference, forcing close and discard of the underlying + connection. Used for 'meta: reset_connection' or when some other error + is detected. + + :returns: + :data:`True` if a connection was found to discard, otherwise + :data:`False`. + """ + LOG.debug('%r.reset(%r)', self, stack) + + # this could happen if we have a `shutdown -r` shell command + # and then a `wait_for_connection` right afterwards + # in this case, we have no stack to disconnect from + if not stack: + return False + + l = mitogen.core.Latch() + context = None + with self._lock: + for i, spec in enumerate(stack): + key = key_from_dict(via=context, **spec) + response = self._response_by_key.get(key) + if response is None: + LOG.debug('%r: could not find connection to shut down; ' + 'failed at hop %d', self, i) + return False + + context = response['context'] + + mitogen.core.listen(context, 'disconnect', l.put) + self._shutdown_unlocked(context) + + # The timeout below is to turn a hang into a crash in case there is any + # possible race between 'disconnect' signal subscription, and the child + # abruptly disconnecting. + l.get(timeout=30.0) + return True + + @mitogen.service.expose(mitogen.service.AllowParents()) + @mitogen.service.arg_spec({ + 'context': mitogen.core.Context + }) + def put(self, context): + """ + Return a reference, making it eligable for recycling once its reference + count reaches zero. + """ + LOG.debug('decrementing reference count for %r', context) + self._lock.acquire() + try: + if self._refs_by_context.get(context, 0) == 0: + LOG.warning('%r.put(%r): refcount was 0. 
shutdown_all called?', + self, context) + return + self._refs_by_context[context] -= 1 + finally: + self._lock.release() + + def _produce_response(self, key, response): + """ + Reply to every waiting request matching a configuration key with a + response dictionary, deleting the list of waiters when done. + + :param str key: + Result of :meth:`key_from_dict` + :param dict response: + Response dictionary + :returns: + Number of waiters that were replied to. + """ + self._lock.acquire() + try: + latches = self._latches_by_key.pop(key) + count = len(latches) + for latch in latches: + latch.put(response) + finally: + self._lock.release() + return count + + def _forget_context_unlocked(self, context): + key = self._key_by_context.get(context) + if key is None: + LOG.debug('%r: attempt to forget unknown %r', self, context) + return + + self._response_by_key.pop(key, None) + self._latches_by_key.pop(key, None) + self._key_by_context.pop(context, None) + self._refs_by_context.pop(context, None) + self._via_by_context.pop(context, None) + self._lru_by_via.pop(context, None) + + def _shutdown_unlocked(self, context, lru=None, new_context=None): + """ + Arrange for `context` to be shut down, and optionally add `new_context` + to the LRU list while holding the lock. + """ + LOG.info('%r._shutdown_unlocked(): shutting down %r', self, context) + context.shutdown() + via = self._via_by_context.get(context) + if via: + lru = self._lru_by_via.get(via) + if lru: + if context in lru: + lru.remove(context) + if new_context: + lru.append(new_context) + self._forget_context_unlocked(context) + + def _update_lru_unlocked(self, new_context, spec, via): + """ + Update the LRU ("MRU"?) list associated with the connection described + by `kwargs`, destroying the most recently created context if the list + is full. Finally add `new_context` to the list. 
+ """ + self._via_by_context[new_context] = via + + lru = self._lru_by_via.setdefault(via, []) + if len(lru) < self.max_interpreters: + lru.append(new_context) + return + + for context in reversed(lru): + if self._refs_by_context[context] == 0: + break + else: + LOG.warning('via=%r reached maximum number of interpreters, ' + 'but they are all marked as in-use.', via) + return + + self._shutdown_unlocked(context, lru=lru, new_context=new_context) + + def _update_lru(self, new_context, spec, via): + self._lock.acquire() + try: + self._update_lru_unlocked(new_context, spec, via) + finally: + self._lock.release() + + @mitogen.service.expose(mitogen.service.AllowParents()) + def dump(self): + """ + For testing, return a list of dicts describing every currently + connected context. + """ + return [ + { + 'context_name': context.name, + 'via': getattr(self._via_by_context.get(context), + 'name', None), + 'refs': self._refs_by_context.get(context), + } + for context, key in sorted(self._key_by_context.items(), + key=lambda c_k: c_k[0].context_id) + ] + + @mitogen.service.expose(mitogen.service.AllowParents()) + def shutdown_all(self): + """ + For testing use, arrange for all connections to be shut down. + """ + self._lock.acquire() + try: + for context in list(self._key_by_context): + self._shutdown_unlocked(context) + finally: + self._lock.release() + + def _on_context_disconnect(self, context): + """ + Respond to Context disconnect event by deleting any record of the no + longer reachable context. This method runs in the Broker thread and + must not to block. 
+ """ + self._lock.acquire() + try: + LOG.info('%r: Forgetting %r due to stream disconnect', self, context) + self._forget_context_unlocked(context) + finally: + self._lock.release() + + ALWAYS_PRELOAD = ( + 'ansible.module_utils.basic', + 'ansible.module_utils.json_utils', + 'ansible.release', + 'ansible_mitogen.runner', + 'ansible_mitogen.target', + 'mitogen.fork', + 'mitogen.service', + ) + + def _send_module_forwards(self, context): + if hasattr(self.router.responder, 'forward_modules'): + self.router.responder.forward_modules(context, self.ALWAYS_PRELOAD) + + _candidate_temp_dirs = None + + def _get_candidate_temp_dirs(self): + """ + Return a list of locations to try to create the single temporary + directory used by the run. This simply caches the (expensive) plugin + load of :func:`_get_candidate_temp_dirs`. + """ + if self._candidate_temp_dirs is None: + self._candidate_temp_dirs = _get_candidate_temp_dirs() + return self._candidate_temp_dirs + + def _connect(self, key, spec, via=None): + """ + Actual connect implementation. Arranges for the Mitogen connection to + be created and enqueues an asynchronous call to start the forked task + parent in the remote context. + + :param key: + Deduplication key representing the connection configuration. + :param spec: + Connection specification. + :returns: + Dict like:: + + { + 'context': mitogen.core.Context or None, + 'via': mitogen.core.Context or None, + 'init_child_result': { + 'fork_context': mitogen.core.Context, + 'home_dir': str or None, + }, + 'msg': str or None + } + + Where `context` is a reference to the newly constructed context, + `init_child_result` is the result of executing + :func:`ansible_mitogen.target.init_child` in that context, `msg` is + an error message and the remaining fields are :data:`None`, or + `msg` is :data:`None` and the remaining fields are set. 
+ """ + try: + method = getattr(self.router, spec['method']) + except AttributeError: + raise Error('unsupported method: %(method)s' % spec) + + context = method(via=via, unidirectional=True, **spec['kwargs']) + if via and spec.get('enable_lru'): + self._update_lru(context, spec, via) + + # Forget the context when its disconnect event fires. + mitogen.core.listen(context, 'disconnect', + lambda: self._on_context_disconnect(context)) + + self._send_module_forwards(context) + init_child_result = context.call( + ansible_mitogen.target.init_child, + log_level=LOG.getEffectiveLevel(), + candidate_temp_dirs=self._get_candidate_temp_dirs(), + ) + + if os.environ.get('MITOGEN_DUMP_THREAD_STACKS'): + from mitogen import debug + context.call(debug.dump_to_logger) + + self._key_by_context[context] = key + self._refs_by_context[context] = 0 + return { + 'context': context, + 'via': via, + 'init_child_result': init_child_result, + 'msg': None, + } + + def _wait_or_start(self, spec, via=None): + latch = mitogen.core.Latch() + key = key_from_dict(via=via, **spec) + self._lock.acquire() + try: + response = self._response_by_key.get(key) + if response is not None: + self._refs_by_context[response['context']] += 1 + latch.put(response) + return latch + + latches = self._latches_by_key.setdefault(key, []) + first = len(latches) == 0 + latches.append(latch) + finally: + self._lock.release() + + if first: + # I'm the first requestee, so I will create the connection. + try: + response = self._connect(key, spec, via=via) + count = self._produce_response(key, response) + # Only record the response for non-error results. + self._response_by_key[key] = response + # Set the reference count to the number of waiters. 
+ self._refs_by_context[response['context']] += count + except Exception: + self._produce_response(key, sys.exc_info()) + + return latch + + disconnect_msg = ( + 'Channel was disconnected while connection attempt was in progress; ' + 'this may be caused by an abnormal Ansible exit, or due to an ' + 'unreliable target.' + ) + + @mitogen.service.expose(mitogen.service.AllowParents()) + @mitogen.service.arg_spec({ + 'stack': list + }) + def get(self, stack): + """ + Return a Context referring to an established connection with the given + configuration, establishing new connections as necessary. + + :param list stack: + Connection descriptions. Each element is a dict containing 'method' + and 'kwargs' keys describing the Router method and arguments. + Subsequent elements are proxied via the previous. + + :returns dict: + * context: mitogen.parent.Context or None. + * init_child_result: Result of :func:`init_child`. + * msg: StreamError exception text or None. + * method_name: string failing method name. + """ + via = None + for spec in stack: + try: + result = self._wait_or_start(spec, via=via).get() + if isinstance(result, tuple): # exc_info() + reraise(*result) + via = result['context'] + except mitogen.core.ChannelError: + return { + 'context': None, + 'init_child_result': None, + 'method_name': spec['method'], + 'msg': self.disconnect_msg, + } + except mitogen.core.StreamError as e: + return { + 'context': None, + 'init_child_result': None, + 'method_name': spec['method'], + 'msg': str(e), + } + + return result + + +class ModuleDepService(mitogen.service.Service): + """ + Scan a new-style module and produce a cached mapping of module_utils names + to their resolved filesystem paths. 
+ """ + invoker_class = mitogen.service.SerializedInvoker + + def __init__(self, *args, **kwargs): + super(ModuleDepService, self).__init__(*args, **kwargs) + self._cache = {} + + def _get_builtin_names(self, builtin_path, resolved): + return [ + mitogen.core.to_text(fullname) + for fullname, path, is_pkg in resolved + if os.path.abspath(path).startswith(builtin_path) + ] + + def _get_custom_tups(self, builtin_path, resolved): + return [ + (mitogen.core.to_text(fullname), + mitogen.core.to_text(path), + is_pkg) + for fullname, path, is_pkg in resolved + if not os.path.abspath(path).startswith(builtin_path) + ] + + @mitogen.service.expose(policy=mitogen.service.AllowParents()) + @mitogen.service.arg_spec({ + 'module_name': mitogen.core.UnicodeType, + 'module_path': mitogen.core.FsPathTypes, + 'search_path': tuple, + 'builtin_path': mitogen.core.FsPathTypes, + 'context': mitogen.core.Context, + }) + def scan(self, module_name, module_path, search_path, builtin_path, context): + key = (module_name, search_path) + if key not in self._cache: + resolved = ansible_mitogen.module_finder.scan( + module_name=module_name, + module_path=module_path, + search_path=tuple(search_path) + (builtin_path,), + ) + builtin_path = os.path.abspath(builtin_path) + builtin = self._get_builtin_names(builtin_path, resolved) + custom = self._get_custom_tups(builtin_path, resolved) + self._cache[key] = { + 'builtin': builtin, + 'custom': custom, + } + return self._cache[key] diff --git a/mitogen-0.3.9/ansible_mitogen/strategy.py b/mitogen-0.3.9/ansible_mitogen/strategy.py new file mode 100644 index 0000000..0a98e31 --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/strategy.py @@ -0,0 +1,328 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. 
Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import os +import signal +import threading + +try: + import setproctitle +except ImportError: + setproctitle = None + +import mitogen.core +import ansible_mitogen.affinity +import ansible_mitogen.loaders +import ansible_mitogen.mixins +import ansible_mitogen.process + +import ansible.executor.process.worker +import ansible.utils.sentinel + + +def _patch_awx_callback(): + """ + issue #400: AWX loads a display callback that suffers from thread-safety + issues. Detect the presence of older AWX versions and patch the bug. 
+ """ + # AWX uses sitecustomize.py to force-load this package. If it exists, we're + # running under AWX. + try: + import awx_display_callback.events + except ImportError: + return + + if hasattr(awx_display_callback.events.EventContext(), '_local'): + # Patched version. + return + + def patch_add_local(self, **kwargs): + tls = vars(self._local) + ctx = tls.setdefault('_ctx', {}) + ctx.update(kwargs) + + awx_display_callback.events.EventContext._local = threading.local() + awx_display_callback.events.EventContext.add_local = patch_add_local + +_patch_awx_callback() + + +def wrap_action_loader__get(name, *args, **kwargs): + """ + While the mitogen strategy is active, trap action_loader.get() calls, + augmenting any fetched class with ActionModuleMixin, which replaces various + helper methods inherited from ActionBase with implementations that avoid + the use of shell fragments wherever possible. + + This is used instead of static subclassing as it generalizes to third party + action plugins outside the Ansible tree. + """ + get_kwargs = {'class_only': True} + if name in ('fetch',): + name = 'mitogen_' + name + get_kwargs['collection_list'] = kwargs.pop('collection_list', None) + + klass = ansible_mitogen.loaders.action_loader__get(name, **get_kwargs) + if klass: + bases = (ansible_mitogen.mixins.ActionModuleMixin, klass) + adorned_klass = type(str(name), bases, {}) + if kwargs.get('class_only'): + return adorned_klass + return adorned_klass(*args, **kwargs) + + +REDIRECTED_CONNECTION_PLUGINS = ( + 'buildah', + 'docker', + 'kubectl', + 'jail', + 'local', + 'lxc', + 'lxd', + 'machinectl', + 'podman', + 'setns', + 'ssh', +) + + +def wrap_connection_loader__get(name, *args, **kwargs): + """ + While a Mitogen strategy is active, rewrite connection_loader.get() calls + for some transports into requests for a compatible Mitogen transport. 
+ """ + if name in REDIRECTED_CONNECTION_PLUGINS: + name = 'mitogen_' + name + + return ansible_mitogen.loaders.connection_loader__get(name, *args, **kwargs) + + +def wrap_worker__run(self): + """ + While a Mitogen strategy is active, trap WorkerProcess.run() calls and use + the opportunity to set the worker's name in the process list and log + output, activate profiling if requested, and bind the worker to a specific + CPU. + """ + if setproctitle: + setproctitle.setproctitle('worker:%s task:%s' % ( + self._host.name, + self._task.action, + )) + + # Ignore parent's attempts to murder us when we still need to write + # profiling output. + if mitogen.core._profile_hook.__name__ != '_profile_hook': + signal.signal(signal.SIGTERM, signal.SIG_IGN) + + ansible_mitogen.logging.set_process_name('task') + ansible_mitogen.affinity.policy.assign_worker() + return mitogen.core._profile_hook('WorkerProcess', + lambda: worker__run(self) + ) + + +class AnsibleWrappers(object): + """ + Manage add/removal of various Ansible runtime hooks. + """ + def _add_plugin_paths(self): + """ + Add the Mitogen plug-in directories to the ModuleLoader path, avoiding + the need for manual configuration. + """ + base_dir = os.path.join(os.path.dirname(__file__), 'plugins') + ansible_mitogen.loaders.connection_loader.add_directory( + os.path.join(base_dir, 'connection') + ) + ansible_mitogen.loaders.action_loader.add_directory( + os.path.join(base_dir, 'action') + ) + + def _install_wrappers(self): + """ + Install our PluginLoader monkey patches and update global variables + with references to the real functions. 
+ """ + ansible_mitogen.loaders.action_loader.get = wrap_action_loader__get + ansible_mitogen.loaders.connection_loader.get_with_context = wrap_connection_loader__get + + global worker__run + worker__run = ansible.executor.process.worker.WorkerProcess.run + ansible.executor.process.worker.WorkerProcess.run = wrap_worker__run + + def _remove_wrappers(self): + """ + Uninstall the PluginLoader monkey patches. + """ + ansible_mitogen.loaders.action_loader.get = ( + ansible_mitogen.loaders.action_loader__get + ) + ansible_mitogen.loaders.connection_loader.get_with_context = ( + ansible_mitogen.loaders.connection_loader__get + ) + ansible.executor.process.worker.WorkerProcess.run = worker__run + + def install(self): + self._add_plugin_paths() + self._install_wrappers() + + def remove(self): + self._remove_wrappers() + + +class StrategyMixin(object): + """ + This mix-in enhances any built-in strategy by arranging for an appropriate + WorkerModel instance to be constructed as necessary, or for the existing + one to be reused. + + The WorkerModel in turn arranges for a connection multiplexer to be started + somewhere (by default in an external process), and for WorkerProcesses to + grow support for using those top-level services to communicate with remote + hosts. + + Mitogen: + + A private Broker IO multiplexer thread is created to dispatch IO + between the local Router and any connected streams, including streams + connected to Ansible WorkerProcesses, and SSH commands implementing + connections to remote machines. + + A Router is created that implements message dispatch to any locally + registered handlers, and message routing for remote streams. Router is + the junction point through which WorkerProceses and remote SSH contexts + can communicate. + + Router additionally adds message handlers for a variety of base + services, review the Standard Handles section of the How It Works guide + in the documentation. 
+ + A ContextService is installed as a message handler in the connection + mutliplexer subprocess and run on a private thread. It is responsible + for accepting requests to establish new SSH connections from worker + processes, and ensuring precisely one connection exists and is reused + for subsequent playbook steps. The service presently runs in a single + thread, so to begin with, new SSH connections are serialized. + + Finally a mitogen.unix listener is created through which WorkerProcess + can establish a connection back into the connection multiplexer, in + order to avail of ContextService. A UNIX listener socket is necessary + as there is no more sane mechanism to arrange for IPC between the + Router in the connection multiplexer, and the corresponding Router in + the worker process. + + Ansible: + + PluginLoader monkey patches are installed to catch attempts to create + connection and action plug-ins. + + For connection plug-ins, if the desired method is "local" or "ssh", it + is redirected to one of the "mitogen_*" connection plug-ins. That + plug-in implements communication via a UNIX socket connection to the + connection multiplexer process, and uses ContextService running there + to establish a persistent connection to the target. + + For action plug-ins, the original class is looked up as usual, but a + new subclass is created dynamically in order to mix-in + ansible_mitogen.target.ActionModuleMixin, which overrides many of the + methods usually inherited from ActionBase in order to replace them with + pure-Python equivalents that avoid the use of shell. + + In particular, _execute_module() is overridden with an implementation + that uses ansible_mitogen.target.run_module() executed in the target + Context. run_module() implements module execution by importing the + module as if it were a normal Python module, and capturing its output + in the remote process. 
Since the Mitogen module loader is active in the + remote process, all the heavy lifting of transferring the action module + and its dependencies are automatically handled by Mitogen. + """ + + def _queue_task(self, host, task, task_vars, play_context): + """ + Many PluginLoader caches are defective as they are only populated in + the ephemeral WorkerProcess. Touch each plug-in path before forking to + ensure all workers receive a hot cache. + """ + ansible_mitogen.loaders.module_loader.find_plugin( + name=task.action, + mod_type='', + ) + ansible_mitogen.loaders.action_loader.get( + name=task.action, + class_only=True, + ) + if play_context.connection is not ansible.utils.sentinel.Sentinel: + # 2.8 appears to defer computing this until inside the worker. + # TODO: figure out where it has moved. + ansible_mitogen.loaders.connection_loader.get( + name=play_context.connection, + class_only=True, + ) + + return super(StrategyMixin, self)._queue_task( + host=host, + task=task, + task_vars=task_vars, + play_context=play_context, + ) + + def _get_worker_model(self): + """ + In classic mode a single :class:`WorkerModel` exists, which manages + references and configuration of the associated connection multiplexer + process. + """ + return ansible_mitogen.process.get_classic_worker_model() + + def run(self, iterator, play_context, result=0): + """ + Wrap :meth:`run` to ensure requisite infrastructure and modifications + are configured for the duration of the call. 
+ """ + wrappers = AnsibleWrappers() + self._worker_model = self._get_worker_model() + ansible_mitogen.process.set_worker_model(self._worker_model) + try: + self._worker_model.on_strategy_start() + try: + wrappers.install() + try: + run = super(StrategyMixin, self).run + return mitogen.core._profile_hook('Strategy', + lambda: run(iterator, play_context) + ) + finally: + wrappers.remove() + finally: + self._worker_model.on_strategy_complete() + finally: + ansible_mitogen.process.set_worker_model(None) diff --git a/mitogen-0.3.9/ansible_mitogen/target.py b/mitogen-0.3.9/ansible_mitogen/target.py new file mode 100644 index 0000000..7d907d6 --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/target.py @@ -0,0 +1,771 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +# !mitogen: minify_safe + +""" +Helper functions intended to be executed on the target. These are entrypoints +for file transfer, module execution and sundry bits like changing file modes. +""" + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import errno +import grp +import json +import operator +import os +import pwd +import re +import signal +import stat +import subprocess +import sys +import tempfile +import traceback +import types + +# Absolute imports for <2.5. +logging = __import__('logging') + +import mitogen.core +import mitogen.parent +import mitogen.service +from mitogen.core import b + +try: + reduce +except NameError: + # Python 3.x. + from functools import reduce + +try: + BaseException +except NameError: + # Python 2.4 + BaseException = Exception + + +# Ansible since PR #41749 inserts "import __main__" into +# ansible.module_utils.basic. Mitogen's importer will refuse such an import, so +# we must setup a fake "__main__" before that module is ever imported. The +# str() is to cast Unicode to bytes on Python 2.6. +if not sys.modules.get(str('__main__')): + sys.modules[str('__main__')] = types.ModuleType(str('__main__')) + +import ansible.module_utils.json_utils +import ansible_mitogen.runner + + +LOG = logging.getLogger(__name__) + +MAKE_TEMP_FAILED_MSG = ( + u"Unable to find a useable temporary directory. 
This likely means no\n" + u"system-supplied TMP directory can be written to, or all directories\n" + u"were mounted on 'noexec' filesystems.\n" + u"\n" + u"The following paths were tried:\n" + u" %(paths)s\n" + u"\n" + u"Please check '-vvv' output for a log of individual path errors." +) + +# Python 2.4/2.5 cannot support fork+threads whatsoever, it doesn't even fix up +# interpreter state. So 2.4/2.5 interpreters start .local() contexts for +# isolation instead. Since we don't have any crazy memory sharing problems to +# avoid, there is no virginal fork parent either. The child is started directly +# from the login/become process. In future this will be default everywhere, +# fork is brainwrong from the stone age. +FORK_SUPPORTED = sys.version_info >= (2, 6) + +#: Initialized to an econtext.parent.Context pointing at a pristine fork of +#: the target Python interpreter before it executes any code or imports. +_fork_parent = None + +#: Set by :func:`init_child` to the name of a writeable and executable +#: temporary directory accessible by the active user account. +good_temp_dir = None + + +def subprocess__Popen__close_fds(self, but): + """ + issue #362, #435: subprocess.Popen(close_fds=True) aka. + AnsibleModule.run_command() loops the entire FD space on Python<3.2. + CentOS>5 ships with 1,048,576 FDs by default, resulting in huge (>500ms) + latency starting children. Therefore replace Popen._close_fds on Linux with + a version that is O(fds) rather than O(_SC_OPEN_MAX). + """ + try: + names = os.listdir(u'/proc/self/fd') + except OSError: + # May fail if acting on a container that does not have /proc mounted. 
+        self._original_close_fds(but)
+        return
+
+    for name in names:
+        if not name.isdigit():
+            continue
+
+        fd = int(name, 10)
+        if fd > 2 and fd != but:
+            try:
+                os.close(fd)
+            except OSError:
+                pass
+
+
+if (
+    sys.platform.startswith(u'linux') and
+    sys.version_info < (3,) and
+    hasattr(subprocess.Popen, u'_close_fds') and
+    not mitogen.is_master
+):
+    subprocess.Popen._original_close_fds = subprocess.Popen._close_fds
+    subprocess.Popen._close_fds = subprocess__Popen__close_fds
+
+
+def get_small_file(context, path):
+    """
+    Basic in-memory caching module fetcher. This generates one roundtrip for
+    every previously unseen file, so it is only a temporary solution.
+
+    :param context:
+        Context we should direct FileService requests to. For now (and probably
+        forever) this is just the top-level Mitogen connection manager process.
+    :param path:
+        Path to fetch from FileService, must previously have been registered by
+        a privileged context using the `register` command.
+    :returns:
+        Bytestring file data.
+    """
+    pool = mitogen.service.get_or_create_pool(router=context.router)
+    service = pool.get_service(u'mitogen.service.PushFileService')
+    return service.get(path)
+
+
+def transfer_file(context, in_path, out_path, sync=False, set_owner=False):
+    """
+    Streamily download a file from the connection multiplexer process in the
+    controller.
+
+    :param mitogen.core.Context context:
+        Reference to the context hosting the FileService that will transmit the
+        file.
+    :param bytes in_path:
+        FileService registered name of the input file.
+    :param bytes out_path:
+        Name of the output path on the local disk.
+    :param bool sync:
+        If :data:`True`, ensure the file content and metadata are fully on disk
+        before renaming the temporary file over the existing file. This should
+        ensure in the case of system crash, either the entire old or new file
+        are visible post-reboot.
+ :param bool set_owner: + If :data:`True`, look up the metadata username and group on the local + system and file the file owner using :func:`os.fchmod`. + """ + out_path = os.path.abspath(out_path) + fd, tmp_path = tempfile.mkstemp(suffix='.tmp', + prefix='.ansible_mitogen_transfer-', + dir=os.path.dirname(out_path)) + fp = os.fdopen(fd, 'wb', mitogen.core.CHUNK_SIZE) + LOG.debug('transfer_file(%r) temporary file: %s', out_path, tmp_path) + + try: + try: + ok, metadata = mitogen.service.FileService.get( + context=context, + path=in_path, + out_fp=fp, + ) + if not ok: + raise IOError('transfer of %r was interrupted.' % (in_path,)) + + set_file_mode(tmp_path, metadata['mode'], fd=fp.fileno()) + if set_owner: + set_file_owner(tmp_path, metadata['owner'], metadata['group'], + fd=fp.fileno()) + finally: + fp.close() + + if sync: + os.fsync(fp.fileno()) + os.rename(tmp_path, out_path) + except BaseException: + os.unlink(tmp_path) + raise + + os.utime(out_path, (metadata['atime'], metadata['mtime'])) + + +def prune_tree(path): + """ + Like shutil.rmtree(), but log errors rather than discard them, and do not + waste multiple os.stat() calls discovering whether the object can be + deleted, just try deleting it instead. + """ + try: + os.unlink(path) + return + except OSError: + e = sys.exc_info()[1] + if not (os.path.isdir(path) and + e.args[0] in (errno.EPERM, errno.EISDIR)): + LOG.error('prune_tree(%r): %s', path, e) + return + + try: + # Ensure write access for readonly directories. Ignore error in case + # path is on a weird filesystem (e.g. vfat). 
+ os.chmod(path, int('0700', 8)) + except OSError: + e = sys.exc_info()[1] + LOG.warning('prune_tree(%r): %s', path, e) + + try: + for name in os.listdir(path): + if name not in ('.', '..'): + prune_tree(os.path.join(path, name)) + os.rmdir(path) + except OSError: + e = sys.exc_info()[1] + LOG.error('prune_tree(%r): %s', path, e) + + +def is_good_temp_dir(path): + """ + Return :data:`True` if `path` can be used as a temporary directory, logging + any failures that may cause it to be unsuitable. If the directory doesn't + exist, we attempt to create it using :func:`os.makedirs`. + """ + if not os.path.exists(path): + try: + os.makedirs(path, mode=int('0700', 8)) + except OSError: + e = sys.exc_info()[1] + LOG.debug('temp dir %r unusable: did not exist and attempting ' + 'to create it failed: %s', path, e) + return False + + try: + tmp = tempfile.NamedTemporaryFile( + prefix='ansible_mitogen_is_good_temp_dir', + dir=path, + ) + except (OSError, IOError): + e = sys.exc_info()[1] + LOG.debug('temp dir %r unusable: %s', path, e) + return False + + try: + try: + os.chmod(tmp.name, int('0700', 8)) + except OSError: + e = sys.exc_info()[1] + LOG.debug('temp dir %r unusable: chmod failed: %s', path, e) + return False + + try: + # access(.., X_OK) is sufficient to detect noexec. + if not os.access(tmp.name, os.X_OK): + raise OSError('filesystem appears to be mounted noexec') + except OSError: + e = sys.exc_info()[1] + LOG.debug('temp dir %r unusable: %s', path, e) + return False + finally: + tmp.close() + + return True + + +def find_good_temp_dir(candidate_temp_dirs): + """ + Given a list of candidate temp directories extracted from ``ansible.cfg``, + combine it with the Python-builtin list of candidate directories used by + :mod:`tempfile`, then iteratively try each until one is found that is both + writeable and executable. 
+ + :param list candidate_temp_dirs: + List of candidate $variable-expanded and tilde-expanded directory paths + that may be usable as a temporary directory. + """ + paths = [os.path.expandvars(os.path.expanduser(p)) + for p in candidate_temp_dirs] + paths.extend(tempfile._candidate_tempdir_list()) + + for path in paths: + if is_good_temp_dir(path): + LOG.debug('Selected temp directory: %r (from %r)', path, paths) + return path + + raise IOError(MAKE_TEMP_FAILED_MSG % { + 'paths': '\n '.join(paths), + }) + + +@mitogen.core.takes_econtext +def init_child(econtext, log_level, candidate_temp_dirs): + """ + Called by ContextService immediately after connection; arranges for the + (presently) spotless Python interpreter to be forked, where the newly + forked interpreter becomes the parent of any newly forked future + interpreters. + + This is necessary to prevent modules that are executed in-process from + polluting the global interpreter state in a way that effects explicitly + isolated modules. + + :param int log_level: + Logging package level active in the master. + :param list[str] candidate_temp_dirs: + List of $variable-expanded and tilde-expanded directory names to add to + candidate list of temporary directories. + + :returns: + Dict like:: + + { + 'fork_context': mitogen.core.Context or None, + 'good_temp_dir': ... + 'home_dir': str + } + + Where `fork_context` refers to the newly forked 'fork parent' context + the controller will use to start forked jobs, and `home_dir` is the + home directory for the active user account. + """ + # Copying the master's log level causes log messages to be filtered before + # they reach LogForwarder, thus reducing an influx of tiny messges waking + # the connection multiplexer process in the master. 
+ LOG.setLevel(log_level) + logging.getLogger('ansible_mitogen').setLevel(log_level) + + global _fork_parent + if FORK_SUPPORTED: + mitogen.parent.upgrade_router(econtext) + _fork_parent = econtext.router.fork() + + global good_temp_dir + good_temp_dir = find_good_temp_dir(candidate_temp_dirs) + + return { + u'fork_context': _fork_parent, + u'home_dir': mitogen.core.to_text(os.path.expanduser('~')), + u'good_temp_dir': good_temp_dir, + } + + +@mitogen.core.takes_econtext +def spawn_isolated_child(econtext): + """ + For helper functions executed in the fork parent context, arrange for + the context's router to be upgraded as necessary and for a new child to be + prepared. + + The actual fork occurs from the 'virginal fork parent', which does not have + any Ansible modules loaded prior to fork, to avoid conflicts resulting from + custom module_utils paths. + """ + mitogen.parent.upgrade_router(econtext) + if FORK_SUPPORTED: + context = econtext.router.fork() + else: + context = econtext.router.local() + LOG.debug('create_fork_child() -> %r', context) + return context + + +def run_module(kwargs): + """ + Set up the process environment in preparation for running an Ansible + module. This monkey-patches the Ansible libraries in various places to + prevent it from trying to kill the process on completion, and to prevent it + from reading sys.stdin. 
+ """ + runner_name = kwargs.pop('runner_name') + klass = getattr(ansible_mitogen.runner, runner_name) + impl = klass(**mitogen.core.Kwargs(kwargs)) + return impl.run() + + +def _get_async_dir(): + return os.path.expanduser( + os.environ.get('ANSIBLE_ASYNC_DIR', '~/.ansible_async') + ) + + +class AsyncRunner(object): + def __init__(self, job_id, timeout_secs, started_sender, econtext, kwargs): + self.job_id = job_id + self.timeout_secs = timeout_secs + self.started_sender = started_sender + self.econtext = econtext + self.kwargs = kwargs + self._timed_out = False + self._init_path() + + def _init_path(self): + async_dir = _get_async_dir() + if not os.path.exists(async_dir): + os.makedirs(async_dir) + self.path = os.path.join(async_dir, self.job_id) + + def _update(self, dct): + """ + Update an async job status file. + """ + LOG.info('%r._update(%r, %r)', self, self.job_id, dct) + dct.setdefault('ansible_job_id', self.job_id) + dct.setdefault('data', '') + + fp = open(self.path + '.tmp', 'w') + try: + fp.write(json.dumps(dct)) + finally: + fp.close() + os.rename(self.path + '.tmp', self.path) + + def _on_sigalrm(self, signum, frame): + """ + Respond to SIGALRM (job timeout) by updating the job file and killing + the process. + """ + msg = "Job reached maximum time limit of %d seconds." % ( + self.timeout_secs, + ) + self._update({ + "failed": 1, + "finished": 1, + "msg": msg, + }) + self._timed_out = True + self.econtext.broker.shutdown() + + def _install_alarm(self): + signal.signal(signal.SIGALRM, self._on_sigalrm) + signal.alarm(self.timeout_secs) + + def _run_module(self): + kwargs = dict(self.kwargs, **{ + 'detach': True, + 'econtext': self.econtext, + 'emulate_tty': False, + }) + return run_module(kwargs) + + def _parse_result(self, dct): + filtered, warnings = ( + ansible.module_utils.json_utils. 
+ _filter_non_json_lines(dct['stdout']) + ) + result = json.loads(filtered) + result.setdefault('warnings', []).extend(warnings) + result['stderr'] = dct['stderr'] or result.get('stderr', '') + self._update(result) + + def _run(self): + """ + 1. Immediately updates the status file to mark the job as started. + 2. Installs a timer/signal handler to implement the time limit. + 3. Runs as with run_module(), writing the result to the status file. + + :param dict kwargs: + Runner keyword arguments. + :param str job_id: + String job ID. + :param int timeout_secs: + If >0, limit the task's maximum run time. + """ + self._update({ + 'started': 1, + 'finished': 0, + 'pid': os.getpid() + }) + self.started_sender.send(True) + + if self.timeout_secs > 0: + self._install_alarm() + + dct = self._run_module() + if not self._timed_out: + # After SIGALRM fires, there is a window between broker responding + # to shutdown() by killing the process, and work continuing on the + # main thread. If main thread was asleep in at least + # basic.py/select.select(), an EINTR will be raised. We want to + # discard that exception. + try: + self._parse_result(dct) + except Exception: + self._update({ + "failed": 1, + "msg": traceback.format_exc(), + "data": dct['stdout'], # temporary notice only + "stderr": dct['stderr'] + }) + + def run(self): + try: + try: + self._run() + except Exception: + self._update({ + "failed": 1, + "msg": traceback.format_exc(), + }) + finally: + self.econtext.broker.shutdown() + + +@mitogen.core.takes_econtext +def run_module_async(kwargs, job_id, timeout_secs, started_sender, econtext): + """ + Execute a module with its run status and result written to a file, + terminating on the process on completion. This function must run in a child + forked using :func:`create_fork_child`. + + @param mitogen.core.Sender started_sender: + A sender that will receive :data:`True` once the job has reached a + point where its initial job file has been written. 
This is required to + avoid a race where an overly eager controller can check for a task + before it has reached that point in execution, which is possible at + least on Python 2.4, where forking is not available for async tasks. + """ + arunner = AsyncRunner( + job_id, + timeout_secs, + started_sender, + econtext, + kwargs + ) + arunner.run() + + +def get_user_shell(): + """ + For commands executed directly via an SSH command-line, SSH looks up the + user's shell via getpwuid() and only defaults to /bin/sh if that field is + missing or empty. + """ + try: + pw_shell = pwd.getpwuid(os.geteuid()).pw_shell + except KeyError: + pw_shell = None + + return pw_shell or '/bin/sh' + + +def exec_args(args, in_data='', chdir=None, shell=None, emulate_tty=False): + """ + Run a command in a subprocess, emulating the argument handling behaviour of + SSH. + + :param list[str]: + Argument vector. + :param bytes in_data: + Optional standard input for the command. + :param bool emulate_tty: + If :data:`True`, arrange for stdout and stderr to be merged into the + stdout pipe and for LF to be translated into CRLF, emulating the + behaviour of a TTY. + :return: + (return code, stdout bytes, stderr bytes) + """ + LOG.debug('exec_args(%r, ..., chdir=%r)', args, chdir) + assert isinstance(args, list) + + if emulate_tty: + stderr = subprocess.STDOUT + else: + stderr = subprocess.PIPE + + proc = subprocess.Popen( + args=args, + stdout=subprocess.PIPE, + stderr=stderr, + stdin=subprocess.PIPE, + cwd=chdir, + ) + stdout, stderr = proc.communicate(in_data) + + if emulate_tty: + stdout = stdout.replace(b('\n'), b('\r\n')) + return proc.returncode, stdout, stderr or b('') + + +def exec_command(cmd, in_data='', chdir=None, shell=None, emulate_tty=False): + """ + Run a command in a subprocess, emulating the argument handling behaviour of + SSH. + + :param bytes cmd: + String command line, passed to user's shell. + :param bytes in_data: + Optional standard input for the command. 
+ :return: + (return code, stdout bytes, stderr bytes) + """ + assert isinstance(cmd, mitogen.core.UnicodeType) + return exec_args( + args=[get_user_shell(), '-c', cmd], + in_data=in_data, + chdir=chdir, + shell=shell, + emulate_tty=emulate_tty, + ) + + +def read_path(path): + """ + Fetch the contents of a filesystem `path` as bytes. + """ + with open(path, 'rb') as f: + return f.read() + + +def set_file_owner(path, owner, group=None, fd=None): + if owner: + uid = pwd.getpwnam(owner).pw_uid + else: + uid = os.geteuid() + + if group: + gid = grp.getgrnam(group).gr_gid + else: + gid = os.getegid() + + if fd is not None and hasattr(os, 'fchown'): + os.fchown(fd, (uid, gid)) + else: + # Python<2.6 + os.chown(path, (uid, gid)) + + +def write_path(path, s, owner=None, group=None, mode=None, + utimes=None, sync=False): + """ + Writes bytes `s` to a filesystem `path`. + """ + path = os.path.abspath(path) + fd, tmp_path = tempfile.mkstemp(suffix='.tmp', + prefix='.ansible_mitogen_transfer-', + dir=os.path.dirname(path)) + fp = os.fdopen(fd, 'wb', mitogen.core.CHUNK_SIZE) + LOG.debug('write_path(path=%r) temporary file: %s', path, tmp_path) + + try: + try: + if mode: + set_file_mode(tmp_path, mode, fd=fp.fileno()) + if owner or group: + set_file_owner(tmp_path, owner, group, fd=fp.fileno()) + fp.write(s) + finally: + fp.close() + + if sync: + os.fsync(fp.fileno()) + os.rename(tmp_path, path) + except BaseException: + os.unlink(tmp_path) + raise + + if utimes: + os.utime(path, utimes) + + +CHMOD_CLAUSE_PAT = re.compile(r'([uoga]*)([+\-=])([ugo]|[rwx]*)') +CHMOD_MASKS = { + 'u': stat.S_IRWXU, + 'g': stat.S_IRWXG, + 'o': stat.S_IRWXO, + 'a': (stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO), +} +CHMOD_BITS = { + 'u': {'r': stat.S_IRUSR, 'w': stat.S_IWUSR, 'x': stat.S_IXUSR}, + 'g': {'r': stat.S_IRGRP, 'w': stat.S_IWGRP, 'x': stat.S_IXGRP}, + 'o': {'r': stat.S_IROTH, 'w': stat.S_IWOTH, 'x': stat.S_IXOTH}, + 'a': { + 'r': (stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH), + 'w': 
(stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH), + 'x': (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) + } +} + + +def apply_mode_spec(spec, mode): + """ + Given a symbolic file mode change specification in the style of chmod(1) + `spec`, apply changes in the specification to the numeric file mode `mode`. + """ + for clause in mitogen.core.to_text(spec).split(','): + match = CHMOD_CLAUSE_PAT.match(clause) + who, op, perms = match.groups() + for ch in who or 'a': + mask = CHMOD_MASKS[ch] + bits = CHMOD_BITS[ch] + cur_perm_bits = mode & mask + new_perm_bits = reduce(operator.or_, (bits[p] for p in perms), 0) + mode &= ~mask + if op == '=': + mode |= new_perm_bits + elif op == '+': + mode |= new_perm_bits | cur_perm_bits + else: + mode |= cur_perm_bits & ~new_perm_bits + return mode + + +def set_file_mode(path, spec, fd=None): + """ + Update the permissions of a file using the same syntax as chmod(1). + """ + if isinstance(spec, int): + new_mode = spec + elif not mitogen.core.PY3 and isinstance(spec, long): + new_mode = spec + elif spec.isdigit(): + new_mode = int(spec, 8) + else: + mode = os.stat(path).st_mode + new_mode = apply_mode_spec(spec, mode) + + if fd is not None and hasattr(os, 'fchmod'): + os.fchmod(fd, new_mode) + else: + os.chmod(path, new_mode) + + +def file_exists(path): + """ + Return :data:`True` if `path` exists. This is a wrapper function over + :func:`os.path.exists`, since its implementation module varies across + Python versions. + """ + return os.path.exists(path) diff --git a/mitogen-0.3.9/ansible_mitogen/transport_config.py b/mitogen-0.3.9/ansible_mitogen/transport_config.py new file mode 100644 index 0000000..3ab623f --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/transport_config.py @@ -0,0 +1,820 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. 
Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +""" +Mitogen extends Ansible's target configuration mechanism in several ways that +require some care: + +* Per-task configurables in Ansible like ansible_python_interpreter are + connection-layer configurables in Mitogen. They must be extracted during each + task execution to form the complete connection-layer configuration. + +* Mitogen has extra configurables not supported by Ansible at all, such as + mitogen_ssh_debug_level. These are extracted the same way as + ansible_python_interpreter. + +* Mitogen allows connections to be delegated to other machines. 
Ansible has no + internal framework for this, and so Mitogen must figure out a delegated + connection configuration all on its own. It cannot reuse much of the Ansible + machinery for building a connection configuration, as that machinery is + deeply spread out and hard-wired to expect Ansible's usual mode of operation. + +For normal and delegate_to connections, Ansible's PlayContext is reused where +possible to maximize compatibility, but for proxy hops, configurations are +built up using the HostVars magic class to call VariableManager.get_vars() +behind the scenes on our behalf. Where Ansible has multiple sources of a +configuration item, for example, ansible_ssh_extra_args, Mitogen must (ideally +perfectly) reproduce how Ansible arrives at its value, without using mechanisms +that are hard-wired or change across Ansible versions. + +That is what this file is for. It exports two spec classes, one that takes all +information from PlayContext, and another that takes (almost) all information +from HostVars. +""" + +from __future__ import absolute_import, division, print_function +from __future__ import unicode_literals +__metaclass__ = type + +import abc +import os +import ansible.utils.shlex +import ansible.constants as C + +from ansible.module_utils.six import with_metaclass +from ansible.module_utils.parsing.convert_bool import boolean + +# this was added in Ansible >= 2.8.0; fallback to the default interpreter if necessary +try: + from ansible.executor.interpreter_discovery import discover_interpreter +except ImportError: + discover_interpreter = lambda action,interpreter_name,discovery_mode,task_vars: '/usr/bin/python' + +try: + from ansible.utils.unsafe_proxy import AnsibleUnsafeText +except ImportError: + from ansible.vars.unsafe_proxy import AnsibleUnsafeText + +import mitogen.core + + +def run_interpreter_discovery_if_necessary(s, task_vars, action, rediscover_python): + """ + Triggers ansible python interpreter discovery if requested. 
+ Caches this value the same way Ansible does it. + For connections like `docker`, we want to rediscover the python interpreter because + it could be different than what's ran on the host + """ + # keep trying different interpreters until we don't error + if action._finding_python_interpreter: + return action._possible_python_interpreter + + if s in ['auto', 'auto_legacy', 'auto_silent', 'auto_legacy_silent']: + # python is the only supported interpreter_name as of Ansible 2.8.8 + interpreter_name = 'python' + discovered_interpreter_config = u'discovered_interpreter_%s' % interpreter_name + + if task_vars.get('ansible_facts') is None: + task_vars['ansible_facts'] = {} + + if rediscover_python and task_vars.get('ansible_facts', {}).get(discovered_interpreter_config): + # if we're rediscovering python then chances are we're running something like a docker connection + # this will handle scenarios like running a playbook that does stuff + then dynamically creates a docker container, + # then runs the rest of the playbook inside that container, and then rerunning the playbook again + action._rediscovered_python = True + + # blow away the discovered_interpreter_config cache and rediscover + del task_vars['ansible_facts'][discovered_interpreter_config] + + if discovered_interpreter_config not in task_vars['ansible_facts']: + action._finding_python_interpreter = True + # fake pipelining so discover_interpreter can be happy + action._connection.has_pipelining = True + s = AnsibleUnsafeText(discover_interpreter( + action=action, + interpreter_name=interpreter_name, + discovery_mode=s, + task_vars=task_vars)) + + # cache discovered interpreter + task_vars['ansible_facts'][discovered_interpreter_config] = s + action._connection.has_pipelining = False + else: + s = task_vars['ansible_facts'][discovered_interpreter_config] + + # propagate discovered interpreter as fact + action._discovered_interpreter_key = discovered_interpreter_config + action._discovered_interpreter = s + + 
action._finding_python_interpreter = False + return s + + +def parse_python_path(s, task_vars, action, rediscover_python): + """ + Given the string set for ansible_python_interpeter, parse it using shell + syntax and return an appropriate argument vector. If the value detected is + one of interpreter discovery then run that first. Caches python interpreter + discovery value in `facts_from_task_vars` like how Ansible handles this. + """ + if not s: + # if python_path doesn't exist, default to `auto` and attempt to discover it + s = 'auto' + + s = run_interpreter_discovery_if_necessary(s, task_vars, action, rediscover_python) + # if unable to determine python_path, fallback to '/usr/bin/python' + if not s: + s = '/usr/bin/python' + + return ansible.utils.shlex.shlex_split(s) + + +def optional_secret(value): + """ + Wrap `value` in :class:`mitogen.core.Secret` if it is not :data:`None`, + otherwise return :data:`None`. + """ + if value is not None: + return mitogen.core.Secret(value) + + +def first_true(it, default=None): + """ + Return the first truthy element from `it`. + """ + for elem in it: + if elem: + return elem + return default + + +class Spec(with_metaclass(abc.ABCMeta, object)): + """ + A source for variables that comprise a connection configuration. + """ + + @abc.abstractmethod + def transport(self): + """ + The name of the Ansible plug-in implementing the connection. + """ + + @abc.abstractmethod + def inventory_name(self): + """ + The name of the target being connected to as it appears in Ansible's + inventory. + """ + + @abc.abstractmethod + def remote_addr(self): + """ + The network address of the target, or for container and other special + targets, some other unique identifier. + """ + + @abc.abstractmethod + def remote_user(self): + """ + The username of the login account on the target. + """ + + @abc.abstractmethod + def password(self): + """ + The password of the login account on the target. 
+ """ + + @abc.abstractmethod + def become(self): + """ + :data:`True` if privilege escalation should be active. + """ + + @abc.abstractmethod + def become_method(self): + """ + The name of the Ansible become method to use. + """ + + @abc.abstractmethod + def become_user(self): + """ + The username of the target account for become. + """ + + @abc.abstractmethod + def become_pass(self): + """ + The password of the target account for become. + """ + + @abc.abstractmethod + def port(self): + """ + The port of the login service on the target machine. + """ + + @abc.abstractmethod + def python_path(self): + """ + Path to the Python interpreter on the target machine. + """ + + @abc.abstractmethod + def host_key_checking(self): + """ + Whether or not to check the keys of the target machine + """ + + @abc.abstractmethod + def private_key_file(self): + """ + Path to the SSH private key file to use to login. + """ + + @abc.abstractmethod + def ssh_executable(self): + """ + Path to the SSH executable. + """ + + @abc.abstractmethod + def timeout(self): + """ + The generic timeout for all connections. + """ + + @abc.abstractmethod + def ansible_ssh_timeout(self): + """ + The SSH-specific timeout for a connection. + """ + + @abc.abstractmethod + def ssh_args(self): + """ + The list of additional arguments that should be included in an SSH + invocation. + """ + + @abc.abstractmethod + def become_exe(self): + """ + The path to the executable implementing the become method on the remote + machine. + """ + + @abc.abstractmethod + def sudo_args(self): + """ + The list of additional arguments that should be included in a become + invocation. + """ + # TODO: split out into sudo_args/become_args. + + @abc.abstractmethod + def mitogen_via(self): + """ + The value of the mitogen_via= variable for this connection. Indicates + the connection should be established via an intermediary. 
+ """ + + @abc.abstractmethod + def mitogen_kind(self): + """ + The type of container to use with the "setns" transport. + """ + + @abc.abstractmethod + def mitogen_mask_remote_name(self): + """ + Specifies whether to set a fixed "remote_name" field. The remote_name + is the suffix of `argv[0]` for remote interpreters. By default it + includes identifying information from the local process, which may be + undesirable in some circumstances. + """ + + @abc.abstractmethod + def mitogen_buildah_path(self): + """ + The path to the "buildah" program for the 'buildah' transport. + """ + + @abc.abstractmethod + def mitogen_docker_path(self): + """ + The path to the "docker" program for the 'docker' transport. + """ + + @abc.abstractmethod + def mitogen_kubectl_path(self): + """ + The path to the "kubectl" program for the 'docker' transport. + """ + + @abc.abstractmethod + def mitogen_lxc_path(self): + """ + The path to the "lxc" program for the 'lxd' transport. + """ + + @abc.abstractmethod + def mitogen_lxc_attach_path(self): + """ + The path to the "lxc-attach" program for the 'lxc' transport. + """ + + @abc.abstractmethod + def mitogen_lxc_info_path(self): + """ + The path to the "lxc-info" program for the 'lxc' transport. + """ + + @abc.abstractmethod + def mitogen_machinectl_path(self): + """ + The path to the "machinectl" program for the 'setns' transport. + """ + + @abc.abstractmethod + def mitogen_podman_path(self): + """ + The path to the "podman" program for the 'podman' transport. + """ + + @abc.abstractmethod + def mitogen_ssh_keepalive_interval(self): + """ + The SSH ServerAliveInterval. + """ + + @abc.abstractmethod + def mitogen_ssh_keepalive_count(self): + """ + The SSH ServerAliveCount. + """ + + @abc.abstractmethod + def mitogen_ssh_debug_level(self): + """ + The SSH debug level. + """ + + @abc.abstractmethod + def mitogen_ssh_compression(self): + """ + Whether SSH compression is enabled. 
+ """ + + @abc.abstractmethod + def extra_args(self): + """ + Connection-specific arguments. + """ + + @abc.abstractmethod + def ansible_doas_exe(self): + """ + Value of "ansible_doas_exe" variable. + """ + + +class PlayContextSpec(Spec): + """ + PlayContextSpec takes almost all its information as-is from Ansible's + PlayContext. It is used for normal connections and delegate_to connections, + and should always be accurate. + """ + def __init__(self, connection, play_context, transport, inventory_name): + self._connection = connection + self._play_context = play_context + self._transport = transport + self._inventory_name = inventory_name + self._task_vars = self._connection._get_task_vars() + # used to run interpreter discovery + self._action = connection._action + + def transport(self): + return self._transport + + def inventory_name(self): + return self._inventory_name + + def remote_addr(self): + return self._play_context.remote_addr + + def remote_user(self): + return self._play_context.remote_user + + def become(self): + return self._play_context.become + + def become_method(self): + return self._play_context.become_method + + def become_user(self): + return self._play_context.become_user + + def become_pass(self): + # become_pass is owned/provided by the active become plugin. However + # PlayContext is intertwined with it. Known complications + # - ansible_become_password is higher priority than ansible_become_pass, + # `play_context.become_pass` doesn't obey this (atleast with Mitgeon). + # - `meta: reset_connection` runs `connection.reset()` but + # `ansible_mitogen.connection.Connection.reset()` recreates the + # connection object, setting `connection.become = None`. 
+ become_plugin = self._connection.become + try: + become_pass = become_plugin.get_option('become_pass', playcontext=self._play_context) + except AttributeError: + become_pass = self._play_context.become_pass + return optional_secret(become_pass) + + def password(self): + return optional_secret(self._play_context.password) + + def port(self): + return self._play_context.port + + def python_path(self, rediscover_python=False): + s = self._connection.get_task_var('ansible_python_interpreter') + # #511, #536: executor/module_common.py::_get_shebang() hard-wires + # "/usr/bin/python" as the default interpreter path if no other + # interpreter is specified. + return parse_python_path( + s, + task_vars=self._task_vars, + action=self._action, + rediscover_python=rediscover_python) + + def host_key_checking(self): + def candidates(): + yield self._connection.get_task_var('ansible_ssh_host_key_checking') + yield self._connection.get_task_var('ansible_host_key_checking') + yield C.HOST_KEY_CHECKING + val = next((v for v in candidates() if v is not None), True) + return boolean(val) + + def private_key_file(self): + return self._play_context.private_key_file + + def ssh_executable(self): + return C.config.get_config_value("ssh_executable", plugin_type="connection", plugin_name="ssh", variables=self._task_vars.get("vars", {})) + + def timeout(self): + return self._play_context.timeout + + def ansible_ssh_timeout(self): + return ( + self._connection.get_task_var('ansible_timeout') or + self._connection.get_task_var('ansible_ssh_timeout') or + self.timeout() + ) + + def ssh_args(self): + return [ + mitogen.core.to_text(term) + for s in ( + C.config.get_config_value("ssh_args", plugin_type="connection", plugin_name="ssh", variables=self._task_vars.get("vars", {})), + C.config.get_config_value("ssh_common_args", plugin_type="connection", plugin_name="ssh", variables=self._task_vars.get("vars", {})), + C.config.get_config_value("ssh_extra_args", plugin_type="connection", 
plugin_name="ssh", variables=self._task_vars.get("vars", {})) + ) + for term in ansible.utils.shlex.shlex_split(s or '') + ] + + def become_exe(self): + # In Ansible 2.8, PlayContext.become_exe always has a default value due + # to the new options mechanism. Previously it was only set if a value + # ("somewhere") had been specified for the task. + # For consistency in the tests, here we make older Ansibles behave like + # newer Ansibles. + exe = self._play_context.become_exe + if exe is None and self._play_context.become_method == 'sudo': + exe = 'sudo' + return exe + + def sudo_args(self): + return [ + mitogen.core.to_text(term) + for term in ansible.utils.shlex.shlex_split( + first_true(( + self._play_context.become_flags, + # Ansible <=2.7. + getattr(self._play_context, 'sudo_flags', ''), + # Ansible <=2.3. + getattr(C, 'DEFAULT_BECOME_FLAGS', ''), + getattr(C, 'DEFAULT_SUDO_FLAGS', '') + ), default='') + ) + ] + + def mitogen_via(self): + return self._connection.get_task_var('mitogen_via') + + def mitogen_kind(self): + return self._connection.get_task_var('mitogen_kind') + + def mitogen_mask_remote_name(self): + return self._connection.get_task_var('mitogen_mask_remote_name') + + def mitogen_buildah_path(self): + return self._connection.get_task_var('mitogen_buildah_path') + + def mitogen_docker_path(self): + return self._connection.get_task_var('mitogen_docker_path') + + def mitogen_kubectl_path(self): + return self._connection.get_task_var('mitogen_kubectl_path') + + def mitogen_lxc_path(self): + return self._connection.get_task_var('mitogen_lxc_path') + + def mitogen_lxc_attach_path(self): + return self._connection.get_task_var('mitogen_lxc_attach_path') + + def mitogen_lxc_info_path(self): + return self._connection.get_task_var('mitogen_lxc_info_path') + + def mitogen_podman_path(self): + return self._connection.get_task_var('mitogen_podman_path') + + def mitogen_ssh_keepalive_interval(self): + return 
self._connection.get_task_var('mitogen_ssh_keepalive_interval') + + def mitogen_ssh_keepalive_count(self): + return self._connection.get_task_var('mitogen_ssh_keepalive_count') + + def mitogen_machinectl_path(self): + return self._connection.get_task_var('mitogen_machinectl_path') + + def mitogen_ssh_debug_level(self): + return self._connection.get_task_var('mitogen_ssh_debug_level') + + def mitogen_ssh_compression(self): + return self._connection.get_task_var('mitogen_ssh_compression') + + def extra_args(self): + return self._connection.get_extra_args() + + def ansible_doas_exe(self): + return ( + self._connection.get_task_var('ansible_doas_exe') or + os.environ.get('ANSIBLE_DOAS_EXE') + ) + + +class MitogenViaSpec(Spec): + """ + MitogenViaSpec takes most of its information from the HostVars of the + running task. HostVars is a lightweight wrapper around VariableManager, so + it is better to say that VariableManager.get_vars() is the ultimate source + of MitogenViaSpec's information. + + Due to this, mitogen_via= hosts must have all their configuration + information represented as host and group variables. We cannot use any + per-task configuration, as all that data belongs to the real target host. + + Ansible uses all kinds of strange historical logic for calculating + variables, including making their precedence configurable. MitogenViaSpec + must ultimately reimplement all of that logic. It is likely that if you are + having a configruation problem with connection delegation, the answer to + your problem lies in the method implementations below! + """ + def __init__(self, inventory_name, host_vars, task_vars, become_method, become_user, + play_context, action): + """ + :param str inventory_name: + The inventory name of the intermediary machine, i.e. not the target + machine. + :param dict host_vars: + The HostVars magic dictionary provided by Ansible in task_vars. + :param dict task_vars: + Task vars provided by Ansible. 
+ :param str become_method: + If the mitogen_via= spec included a become method, the method it + specifies. + :param str become_user: + If the mitogen_via= spec included a become user, the user it + specifies. + :param PlayContext play_context: + For some global values **only**, the PlayContext used to describe + the real target machine. Values from this object are **strictly + restricted** to values that are Ansible-global, e.g. the passwords + specified interactively. + :param ActionModuleMixin action: + Backref to the ActionModuleMixin required for ansible interpreter discovery + """ + self._inventory_name = inventory_name + self._host_vars = host_vars + self._task_vars = task_vars + self._become_method = become_method + self._become_user = become_user + # Dangerous! You may find a variable you want in this object, but it's + # almost certainly for the wrong machine! + self._dangerous_play_context = play_context + self._action = action + + def transport(self): + return ( + self._host_vars.get('ansible_connection') or + C.DEFAULT_TRANSPORT + ) + + def inventory_name(self): + return self._inventory_name + + def remote_addr(self): + # play_context.py::MAGIC_VARIABLE_MAPPING + return ( + self._host_vars.get('ansible_ssh_host') or + self._host_vars.get('ansible_host') or + self._inventory_name + ) + + def remote_user(self): + return ( + self._host_vars.get('ansible_ssh_user') or + self._host_vars.get('ansible_user') or + C.DEFAULT_REMOTE_USER + ) + + def become(self): + return bool(self._become_user) + + def become_method(self): + return ( + self._become_method or + self._host_vars.get('ansible_become_method') or + C.DEFAULT_BECOME_METHOD + ) + + def become_user(self): + return self._become_user + + def become_pass(self): + return optional_secret( + self._host_vars.get('ansible_become_pass') or + self._host_vars.get('ansible_become_password') + ) + + def password(self): + return optional_secret( + self._host_vars.get('ansible_ssh_pass') or + 
self._host_vars.get('ansible_password') + ) + + def port(self): + return ( + self._host_vars.get('ansible_ssh_port') or + self._host_vars.get('ansible_port') or + C.DEFAULT_REMOTE_PORT + ) + + def python_path(self, rediscover_python=False): + s = self._host_vars.get('ansible_python_interpreter') + # #511, #536: executor/module_common.py::_get_shebang() hard-wires + # "/usr/bin/python" as the default interpreter path if no other + # interpreter is specified. + return parse_python_path( + s, + task_vars=self._task_vars, + action=self._action, + rediscover_python=rediscover_python) + + def host_key_checking(self): + def candidates(): + yield self._host_vars.get('ansible_ssh_host_key_checking') + yield self._host_vars.get('ansible_host_key_checking') + yield C.HOST_KEY_CHECKING + val = next((v for v in candidates() if v is not None), True) + return boolean(val) + + def private_key_file(self): + # TODO: must come from PlayContext too. + return ( + self._host_vars.get('ansible_ssh_private_key_file') or + self._host_vars.get('ansible_private_key_file') or + C.DEFAULT_PRIVATE_KEY_FILE + ) + + def ssh_executable(self): + return C.config.get_config_value("ssh_executable", plugin_type="connection", plugin_name="ssh", variables=self._task_vars.get("vars", {})) + + def timeout(self): + # TODO: must come from PlayContext too. 
+ return C.DEFAULT_TIMEOUT + + def ansible_ssh_timeout(self): + return ( + self._host_vars.get('ansible_timeout') or + self._host_vars.get('ansible_ssh_timeout') or + self.timeout() + ) + + def ssh_args(self): + return [ + mitogen.core.to_text(term) + for s in ( + C.config.get_config_value("ssh_args", plugin_type="connection", plugin_name="ssh", variables=self._task_vars.get("vars", {})), + C.config.get_config_value("ssh_common_args", plugin_type="connection", plugin_name="ssh", variables=self._task_vars.get("vars", {})), + C.config.get_config_value("ssh_extra_args", plugin_type="connection", plugin_name="ssh", variables=self._task_vars.get("vars", {})) + ) + for term in ansible.utils.shlex.shlex_split(s) + if s + ] + + def become_exe(self): + return ( + self._host_vars.get('ansible_become_exe') or + C.DEFAULT_BECOME_EXE + ) + + def sudo_args(self): + return [ + mitogen.core.to_text(term) + for s in ( + self._host_vars.get('ansible_sudo_flags') or '', + self._host_vars.get('ansible_become_flags') or '', + ) + for term in ansible.utils.shlex.shlex_split(s) + ] + + def mitogen_via(self): + return self._host_vars.get('mitogen_via') + + def mitogen_kind(self): + return self._host_vars.get('mitogen_kind') + + def mitogen_mask_remote_name(self): + return self._host_vars.get('mitogen_mask_remote_name') + + def mitogen_buildah_path(self): + return self._host_vars.get('mitogen_buildah_path') + + def mitogen_docker_path(self): + return self._host_vars.get('mitogen_docker_path') + + def mitogen_kubectl_path(self): + return self._host_vars.get('mitogen_kubectl_path') + + def mitogen_lxc_path(self): + return self._host_vars.get('mitogen_lxc_path') + + def mitogen_lxc_attach_path(self): + return self._host_vars.get('mitogen_lxc_attach_path') + + def mitogen_lxc_info_path(self): + return self._host_vars.get('mitogen_lxc_info_path') + + def mitogen_podman_path(self): + return self._host_vars.get('mitogen_podman_path') + + def mitogen_ssh_keepalive_interval(self): + return 
self._host_vars.get('mitogen_ssh_keepalive_interval') + + def mitogen_ssh_keepalive_count(self): + return self._host_vars.get('mitogen_ssh_keepalive_count') + + def mitogen_machinectl_path(self): + return self._host_vars.get('mitogen_machinectl_path') + + def mitogen_ssh_debug_level(self): + return self._host_vars.get('mitogen_ssh_debug_level') + + def mitogen_ssh_compression(self): + return self._host_vars.get('mitogen_ssh_compression') + + def extra_args(self): + return [] # TODO + + def ansible_doas_exe(self): + return ( + self._host_vars.get('ansible_doas_exe') or + os.environ.get('ANSIBLE_DOAS_EXE') + ) diff --git a/mitogen-0.3.9/ansible_mitogen/utils/__init__.py b/mitogen-0.3.9/ansible_mitogen/utils/__init__.py new file mode 100644 index 0000000..a01b261 --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/utils/__init__.py @@ -0,0 +1,29 @@ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import re + +import ansible + +__all__ = [ + 'ansible_version', +] + + +def _parse(v_string): + # Adapted from distutils.version.LooseVersion.parse() + component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE) + for component in component_re.split(v_string): + if not component or component == '.': + continue + try: + yield int(component) + except ValueError: + yield component + + +ansible_version = tuple(_parse(ansible.__version__)) + +del _parse +del re +del ansible diff --git a/mitogen-0.3.9/ansible_mitogen/utils/__pycache__/__init__.cpython-310.pyc b/mitogen-0.3.9/ansible_mitogen/utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000..cf56eae Binary files /dev/null and b/mitogen-0.3.9/ansible_mitogen/utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/mitogen-0.3.9/ansible_mitogen/utils/__pycache__/unsafe.cpython-310.pyc b/mitogen-0.3.9/ansible_mitogen/utils/__pycache__/unsafe.cpython-310.pyc new file mode 100644 index 0000000..52bb74a Binary files /dev/null and 
b/mitogen-0.3.9/ansible_mitogen/utils/__pycache__/unsafe.cpython-310.pyc differ diff --git a/mitogen-0.3.9/ansible_mitogen/utils/unsafe.py b/mitogen-0.3.9/ansible_mitogen/utils/unsafe.py new file mode 100644 index 0000000..b2c3d53 --- /dev/null +++ b/mitogen-0.3.9/ansible_mitogen/utils/unsafe.py @@ -0,0 +1,79 @@ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import ansible +import ansible.utils.unsafe_proxy + +import ansible_mitogen.utils + +import mitogen +import mitogen.core +import mitogen.utils + +__all__ = [ + 'cast', +] + +def _cast_to_dict(obj): return {cast(k): cast(v) for k, v in obj.items()} +def _cast_to_list(obj): return [cast(v) for v in obj] +def _cast_unsafe(obj): return obj._strip_unsafe() +def _passthrough(obj): return obj + + +# A dispatch table to cast objects based on their exact type. +# This is an optimisation, reliable fallbacks are required (e.g. isinstance()) +_CAST_DISPATCH = { + bytes: bytes, + dict: _cast_to_dict, + list: _cast_to_list, + tuple: _cast_to_list, + mitogen.core.UnicodeType: mitogen.core.UnicodeType, +} +_CAST_DISPATCH.update({t: _passthrough for t in mitogen.utils.PASSTHROUGH}) + +if hasattr(ansible.utils.unsafe_proxy.AnsibleUnsafeText, '_strip_unsafe'): + _CAST_DISPATCH.update({ + ansible.utils.unsafe_proxy.AnsibleUnsafeBytes: _cast_unsafe, + ansible.utils.unsafe_proxy.AnsibleUnsafeText: _cast_unsafe, + ansible.utils.unsafe_proxy.NativeJinjaUnsafeText: _cast_unsafe, + }) +elif ansible_mitogen.utils.ansible_version[:2] <= (2, 16): + _CAST_DISPATCH.update({ + ansible.utils.unsafe_proxy.AnsibleUnsafeBytes: bytes, + ansible.utils.unsafe_proxy.AnsibleUnsafeText: mitogen.core.UnicodeType, + }) +else: + mitogen_ver = '.'.join(str(v) for v in mitogen.__version__) + raise ImportError("Mitogen %s can't unwrap Ansible %s AnsibleUnsafe objects" + % (mitogen_ver, ansible.__version__)) + + +def cast(obj): + """ + Return obj (or a copy) with subtypes of builtins cast to their supertype. 
+ + This is an enhanced version of :func:`mitogen.utils.cast`. In addition it + handles ``ansible.utils.unsafe_proxy.AnsibleUnsafeText`` and variants. + + There are types handled by :func:`ansible.utils.unsafe_proxy.wrap_var()` + that this function currently does not handle (e.g. `set()`), or preserve + (e.g. `tuple()`). Future enhancements may change this. + + :param obj: + Object to undecorate. + :returns: + Undecorated object. + """ + # Fast path: obj is a known type, dispatch directly + try: + unwrapper = _CAST_DISPATCH[type(obj)] + except KeyError: + pass + else: + return unwrapper(obj) + + # Slow path: obj is some unknown subclass + if isinstance(obj, dict): return _cast_to_dict(obj) + if isinstance(obj, (list, tuple)): return _cast_to_list(obj) + + return mitogen.utils.cast(obj) diff --git a/mitogen-0.3.9/mitogen.egg-info/PKG-INFO b/mitogen-0.3.9/mitogen.egg-info/PKG-INFO new file mode 100644 index 0000000..e324817 --- /dev/null +++ b/mitogen-0.3.9/mitogen.egg-info/PKG-INFO @@ -0,0 +1,39 @@ +Metadata-Version: 2.1 +Name: mitogen +Version: 0.3.9 +Summary: Library for writing distributed self-replicating programs. 
+Home-page: https://github.com/mitogen-hq/mitogen/ +Author: David Wilson +License: New BSD +Classifier: Environment :: Console +Classifier: Framework :: Ansible +Classifier: Intended Audience :: System Administrators +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: POSIX +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Topic :: System :: Distributed Computing +Classifier: Topic :: System :: Systems Administration +Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.* +Description-Content-Type: text/markdown +License-File: LICENSE + +# Mitogen + +Please see the documentation. 
+ +![](https://i.imgur.com/eBM6LhJ.gif) + +[![Total alerts](https://img.shields.io/lgtm/alerts/g/mitogen-hq/mitogen.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/mitogen-hq/mitogen/alerts/) + +[![Build Status](https://dev.azure.com/mitogen-hq/mitogen/_apis/build/status/mitogen-hq.mitogen?branchName=master)](https://dev.azure.com/mitogen-hq/mitogen/_build/latest?definitionId=1&branchName=master) diff --git a/mitogen-0.3.9/mitogen.egg-info/SOURCES.txt b/mitogen-0.3.9/mitogen.egg-info/SOURCES.txt new file mode 100644 index 0000000..2d2aed3 --- /dev/null +++ b/mitogen-0.3.9/mitogen.egg-info/SOURCES.txt @@ -0,0 +1,82 @@ +LICENSE +MANIFEST.in +README.md +setup.cfg +setup.py +ansible_mitogen/__init__.py +ansible_mitogen/affinity.py +ansible_mitogen/connection.py +ansible_mitogen/loaders.py +ansible_mitogen/logging.py +ansible_mitogen/mixins.py +ansible_mitogen/module_finder.py +ansible_mitogen/parsing.py +ansible_mitogen/planner.py +ansible_mitogen/process.py +ansible_mitogen/runner.py +ansible_mitogen/services.py +ansible_mitogen/strategy.py +ansible_mitogen/target.py +ansible_mitogen/transport_config.py +ansible_mitogen/compat/__init__.py +ansible_mitogen/plugins/__init__.py +ansible_mitogen/plugins/action/__init__.py +ansible_mitogen/plugins/action/mitogen_fetch.py +ansible_mitogen/plugins/action/mitogen_get_stack.py +ansible_mitogen/plugins/connection/__init__.py +ansible_mitogen/plugins/connection/mitogen_buildah.py +ansible_mitogen/plugins/connection/mitogen_doas.py +ansible_mitogen/plugins/connection/mitogen_docker.py +ansible_mitogen/plugins/connection/mitogen_jail.py +ansible_mitogen/plugins/connection/mitogen_kubectl.py +ansible_mitogen/plugins/connection/mitogen_local.py +ansible_mitogen/plugins/connection/mitogen_lxc.py +ansible_mitogen/plugins/connection/mitogen_lxd.py +ansible_mitogen/plugins/connection/mitogen_machinectl.py +ansible_mitogen/plugins/connection/mitogen_podman.py +ansible_mitogen/plugins/connection/mitogen_setns.py 
+ansible_mitogen/plugins/connection/mitogen_ssh.py +ansible_mitogen/plugins/connection/mitogen_su.py +ansible_mitogen/plugins/connection/mitogen_sudo.py +ansible_mitogen/plugins/strategy/__init__.py +ansible_mitogen/plugins/strategy/mitogen.py +ansible_mitogen/plugins/strategy/mitogen_free.py +ansible_mitogen/plugins/strategy/mitogen_host_pinned.py +ansible_mitogen/plugins/strategy/mitogen_linear.py +ansible_mitogen/utils/__init__.py +ansible_mitogen/utils/unsafe.py +mitogen/__init__.py +mitogen/buildah.py +mitogen/core.py +mitogen/debug.py +mitogen/doas.py +mitogen/docker.py +mitogen/fakessh.py +mitogen/fork.py +mitogen/jail.py +mitogen/kubectl.py +mitogen/lxc.py +mitogen/lxd.py +mitogen/master.py +mitogen/minify.py +mitogen/os_fork.py +mitogen/parent.py +mitogen/podman.py +mitogen/profiler.py +mitogen/select.py +mitogen/service.py +mitogen/setns.py +mitogen/ssh.py +mitogen/su.py +mitogen/sudo.py +mitogen/unix.py +mitogen/utils.py +mitogen.egg-info/PKG-INFO +mitogen.egg-info/SOURCES.txt +mitogen.egg-info/dependency_links.txt +mitogen.egg-info/not-zip-safe +mitogen.egg-info/top_level.txt +mitogen/compat/__init__.py +mitogen/compat/pkgutil.py +mitogen/compat/tokenize.py +tests/testlib.py \ No newline at end of file diff --git a/mitogen-0.3.9/mitogen.egg-info/dependency_links.txt b/mitogen-0.3.9/mitogen.egg-info/dependency_links.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/mitogen-0.3.9/mitogen.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/mitogen-0.3.9/mitogen.egg-info/not-zip-safe b/mitogen-0.3.9/mitogen.egg-info/not-zip-safe new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/mitogen-0.3.9/mitogen.egg-info/not-zip-safe @@ -0,0 +1 @@ + diff --git a/mitogen-0.3.9/mitogen.egg-info/top_level.txt b/mitogen-0.3.9/mitogen.egg-info/top_level.txt new file mode 100644 index 0000000..2360b3f --- /dev/null +++ b/mitogen-0.3.9/mitogen.egg-info/top_level.txt @@ -0,0 +1,2 @@ +ansible_mitogen +mitogen diff --git 
a/mitogen-0.3.9/mitogen/__init__.py b/mitogen-0.3.9/mitogen/__init__.py new file mode 100644 index 0000000..b0c6679 --- /dev/null +++ b/mitogen-0.3.9/mitogen/__init__.py @@ -0,0 +1,120 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +# !mitogen: minify_safe + +""" +On the Mitogen master, this is imported from ``mitogen/__init__.py`` as would +be expected. On the slave, it is built dynamically during startup. +""" + + +#: Library version as a tuple. 
+__version__ = (0, 3, 9) + + +#: This is :data:`False` in slave contexts. Previously it was used to prevent +#: re-execution of :mod:`__main__` in single file programs, however that now +#: happens automatically. +is_master = True + + +#: This is `0` in a master, otherwise it is the master-assigned ID unique to +#: the slave context used for message routing. +context_id = 0 + + +#: This is :data:`None` in a master, otherwise it is the master-assigned ID +#: unique to the slave's parent context. +parent_id = None + + +#: This is an empty list in a master, otherwise it is a list of parent context +#: IDs ordered from most direct to least direct. +parent_ids = [] + + +import os +_default_profiling = os.environ.get('MITOGEN_PROFILING') is not None +del os + + +def main(log_level='INFO', profiling=_default_profiling): + """ + Convenience decorator primarily useful for writing discardable test + scripts. + + In the master process, when `func` is defined in the :mod:`__main__` + module, arranges for `func(router)` to be invoked immediately, with + :py:class:`mitogen.master.Router` construction and destruction handled just + as in :py:func:`mitogen.utils.run_with_router`. In slaves, this function + does nothing. + + :param str log_level: + Logging package level to configure via + :py:func:`mitogen.utils.log_to_file`. + + :param bool profiling: + If :py:data:`True`, equivalent to setting + :py:attr:`mitogen.master.Router.profiling` prior to router + construction. This causes ``/tmp`` files to be created everywhere at + the end of a successful run with :py:mod:`cProfile` output for every + thread. 
+ + Example: + + :: + + import mitogen + import requests + + def get_url(url): + return requests.get(url).text + + @mitogen.main() + def main(router): + z = router.ssh(hostname='k3') + print(z.call(get_url, 'https://example.org/')) + + """ + + def wrapper(func): + if func.__module__ != '__main__': + return func + import mitogen.parent + import mitogen.utils + if profiling: + mitogen.core.enable_profiling() + mitogen.master.Router.profiling = profiling + mitogen.utils.log_to_file(level=log_level) + return mitogen.core._profile_hook( + 'app.main', + mitogen.utils.run_with_router, + func, + ) + return wrapper diff --git a/mitogen-0.3.9/mitogen/__pycache__/__init__.cpython-310.pyc b/mitogen-0.3.9/mitogen/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000..fe5121d Binary files /dev/null and b/mitogen-0.3.9/mitogen/__pycache__/__init__.cpython-310.pyc differ diff --git a/mitogen-0.3.9/mitogen/__pycache__/core.cpython-310.pyc b/mitogen-0.3.9/mitogen/__pycache__/core.cpython-310.pyc new file mode 100644 index 0000000..f48d3fc Binary files /dev/null and b/mitogen-0.3.9/mitogen/__pycache__/core.cpython-310.pyc differ diff --git a/mitogen-0.3.9/mitogen/__pycache__/debug.cpython-310.pyc b/mitogen-0.3.9/mitogen/__pycache__/debug.cpython-310.pyc new file mode 100644 index 0000000..46197cd Binary files /dev/null and b/mitogen-0.3.9/mitogen/__pycache__/debug.cpython-310.pyc differ diff --git a/mitogen-0.3.9/mitogen/__pycache__/fork.cpython-310.pyc b/mitogen-0.3.9/mitogen/__pycache__/fork.cpython-310.pyc new file mode 100644 index 0000000..1b61353 Binary files /dev/null and b/mitogen-0.3.9/mitogen/__pycache__/fork.cpython-310.pyc differ diff --git a/mitogen-0.3.9/mitogen/__pycache__/master.cpython-310.pyc b/mitogen-0.3.9/mitogen/__pycache__/master.cpython-310.pyc new file mode 100644 index 0000000..fbbd280 Binary files /dev/null and b/mitogen-0.3.9/mitogen/__pycache__/master.cpython-310.pyc differ diff --git 
a/mitogen-0.3.9/mitogen/__pycache__/minify.cpython-310.pyc b/mitogen-0.3.9/mitogen/__pycache__/minify.cpython-310.pyc new file mode 100644 index 0000000..b3abfe4 Binary files /dev/null and b/mitogen-0.3.9/mitogen/__pycache__/minify.cpython-310.pyc differ diff --git a/mitogen-0.3.9/mitogen/__pycache__/parent.cpython-310.pyc b/mitogen-0.3.9/mitogen/__pycache__/parent.cpython-310.pyc new file mode 100644 index 0000000..cdacede Binary files /dev/null and b/mitogen-0.3.9/mitogen/__pycache__/parent.cpython-310.pyc differ diff --git a/mitogen-0.3.9/mitogen/__pycache__/select.cpython-310.pyc b/mitogen-0.3.9/mitogen/__pycache__/select.cpython-310.pyc new file mode 100644 index 0000000..036ccab Binary files /dev/null and b/mitogen-0.3.9/mitogen/__pycache__/select.cpython-310.pyc differ diff --git a/mitogen-0.3.9/mitogen/__pycache__/service.cpython-310.pyc b/mitogen-0.3.9/mitogen/__pycache__/service.cpython-310.pyc new file mode 100644 index 0000000..038e4d9 Binary files /dev/null and b/mitogen-0.3.9/mitogen/__pycache__/service.cpython-310.pyc differ diff --git a/mitogen-0.3.9/mitogen/__pycache__/ssh.cpython-310.pyc b/mitogen-0.3.9/mitogen/__pycache__/ssh.cpython-310.pyc new file mode 100644 index 0000000..1f1c341 Binary files /dev/null and b/mitogen-0.3.9/mitogen/__pycache__/ssh.cpython-310.pyc differ diff --git a/mitogen-0.3.9/mitogen/__pycache__/sudo.cpython-310.pyc b/mitogen-0.3.9/mitogen/__pycache__/sudo.cpython-310.pyc new file mode 100644 index 0000000..e709fe4 Binary files /dev/null and b/mitogen-0.3.9/mitogen/__pycache__/sudo.cpython-310.pyc differ diff --git a/mitogen-0.3.9/mitogen/__pycache__/unix.cpython-310.pyc b/mitogen-0.3.9/mitogen/__pycache__/unix.cpython-310.pyc new file mode 100644 index 0000000..7e94f16 Binary files /dev/null and b/mitogen-0.3.9/mitogen/__pycache__/unix.cpython-310.pyc differ diff --git a/mitogen-0.3.9/mitogen/__pycache__/utils.cpython-310.pyc b/mitogen-0.3.9/mitogen/__pycache__/utils.cpython-310.pyc new file mode 100644 index 
0000000..aae5273 Binary files /dev/null and b/mitogen-0.3.9/mitogen/__pycache__/utils.cpython-310.pyc differ diff --git a/mitogen-0.3.9/mitogen/buildah.py b/mitogen-0.3.9/mitogen/buildah.py new file mode 100644 index 0000000..7a1e3f8 --- /dev/null +++ b/mitogen-0.3.9/mitogen/buildah.py @@ -0,0 +1,72 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+ +# !mitogen: minify_safe + +import logging + +import mitogen.parent + + +LOG = logging.getLogger(__name__) + + +class Options(mitogen.parent.Options): + container = None + username = None + buildah_path = 'buildah' + + def __init__(self, container=None, buildah_path=None, username=None, + **kwargs): + super(Options, self).__init__(**kwargs) + assert container is not None + self.container = container + if buildah_path: + self.buildah_path = buildah_path + if username: + self.username = username + + +class Connection(mitogen.parent.Connection): + options_class = Options + child_is_immediate_subprocess = False + + # TODO: better way of capturing errors such as "No such container." + create_child_args = { + 'merge_stdio': True + } + + def _get_name(self): + return u'buildah.' + self.options.container + + def get_boot_command(self): + args = [self.options.buildah_path, 'run'] + if self.options.username: + args += ['--user=' + self.options.username] + args += ['--', self.options.container] + return args + super(Connection, self).get_boot_command() diff --git a/mitogen-0.3.9/mitogen/compat/__init__.py b/mitogen-0.3.9/mitogen/compat/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/mitogen-0.3.9/mitogen/compat/pkgutil.py b/mitogen-0.3.9/mitogen/compat/pkgutil.py new file mode 100644 index 0000000..15eb2af --- /dev/null +++ b/mitogen-0.3.9/mitogen/compat/pkgutil.py @@ -0,0 +1,594 @@ +"""Utilities to support packages.""" + +# !mitogen: minify_safe + +# NOTE: This module must remain compatible with Python 2.3, as it is shared +# by setuptools for distribution with Python 2.3 and up. 
+ +import os +import sys +import imp +import os.path +from types import ModuleType + +__all__ = [ + 'get_importer', 'iter_importers', 'get_loader', 'find_loader', + 'walk_packages', 'iter_modules', 'get_data', + 'ImpImporter', 'ImpLoader', 'read_code', 'extend_path', +] + +def read_code(stream): + # This helper is needed in order for the PEP 302 emulation to + # correctly handle compiled files + import marshal + + magic = stream.read(4) + if magic != imp.get_magic(): + return None + + stream.read(4) # Skip timestamp + return marshal.load(stream) + + +def simplegeneric(func): + """Make a trivial single-dispatch generic function""" + registry = {} + def wrapper(*args, **kw): + ob = args[0] + try: + cls = ob.__class__ + except AttributeError: + cls = type(ob) + try: + mro = cls.__mro__ + except AttributeError: + try: + class cls(cls, object): + pass + mro = cls.__mro__[1:] + except TypeError: + mro = object, # must be an ExtensionClass or some such :( + for t in mro: + if t in registry: + return registry[t](*args, **kw) + else: + return func(*args, **kw) + try: + wrapper.__name__ = func.__name__ + except (TypeError, AttributeError): + pass # Python 2.3 doesn't allow functions to be renamed + + def register(typ, func=None): + if func is None: + return lambda f: register(typ, f) + registry[typ] = func + return func + + wrapper.__dict__ = func.__dict__ + wrapper.__doc__ = func.__doc__ + wrapper.register = register + return wrapper + + +def walk_packages(path=None, prefix='', onerror=None): + """Yields (module_loader, name, ispkg) for all modules recursively + on path, or, if path is None, all accessible modules. + + 'path' should be either None or a list of paths to look for + modules in. + + 'prefix' is a string to output on the front of every module name + on output. + + Note that this function must import all *packages* (NOT all + modules!) on the given path, in order to access the __path__ + attribute to find submodules. 
+ + 'onerror' is a function which gets called with one argument (the + name of the package which was being imported) if any exception + occurs while trying to import a package. If no onerror function is + supplied, ImportErrors are caught and ignored, while all other + exceptions are propagated, terminating the search. + + Examples: + + # list all modules python can access + walk_packages() + + # list all submodules of ctypes + walk_packages(ctypes.__path__, ctypes.__name__+'.') + """ + + def seen(p, m={}): + if p in m: + return True + m[p] = True + + for importer, name, ispkg in iter_modules(path, prefix): + yield importer, name, ispkg + + if ispkg: + try: + __import__(name) + except ImportError: + if onerror is not None: + onerror(name) + except Exception: + if onerror is not None: + onerror(name) + else: + raise + else: + path = getattr(sys.modules[name], '__path__', None) or [] + + # don't traverse path items we've seen before + path = [p for p in path if not seen(p)] + + for item in walk_packages(path, name+'.', onerror): + yield item + + +def iter_modules(path=None, prefix=''): + """Yields (module_loader, name, ispkg) for all submodules on path, + or, if path is None, all top-level modules on sys.path. + + 'path' should be either None or a list of paths to look for + modules in. + + 'prefix' is a string to output on the front of every module name + on output. 
+ """ + + if path is None: + importers = iter_importers() + else: + importers = map(get_importer, path) + + yielded = {} + for i in importers: + for name, ispkg in iter_importer_modules(i, prefix): + if name not in yielded: + yielded[name] = 1 + yield i, name, ispkg + + +#@simplegeneric +def iter_importer_modules(importer, prefix=''): + if not hasattr(importer, 'iter_modules'): + return [] + return importer.iter_modules(prefix) + +iter_importer_modules = simplegeneric(iter_importer_modules) + + +class ImpImporter: + """PEP 302 Importer that wraps Python's "classic" import algorithm + + ImpImporter(dirname) produces a PEP 302 importer that searches that + directory. ImpImporter(None) produces a PEP 302 importer that searches + the current sys.path, plus any modules that are frozen or built-in. + + Note that ImpImporter does not currently support being used by placement + on sys.meta_path. + """ + + def __init__(self, path=None): + self.path = path + + def find_module(self, fullname, path=None): + # Note: we ignore 'path' argument since it is only used via meta_path + subname = fullname.split(".")[-1] + if subname != fullname and self.path is None: + return None + if self.path is None: + path = None + else: + path = [os.path.realpath(self.path)] + try: + file, filename, etc = imp.find_module(subname, path) + except ImportError: + return None + return ImpLoader(fullname, file, filename, etc) + + def iter_modules(self, prefix=''): + if self.path is None or not os.path.isdir(self.path): + return + + yielded = {} + import inspect + try: + filenames = os.listdir(self.path) + except OSError: + # ignore unreadable directories like import does + filenames = [] + filenames.sort() # handle packages before same-named modules + + for fn in filenames: + modname = inspect.getmodulename(fn) + if modname=='__init__' or modname in yielded: + continue + + path = os.path.join(self.path, fn) + ispkg = False + + if not modname and os.path.isdir(path) and '.' 
not in fn: + modname = fn + try: + dircontents = os.listdir(path) + except OSError: + # ignore unreadable directories like import does + dircontents = [] + for fn in dircontents: + subname = inspect.getmodulename(fn) + if subname=='__init__': + ispkg = True + break + else: + continue # not a package + + if modname and '.' not in modname: + yielded[modname] = 1 + yield prefix + modname, ispkg + + +class ImpLoader: + """PEP 302 Loader that wraps Python's "classic" import algorithm + """ + code = source = None + + def __init__(self, fullname, file, filename, etc): + self.file = file + self.filename = filename + self.fullname = fullname + self.etc = etc + + def load_module(self, fullname): + self._reopen() + try: + mod = imp.load_module(fullname, self.file, self.filename, self.etc) + finally: + if self.file: + self.file.close() + # Note: we don't set __loader__ because we want the module to look + # normal; i.e. this is just a wrapper for standard import machinery + return mod + + def get_data(self, pathname): + return open(pathname, "rb").read() + + def _reopen(self): + if self.file and self.file.closed: + mod_type = self.etc[2] + if mod_type==imp.PY_SOURCE: + self.file = open(self.filename, 'rU') + elif mod_type in (imp.PY_COMPILED, imp.C_EXTENSION): + self.file = open(self.filename, 'rb') + + def _fix_name(self, fullname): + if fullname is None: + fullname = self.fullname + elif fullname != self.fullname: + raise ImportError("Loader for module %s cannot handle " + "module %s" % (self.fullname, fullname)) + return fullname + + def is_package(self, fullname): + fullname = self._fix_name(fullname) + return self.etc[2]==imp.PKG_DIRECTORY + + def get_code(self, fullname=None): + fullname = self._fix_name(fullname) + if self.code is None: + mod_type = self.etc[2] + if mod_type==imp.PY_SOURCE: + source = self.get_source(fullname) + self.code = compile(source, self.filename, 'exec') + elif mod_type==imp.PY_COMPILED: + self._reopen() + try: + self.code = read_code(self.file) 
+ finally: + self.file.close() + elif mod_type==imp.PKG_DIRECTORY: + self.code = self._get_delegate().get_code() + return self.code + + def get_source(self, fullname=None): + fullname = self._fix_name(fullname) + if self.source is None: + mod_type = self.etc[2] + if mod_type==imp.PY_SOURCE: + self._reopen() + try: + self.source = self.file.read() + finally: + self.file.close() + elif mod_type==imp.PY_COMPILED: + if os.path.exists(self.filename[:-1]): + f = open(self.filename[:-1], 'rU') + self.source = f.read() + f.close() + elif mod_type==imp.PKG_DIRECTORY: + self.source = self._get_delegate().get_source() + return self.source + + + def _get_delegate(self): + return ImpImporter(self.filename).find_module('__init__') + + def get_filename(self, fullname=None): + fullname = self._fix_name(fullname) + mod_type = self.etc[2] + if self.etc[2]==imp.PKG_DIRECTORY: + return self._get_delegate().get_filename() + elif self.etc[2] in (imp.PY_SOURCE, imp.PY_COMPILED, imp.C_EXTENSION): + return self.filename + return None + + +try: + import zipimport + from zipimport import zipimporter + + def iter_zipimport_modules(importer, prefix=''): + dirlist = zipimport._zip_directory_cache[importer.archive].keys() + dirlist.sort() + _prefix = importer.prefix + plen = len(_prefix) + yielded = {} + import inspect + for fn in dirlist: + if not fn.startswith(_prefix): + continue + + fn = fn[plen:].split(os.sep) + + if len(fn)==2 and fn[1].startswith('__init__.py'): + if fn[0] not in yielded: + yielded[fn[0]] = 1 + yield fn[0], True + + if len(fn)!=1: + continue + + modname = inspect.getmodulename(fn[0]) + if modname=='__init__': + continue + + if modname and '.' 
not in modname and modname not in yielded: + yielded[modname] = 1 + yield prefix + modname, False + + iter_importer_modules.register(zipimporter, iter_zipimport_modules) + +except ImportError: + pass + + +def get_importer(path_item): + """Retrieve a PEP 302 importer for the given path item + + The returned importer is cached in sys.path_importer_cache + if it was newly created by a path hook. + + If there is no importer, a wrapper around the basic import + machinery is returned. This wrapper is never inserted into + the importer cache (None is inserted instead). + + The cache (or part of it) can be cleared manually if a + rescan of sys.path_hooks is necessary. + """ + try: + importer = sys.path_importer_cache[path_item] + except KeyError: + for path_hook in sys.path_hooks: + try: + importer = path_hook(path_item) + break + except ImportError: + pass + else: + importer = None + sys.path_importer_cache.setdefault(path_item, importer) + + if importer is None: + try: + importer = ImpImporter(path_item) + except ImportError: + importer = None + return importer + + +def iter_importers(fullname=""): + """Yield PEP 302 importers for the given module name + + If fullname contains a '.', the importers will be for the package + containing fullname, otherwise they will be importers for sys.meta_path, + sys.path, and Python's "classic" import machinery, in that order. If + the named module is in a package, that package is imported as a side + effect of invoking this function. + + Non PEP 302 mechanisms (e.g. the Windows registry) used by the + standard import machinery to find files in alternative locations + are partially supported, but are searched AFTER sys.path. Normally, + these locations are searched BEFORE sys.path, preventing sys.path + entries from shadowing them. + + For this to cause a visible difference in behaviour, there must + be a module or package name that is accessible via both sys.path + and one of the non PEP 302 file system mechanisms. 
In this case, + the emulation will find the former version, while the builtin + import mechanism will find the latter. + + Items of the following types can be affected by this discrepancy: + imp.C_EXTENSION, imp.PY_SOURCE, imp.PY_COMPILED, imp.PKG_DIRECTORY + """ + if fullname.startswith('.'): + raise ImportError("Relative module names not supported") + if '.' in fullname: + # Get the containing package's __path__ + pkg = '.'.join(fullname.split('.')[:-1]) + if pkg not in sys.modules: + __import__(pkg) + path = getattr(sys.modules[pkg], '__path__', None) or [] + else: + for importer in sys.meta_path: + yield importer + path = sys.path + for item in path: + yield get_importer(item) + if '.' not in fullname: + yield ImpImporter() + +def get_loader(module_or_name): + """Get a PEP 302 "loader" object for module_or_name + + If the module or package is accessible via the normal import + mechanism, a wrapper around the relevant part of that machinery + is returned. Returns None if the module cannot be found or imported. + If the named module is not already imported, its containing package + (if any) is imported, in order to establish the package __path__. + + This function uses iter_importers(), and is thus subject to the same + limitations regarding platform-specific special import locations such + as the Windows registry. + """ + if module_or_name in sys.modules: + module_or_name = sys.modules[module_or_name] + if isinstance(module_or_name, ModuleType): + module = module_or_name + loader = getattr(module, '__loader__', None) + if loader is not None: + return loader + fullname = module.__name__ + else: + fullname = module_or_name + return find_loader(fullname) + +def find_loader(fullname): + """Find a PEP 302 "loader" object for fullname + + If fullname contains dots, path must be the containing package's __path__. + Returns None if the module cannot be found or imported. 
This function uses + iter_importers(), and is thus subject to the same limitations regarding + platform-specific special import locations such as the Windows registry. + """ + for importer in iter_importers(fullname): + loader = importer.find_module(fullname) + if loader is not None: + return loader + + return None + + +def extend_path(path, name): + """Extend a package's path. + + Intended use is to place the following code in a package's __init__.py: + + from pkgutil import extend_path + __path__ = extend_path(__path__, __name__) + + This will add to the package's __path__ all subdirectories of + directories on sys.path named after the package. This is useful + if one wants to distribute different parts of a single logical + package as multiple directories. + + It also looks for *.pkg files beginning where * matches the name + argument. This feature is similar to *.pth files (see site.py), + except that it doesn't special-case lines starting with 'import'. + A *.pkg file is trusted at face value: apart from checking for + duplicates, all entries found in a *.pkg file are added to the + path, regardless of whether they are exist the filesystem. (This + is a feature.) + + If the input path is not a list (as is the case for frozen + packages) it is returned unchanged. The input path is not + modified; an extended copy is returned. Items are only appended + to the copy at the end. + + It is assumed that sys.path is a sequence. Items of sys.path that + are not (unicode or 8-bit) strings referring to existing + directories are ignored. Unicode items of sys.path that cause + errors when used as filenames may cause this function to raise an + exception (in line with os.path.isdir() behavior). + """ + + if not isinstance(path, list): + # This could happen e.g. when this is called from inside a + # frozen package. Return the path unchanged in that case. 
+ return path + + pname = os.path.join(*name.split('.')) # Reconstitute as relative path + # Just in case os.extsep != '.' + sname = os.extsep.join(name.split('.')) + sname_pkg = sname + os.extsep + "pkg" + init_py = "__init__" + os.extsep + "py" + + path = path[:] # Start with a copy of the existing path + + for dir in sys.path: + if not isinstance(dir, basestring) or not os.path.isdir(dir): + continue + subdir = os.path.join(dir, pname) + # XXX This may still add duplicate entries to path on + # case-insensitive filesystems + initfile = os.path.join(subdir, init_py) + if subdir not in path and os.path.isfile(initfile): + path.append(subdir) + # XXX Is this the right thing for subpackages like zope.app? + # It looks for a file named "zope.app.pkg" + pkgfile = os.path.join(dir, sname_pkg) + if os.path.isfile(pkgfile): + try: + f = open(pkgfile) + except IOError: + msg = sys.exc_info()[1] + sys.stderr.write("Can't open %s: %s\n" % + (pkgfile, msg)) + else: + for line in f: + line = line.rstrip('\n') + if not line or line.startswith('#'): + continue + path.append(line) # Don't check for existence! + f.close() + + return path + +def get_data(package, resource): + """Get a resource from a package. + + This is a wrapper round the PEP 302 loader get_data API. The package + argument should be the name of a package, in standard module format + (foo.bar). The resource argument should be in the form of a relative + filename, using '/' as the path separator. The parent directory name '..' + is not allowed, and nor is a rooted name (starting with a '/'). + + The function returns a binary string, which is the contents of the + specified resource. 
+ + For packages located in the filesystem, which have already been imported, + this is the rough equivalent of + + d = os.path.dirname(sys.modules[package].__file__) + data = open(os.path.join(d, resource), 'rb').read() + + If the package cannot be located or loaded, or it uses a PEP 302 loader + which does not support get_data(), then None is returned. + """ + + loader = get_loader(package) + if loader is None or not hasattr(loader, 'get_data'): + return None + mod = sys.modules.get(package) or loader.load_module(package) + if mod is None or not hasattr(mod, '__file__'): + return None + + # Modify the resource name to be compatible with the loader.get_data + # signature - an os.path format "filename" starting with the dirname of + # the package's __file__ + parts = resource.split('/') + parts.insert(0, os.path.dirname(mod.__file__)) + resource_name = os.path.join(*parts) + return loader.get_data(resource_name) diff --git a/mitogen-0.3.9/mitogen/compat/tokenize.py b/mitogen-0.3.9/mitogen/compat/tokenize.py new file mode 100644 index 0000000..0473c6a --- /dev/null +++ b/mitogen-0.3.9/mitogen/compat/tokenize.py @@ -0,0 +1,453 @@ +"""Tokenization help for Python programs. + +generate_tokens(readline) is a generator that breaks a stream of +text into Python tokens. It accepts a readline-like method which is called +repeatedly to get the next line of input (or "" for EOF). 
It generates +5-tuples with these members: + + the token type (see token.py) + the token (a string) + the starting (row, column) indices of the token (a 2-tuple of ints) + the ending (row, column) indices of the token (a 2-tuple of ints) + the original line (string) + +It is designed to match the working of the Python tokenizer exactly, except +that it produces COMMENT tokens for comments and gives type OP for all +operators + +Older entry points + tokenize_loop(readline, tokeneater) + tokenize(readline, tokeneater=printtoken) +are the same, except instead of generating tokens, tokeneater is a callback +function to which the 5 fields described above are passed as 5 arguments, +each time a new token is found.""" + +# !mitogen: minify_safe + +__author__ = 'Ka-Ping Yee ' +__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, ' + 'Skip Montanaro, Raymond Hettinger') + +from itertools import chain +import string, re +from token import * + +import token +__all__ = [x for x in dir(token) if not x.startswith("_")] +__all__ += ["COMMENT", "tokenize", "generate_tokens", "NL", "untokenize"] +del token + +COMMENT = N_TOKENS +tok_name[COMMENT] = 'COMMENT' +NL = N_TOKENS + 1 +tok_name[NL] = 'NL' +N_TOKENS += 2 + +def group(*choices): return '(' + '|'.join(choices) + ')' +def any(*choices): return group(*choices) + '*' +def maybe(*choices): return group(*choices) + '?' + +Whitespace = r'[ \f\t]*' +Comment = r'#[^\r\n]*' +Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment) +Name = r'[a-zA-Z_]\w*' + +Hexnumber = r'0[xX][\da-fA-F]+[lL]?' +Octnumber = r'(0[oO][0-7]+)|(0[0-7]*)[lL]?' +Binnumber = r'0[bB][01]+[lL]?' +Decnumber = r'[1-9]\d*[lL]?' 
+Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber) +Exponent = r'[eE][-+]?\d+' +Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent) +Expfloat = r'\d+' + Exponent +Floatnumber = group(Pointfloat, Expfloat) +Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]') +Number = group(Imagnumber, Floatnumber, Intnumber) + +# Tail end of ' string. +Single = r"[^'\\]*(?:\\.[^'\\]*)*'" +# Tail end of " string. +Double = r'[^"\\]*(?:\\.[^"\\]*)*"' +# Tail end of ''' string. +Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''" +# Tail end of """ string. +Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""' +Triple = group("[uUbB]?[rR]?'''", '[uUbB]?[rR]?"""') +# Single-line ' or " string. +String = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'", + r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"') + +# Because of leftmost-then-longest match semantics, be sure to put the +# longest operators first (e.g., if = came before ==, == would get +# recognized as two instances of =). +Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=", + r"//=?", + r"[+\-*/%&|^=<>]=?", + r"~") + +Bracket = '[][(){}]' +Special = group(r'\r?\n', r'[:;.,`@]') +Funny = group(Operator, Bracket, Special) + +PlainToken = group(Number, Funny, String, Name) +Token = Ignore + PlainToken + +# First (or only) line of ' or " string. 
+ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" + + group("'", r'\\\r?\n'), + r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' + + group('"', r'\\\r?\n')) +PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple) +PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name) + +tokenprog, pseudoprog, single3prog, double3prog = map( + re.compile, (Token, PseudoToken, Single3, Double3)) +endprogs = {"'": re.compile(Single), '"': re.compile(Double), + "'''": single3prog, '"""': double3prog, + "r'''": single3prog, 'r"""': double3prog, + "u'''": single3prog, 'u"""': double3prog, + "ur'''": single3prog, 'ur"""': double3prog, + "R'''": single3prog, 'R"""': double3prog, + "U'''": single3prog, 'U"""': double3prog, + "uR'''": single3prog, 'uR"""': double3prog, + "Ur'''": single3prog, 'Ur"""': double3prog, + "UR'''": single3prog, 'UR"""': double3prog, + "b'''": single3prog, 'b"""': double3prog, + "br'''": single3prog, 'br"""': double3prog, + "B'''": single3prog, 'B"""': double3prog, + "bR'''": single3prog, 'bR"""': double3prog, + "Br'''": single3prog, 'Br"""': double3prog, + "BR'''": single3prog, 'BR"""': double3prog, + 'r': None, 'R': None, 'u': None, 'U': None, + 'b': None, 'B': None} + +triple_quoted = {} +for t in ("'''", '"""', + "r'''", 'r"""', "R'''", 'R"""', + "u'''", 'u"""', "U'''", 'U"""', + "ur'''", 'ur"""', "Ur'''", 'Ur"""', + "uR'''", 'uR"""', "UR'''", 'UR"""', + "b'''", 'b"""', "B'''", 'B"""', + "br'''", 'br"""', "Br'''", 'Br"""', + "bR'''", 'bR"""', "BR'''", 'BR"""'): + triple_quoted[t] = t +single_quoted = {} +for t in ("'", '"', + "r'", 'r"', "R'", 'R"', + "u'", 'u"', "U'", 'U"', + "ur'", 'ur"', "Ur'", 'Ur"', + "uR'", 'uR"', "UR'", 'UR"', + "b'", 'b"', "B'", 'B"', + "br'", 'br"', "Br'", 'Br"', + "bR'", 'bR"', "BR'", 'BR"' ): + single_quoted[t] = t + +tabsize = 8 + +class TokenError(Exception): pass + +class StopTokenizing(Exception): pass + +def printtoken(type, token, srow_scol, erow_ecol, line): # for testing + srow, scol = srow_scol + 
erow, ecol = erow_ecol + print("%d,%d-%d,%d:\t%s\t%s" % \ + (srow, scol, erow, ecol, tok_name[type], repr(token))) + +def tokenize(readline, tokeneater=printtoken): + """ + The tokenize() function accepts two parameters: one representing the + input stream, and one providing an output mechanism for tokenize(). + + The first parameter, readline, must be a callable object which provides + the same interface as the readline() method of built-in file objects. + Each call to the function should return one line of input as a string. + + The second parameter, tokeneater, must also be a callable object. It is + called once for each token, with five arguments, corresponding to the + tuples generated by generate_tokens(). + """ + try: + tokenize_loop(readline, tokeneater) + except StopTokenizing: + pass + +# backwards compatible interface +def tokenize_loop(readline, tokeneater): + for token_info in generate_tokens(readline): + tokeneater(*token_info) + +class Untokenizer: + + def __init__(self): + self.tokens = [] + self.prev_row = 1 + self.prev_col = 0 + + def add_whitespace(self, start): + row, col = start + if row < self.prev_row or row == self.prev_row and col < self.prev_col: + raise ValueError("start ({},{}) precedes previous end ({},{})" + .format(row, col, self.prev_row, self.prev_col)) + row_offset = row - self.prev_row + if row_offset: + self.tokens.append("\\\n" * row_offset) + self.prev_col = 0 + col_offset = col - self.prev_col + if col_offset: + self.tokens.append(" " * col_offset) + + def untokenize(self, iterable): + it = iter(iterable) + indents = [] + startline = False + for t in it: + if len(t) == 2: + self.compat(t, it) + break + tok_type, token, start, end, line = t + if tok_type == ENDMARKER: + break + if tok_type == INDENT: + indents.append(token) + continue + elif tok_type == DEDENT: + indents.pop() + self.prev_row, self.prev_col = end + continue + elif tok_type in (NEWLINE, NL): + startline = True + elif startline and indents: + indent = indents[-1] 
+ if start[1] >= len(indent): + self.tokens.append(indent) + self.prev_col = len(indent) + startline = False + self.add_whitespace(start) + self.tokens.append(token) + self.prev_row, self.prev_col = end + if tok_type in (NEWLINE, NL): + self.prev_row += 1 + self.prev_col = 0 + return "".join(self.tokens) + + def compat(self, token, iterable): + indents = [] + toks_append = self.tokens.append + startline = token[0] in (NEWLINE, NL) + prevstring = False + + for tok in chain([token], iterable): + toknum, tokval = tok[:2] + + if toknum in (NAME, NUMBER): + tokval += ' ' + + # Insert a space between two consecutive strings + if toknum == STRING: + if prevstring: + tokval = ' ' + tokval + prevstring = True + else: + prevstring = False + + if toknum == INDENT: + indents.append(tokval) + continue + elif toknum == DEDENT: + indents.pop() + continue + elif toknum in (NEWLINE, NL): + startline = True + elif startline and indents: + toks_append(indents[-1]) + startline = False + toks_append(tokval) + +def untokenize(iterable): + """Transform tokens back into Python source code. + + Each element returned by the iterable must be a token sequence + with at least two elements, a token number and token value. If + only two tokens are passed, the resulting output is poor. + + Round-trip invariant for full input: + Untokenized source will match input source exactly + + Round-trip invariant for limited intput: + # Output text will tokenize the back to the input + t1 = [tok[:2] for tok in generate_tokens(f.readline)] + newcode = untokenize(t1) + readline = iter(newcode.splitlines(1)).next + t2 = [tok[:2] for tok in generate_tokens(readline)] + assert t1 == t2 + """ + ut = Untokenizer() + return ut.untokenize(iterable) + +def generate_tokens(readline): + """ + The generate_tokens() generator requires one argument, readline, which + must be a callable object which provides the same interface as the + readline() method of built-in file objects. 
Each call to the function + should return one line of input as a string. Alternately, readline + can be a callable function terminating with StopIteration: + readline = open(myfile).next # Example of alternate readline + + The generator produces 5-tuples with these members: the token type; the + token string; a 2-tuple (srow, scol) of ints specifying the row and + column where the token begins in the source; a 2-tuple (erow, ecol) of + ints specifying the row and column where the token ends in the source; + and the line on which the token was found. The line passed is the + logical line; continuation lines are included. + """ + lnum = parenlev = continued = 0 + namechars, numchars = string.ascii_letters + '_', '0123456789' + contstr, needcont = '', 0 + contline = None + indents = [0] + + while 1: # loop over lines in stream + try: + line = readline() + except StopIteration: + line = '' + lnum += 1 + pos, max = 0, len(line) + + if contstr: # continued string + if not line: + raise TokenError("EOF in multi-line string", strstart) + endmatch = endprog.match(line) + if endmatch: + pos = end = endmatch.end(0) + yield (STRING, contstr + line[:end], + strstart, (lnum, end), contline + line) + contstr, needcont = '', 0 + contline = None + elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n': + yield (ERRORTOKEN, contstr + line, + strstart, (lnum, len(line)), contline) + contstr = '' + contline = None + continue + else: + contstr = contstr + line + contline = contline + line + continue + + elif parenlev == 0 and not continued: # new statement + if not line: break + column = 0 + while pos < max: # measure leading whitespace + if line[pos] == ' ': + column += 1 + elif line[pos] == '\t': + column = (column//tabsize + 1)*tabsize + elif line[pos] == '\f': + column = 0 + else: + break + pos += 1 + if pos == max: + break + + if line[pos] in '#\r\n': # skip comments or blank lines + if line[pos] == '#': + comment_token = line[pos:].rstrip('\r\n') + nl_pos = pos + 
len(comment_token) + yield (COMMENT, comment_token, + (lnum, pos), (lnum, pos + len(comment_token)), line) + yield (NL, line[nl_pos:], + (lnum, nl_pos), (lnum, len(line)), line) + else: + yield ((NL, COMMENT)[line[pos] == '#'], line[pos:], + (lnum, pos), (lnum, len(line)), line) + continue + + if column > indents[-1]: # count indents or dedents + indents.append(column) + yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line) + while column < indents[-1]: + if column not in indents: + raise IndentationError( + "unindent does not match any outer indentation level", + ("", lnum, pos, line)) + indents = indents[:-1] + yield (DEDENT, '', (lnum, pos), (lnum, pos), line) + + else: # continued statement + if not line: + raise TokenError("EOF in multi-line statement", (lnum, 0)) + continued = 0 + + while pos < max: + pseudomatch = pseudoprog.match(line, pos) + if pseudomatch: # scan for tokens + start, end = pseudomatch.span(1) + spos, epos, pos = (lnum, start), (lnum, end), end + if start == end: + continue + token, initial = line[start:end], line[start] + + if initial in numchars or \ + (initial == '.' 
and token != '.'): # ordinary number + yield (NUMBER, token, spos, epos, line) + elif initial in '\r\n': + if parenlev > 0: + n = NL + else: + n = NEWLINE + yield (n, token, spos, epos, line) + elif initial == '#': + assert not token.endswith("\n") + yield (COMMENT, token, spos, epos, line) + elif token in triple_quoted: + endprog = endprogs[token] + endmatch = endprog.match(line, pos) + if endmatch: # all on one line + pos = endmatch.end(0) + token = line[start:pos] + yield (STRING, token, spos, (lnum, pos), line) + else: + strstart = (lnum, start) # multiple lines + contstr = line[start:] + contline = line + break + elif initial in single_quoted or \ + token[:2] in single_quoted or \ + token[:3] in single_quoted: + if token[-1] == '\n': # continued string + strstart = (lnum, start) + endprog = (endprogs[initial] or endprogs[token[1]] or + endprogs[token[2]]) + contstr, needcont = line[start:], 1 + contline = line + break + else: # ordinary string + yield (STRING, token, spos, epos, line) + elif initial in namechars: # ordinary name + yield (NAME, token, spos, epos, line) + elif initial == '\\': # continued stmt + continued = 1 + else: + if initial in '([{': + parenlev += 1 + elif initial in ')]}': + parenlev -= 1 + yield (OP, token, spos, epos, line) + else: + yield (ERRORTOKEN, line[pos], + (lnum, pos), (lnum, pos+1), line) + pos += 1 + + for indent in indents[1:]: # pop remaining indent levels + yield (DEDENT, '', (lnum, 0), (lnum, 0), '') + yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '') + +if __name__ == '__main__': # testing + import sys + if len(sys.argv) > 1: + tokenize(open(sys.argv[1]).readline) + else: + tokenize(sys.stdin.readline) diff --git a/mitogen-0.3.9/mitogen/core.py b/mitogen-0.3.9/mitogen/core.py new file mode 100644 index 0000000..cdfbbcd --- /dev/null +++ b/mitogen-0.3.9/mitogen/core.py @@ -0,0 +1,4196 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted 
provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +# !mitogen: minify_safe + +""" +This module implements most package functionality, but remains separate from +non-essential code in order to reduce its size, since it is also serves as the +bootstrap implementation sent to every new slave context. +""" + +import sys +try: + import _frozen_importlib_external +except ImportError: + pass +else: + class MonkeyPatchedPathFinder(_frozen_importlib_external.PathFinder): + """ + Meta path finder for sys.path and package __path__ attributes. + + Patched for https://github.com/python/cpython/issues/115911. 
+ """ + @classmethod + def _path_importer_cache(cls, path): + if path == '': + try: + path = _frozen_importlib_external._os.getcwd() + except (FileNotFoundError, PermissionError): + return None + return super()._path_importer_cache(path) + + if sys.version_info[:2] <= (3, 12): + for i, mpf in enumerate(sys.meta_path): + if mpf is _frozen_importlib_external.PathFinder: + sys.meta_path[i] = MonkeyPatchedPathFinder + del i, mpf + + +import binascii +import collections +import encodings.latin_1 +import encodings.utf_8 +import errno +import fcntl +import itertools +import linecache +import logging +import os +import pickle as py_pickle +import pstats +import signal +import socket +import struct +import syslog +import threading +import time +import traceback +import types +import warnings +import weakref +import zlib + +try: + # Python >= 3.4, PEP 451 ModuleSpec API + import importlib.machinery + import importlib.util +except ImportError: + # Python < 3.4, PEP 302 Import Hooks + import imp + +# Absolute imports for <2.5. +select = __import__('select') + +try: + import cProfile +except ImportError: + cProfile = None + +try: + import thread +except ImportError: + import threading as thread + +try: + import cPickle as pickle +except ImportError: + import pickle + +try: + from cStringIO import StringIO as BytesIO +except ImportError: + from io import BytesIO + +try: + BaseException +except NameError: + BaseException = Exception + +try: + ModuleNotFoundError +except NameError: + ModuleNotFoundError = ImportError + +# TODO: usage of 'import' after setting __name__, but before fixing up +# sys.modules generates a warning. This happens when profiling = True. +warnings.filterwarnings('ignore', + "Parent module 'mitogen' not found while handling absolute import") + +LOG = logging.getLogger('mitogen') +IOLOG = logging.getLogger('mitogen.io') +IOLOG.setLevel(logging.INFO) + +# str.encode() may take import lock. 
Deadlock possible if broker calls +# .encode() on behalf of thread currently waiting for module. +LATIN1_CODEC = encodings.latin_1.Codec() + +_v = False +_vv = False + +GET_MODULE = 100 +CALL_FUNCTION = 101 +FORWARD_LOG = 102 +ADD_ROUTE = 103 +DEL_ROUTE = 104 +ALLOCATE_ID = 105 +SHUTDOWN = 106 +LOAD_MODULE = 107 +FORWARD_MODULE = 108 +DETACHING = 109 +CALL_SERVICE = 110 +STUB_CALL_SERVICE = 111 + +#: Special value used to signal disconnection or the inability to route a +#: message, when it appears in the `reply_to` field. Usually causes +#: :class:`mitogen.core.ChannelError` to be raised when it is received. +#: +#: It indicates the sender did not know how to process the message, or wishes +#: no further messages to be delivered to it. It is used when: +#: +#: * a remote receiver is disconnected or explicitly closed. +#: * a related message could not be delivered due to no route existing for it. +#: * a router is being torn down, as a sentinel value to notify +#: :meth:`mitogen.core.Router.add_handler` callbacks to clean up. +IS_DEAD = 999 + +try: + BaseException +except NameError: + BaseException = Exception + +PY24 = sys.version_info < (2, 5) +PY3 = sys.version_info > (3,) +if PY3: + b = str.encode + BytesType = bytes + UnicodeType = str + FsPathTypes = (str,) + BufferType = lambda buf, start: memoryview(buf)[start:] + long = int +else: + b = str + BytesType = str + FsPathTypes = (str, unicode) + BufferType = buffer + UnicodeType = unicode + +AnyTextType = (BytesType, UnicodeType) + +try: + next +except NameError: + next = lambda it: it.next() + +# #550: prehistoric WSL did not advertise itself in uname output. +try: + fp = open('/proc/sys/kernel/osrelease') + IS_WSL = 'Microsoft' in fp.read() + fp.close() +except IOError: + IS_WSL = False + + +#: Default size for calls to :meth:`Side.read` or :meth:`Side.write`, and the +#: size of buffers configured by :func:`mitogen.parent.create_socketpair`. 
This +#: value has many performance implications, 128KiB seems to be a sweet spot. +#: +#: * When set low, large messages cause many :class:`Broker` IO loop +#: iterations, burning CPU and reducing throughput. +#: * When set high, excessive RAM is reserved by the OS for socket buffers (2x +#: per child), and an identically sized temporary userspace buffer is +#: allocated on each read that requires zeroing, and over a particular size +#: may require two system calls to allocate/deallocate. +#: +#: Care must be taken to ensure the underlying kernel object and receiving +#: program support the desired size. For example, +#: +#: * Most UNIXes have TTYs with fixed 2KiB-4KiB buffers, making them unsuitable +#: for efficient IO. +#: * Different UNIXes have varying presets for pipes, which may not be +#: configurable. On recent Linux the default pipe buffer size is 64KiB, but +#: under memory pressure may be as low as 4KiB for unprivileged processes. +#: * When communication is via an intermediary process, its internal buffers +#: effect the speed OS buffers will drain. For example OpenSSH uses 64KiB +#: reads. +#: +#: An ideal :class:`Message` has a size that is a multiple of +#: :data:`CHUNK_SIZE` inclusive of headers, to avoid wasting IO loop iterations +#: writing small trailer chunks. +CHUNK_SIZE = 131072 + +_tls = threading.local() + + +if __name__ == 'mitogen.core': + # When loaded using import mechanism, ExternalContext.main() will not have + # a chance to set the synthetic mitogen global, so just import it here. + import mitogen +else: + # When loaded as __main__, ensure classes and functions gain a __module__ + # attribute consistent with the host process, so that pickling succeeds. + __name__ = 'mitogen.core' + + +class Error(Exception): + """ + Base for all exceptions raised by Mitogen. + + :param str fmt: + Exception text, or format string if `args` is non-empty. + :param tuple args: + Format string arguments. 
+ """ + def __init__(self, fmt=None, *args): + if args: + fmt %= args + if fmt and not isinstance(fmt, UnicodeType): + fmt = fmt.decode('utf-8') + Exception.__init__(self, fmt) + + +class LatchError(Error): + """ + Raised when an attempt is made to use a :class:`mitogen.core.Latch` that + has been marked closed. + """ + pass + + +class Blob(BytesType): + """ + A serializable bytes subclass whose content is summarized in repr() output, + making it suitable for logging binary data. + """ + def __repr__(self): + return '[blob: %d bytes]' % len(self) + + def __reduce__(self): + return (Blob, (BytesType(self),)) + + +class Secret(UnicodeType): + """ + A serializable unicode subclass whose content is masked in repr() output, + making it suitable for logging passwords. + """ + def __repr__(self): + return '[secret]' + + if not PY3: + # TODO: what is this needed for in 2.x? + def __str__(self): + return UnicodeType(self) + + def __reduce__(self): + return (Secret, (UnicodeType(self),)) + + +class Kwargs(dict): + """ + A serializable dict subclass that indicates its keys should be coerced to + Unicode on Python 3 and bytes on Python<2.6. + + Python 2 produces keyword argument dicts whose keys are bytes, requiring a + helper to ensure compatibility with Python 3 where Unicode is required, + whereas Python 3 produces keyword argument dicts whose keys are Unicode, + requiring a helper for Python 2.4/2.5, where bytes are required. + """ + if PY3: + def __init__(self, dct): + for k, v in dct.items(): + if type(k) is bytes: + self[k.decode()] = v + else: + self[k] = v + elif sys.version_info < (2, 6, 5): + def __init__(self, dct): + for k, v in dct.iteritems(): + if type(k) is unicode: + k, _ = encodings.utf_8.encode(k) + self[k] = v + + def __repr__(self): + return 'Kwargs(%s)' % (dict.__repr__(self),) + + def __reduce__(self): + return (Kwargs, (dict(self),)) + + +class CallError(Error): + """ + Serializable :class:`Error` subclass raised when :meth:`Context.call() + ` fails. 
A copy of the traceback from the + external context is appended to the exception message. + """ + def __init__(self, fmt=None, *args): + if not isinstance(fmt, BaseException): + Error.__init__(self, fmt, *args) + else: + e = fmt + cls = e.__class__ + fmt = '%s.%s: %s' % (cls.__module__, cls.__name__, e) + tb = sys.exc_info()[2] + if tb: + fmt += '\n' + fmt += ''.join(traceback.format_tb(tb)) + Error.__init__(self, fmt) + + def __reduce__(self): + return (_unpickle_call_error, (self.args[0],)) + + +def _unpickle_call_error(s): + if not (type(s) is UnicodeType and len(s) < 10000): + raise TypeError('cannot unpickle CallError: bad input') + return CallError(s) + + +class ChannelError(Error): + """ + Raised when a channel dies or has been closed. + """ + remote_msg = 'Channel closed by remote end.' + local_msg = 'Channel closed by local end.' + + +class StreamError(Error): + """ + Raised when a stream cannot be established. + """ + pass + + +class TimeoutError(Error): + """ + Raised when a timeout occurs on a stream. + """ + pass + + +def to_text(o): + """ + Coerce `o` to Unicode by decoding it from UTF-8 if it is an instance of + :class:`bytes`, otherwise pass it to the :class:`str` constructor. The + returned object is always a plain :class:`str`, any subclass is removed. + """ + if isinstance(o, BytesType): + return o.decode('utf-8') + return UnicodeType(o) + + +# Documented in api.rst to work around Sphinx limitation. +now = getattr(time, 'monotonic', time.time) + + +# Python 2.4 +try: + any +except NameError: + def any(it): + for elem in it: + if elem: + return True + + +def _partition(s, sep, find): + """ + (str|unicode).(partition|rpartition) for Python 2.4/2.5. 
+ """ + idx = find(sep) + if idx != -1: + left = s[0:idx] + return left, sep, s[len(left)+len(sep):] + + +def threading__current_thread(): + try: + return threading.current_thread() # Added in Python 2.6+ + except AttributeError: + return threading.currentThread() # Deprecated in Python 3.10+ + + +def threading__thread_name(thread): + try: + return thread.name # Added in Python 2.6+ + except AttributeError: + return thread.getName() # Deprecated in Python 3.10+ + + +if hasattr(UnicodeType, 'rpartition'): + str_partition = UnicodeType.partition + str_rpartition = UnicodeType.rpartition + bytes_partition = BytesType.partition +else: + def str_partition(s, sep): + return _partition(s, sep, s.find) or (s, u'', u'') + def str_rpartition(s, sep): + return _partition(s, sep, s.rfind) or (u'', u'', s) + def bytes_partition(s, sep): + return _partition(s, sep, s.find) or (s, '', '') + + +def _has_parent_authority(context_id): + return ( + (context_id == mitogen.context_id) or + (context_id in mitogen.parent_ids) + ) + +def has_parent_authority(msg, _stream=None): + """ + Policy function for use with :class:`Receiver` and + :meth:`Router.add_handler` that requires incoming messages to originate + from a parent context, or on a :class:`Stream` whose :attr:`auth_id + ` has been set to that of a parent context or the current + context. + """ + return _has_parent_authority(msg.auth_id) + + +def _signals(obj, signal): + return ( + obj.__dict__ + .setdefault('_signals', {}) + .setdefault(signal, []) + ) + + +def listen(obj, name, func): + """ + Arrange for `func()` to be invoked when signal `name` is fired on `obj`. + """ + _signals(obj, name).append(func) + + +def unlisten(obj, name, func): + """ + Remove `func()` from the list of functions invoked when signal `name` is + fired by `obj`. + + :raises ValueError: + `func()` was not on the list. 
+ """ + _signals(obj, name).remove(func) + + +def fire(obj, name, *args, **kwargs): + """ + Arrange for `func(*args, **kwargs)` to be invoked for every function + registered for signal `name` on `obj`. + """ + for func in _signals(obj, name): + func(*args, **kwargs) + + +def takes_econtext(func): + """ + Decorator that marks a function or class method to automatically receive a + kwarg named `econtext`, referencing the + :class:`mitogen.core.ExternalContext` active in the context in which the + function is being invoked in. The decorator is only meaningful when the + function is invoked via :data:`CALL_FUNCTION `. + + When the function is invoked directly, `econtext` must still be passed to + it explicitly. + """ + func.mitogen_takes_econtext = True + return func + + +def takes_router(func): + """ + Decorator that marks a function or class method to automatically receive a + kwarg named `router`, referencing the :class:`mitogen.core.Router` active + in the context in which the function is being invoked in. The decorator is + only meaningful when the function is invoked via :data:`CALL_FUNCTION + `. + + When the function is invoked directly, `router` must still be passed to it + explicitly. + """ + func.mitogen_takes_router = True + return func + + +def is_blacklisted_import(importer, fullname): + """ + Return :data:`True` if `fullname` is part of a blacklisted package, or if + any packages have been whitelisted and `fullname` is not part of one. + + NB: + - If a package is on both lists, then it is treated as blacklisted. + - If any package is whitelisted, then all non-whitelisted packages are + treated as blacklisted. + """ + return ((not any(fullname.startswith(s) for s in importer.whitelist)) or + (any(fullname.startswith(s) for s in importer.blacklist))) + + +def set_cloexec(fd): + """ + Set the file descriptor `fd` to automatically close on :func:`os.execve`. 
+ This has no effect on file descriptors inherited across :func:`os.fork`, + they must be explicitly closed through some other means, such as + :func:`mitogen.fork.on_fork`. + """ + flags = fcntl.fcntl(fd, fcntl.F_GETFD) + assert fd > 2, 'fd %r <= 2' % (fd,) + fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC) + + +def set_nonblock(fd): + """ + Set the file descriptor `fd` to non-blocking mode. For most underlying file + types, this causes :func:`os.read` or :func:`os.write` to raise + :class:`OSError` with :data:`errno.EAGAIN` rather than block the thread + when the underlying kernel buffer is exhausted. + """ + flags = fcntl.fcntl(fd, fcntl.F_GETFL) + fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK) + + +def set_block(fd): + """ + Inverse of :func:`set_nonblock`, i.e. cause `fd` to block the thread when + the underlying kernel buffer is exhausted. + """ + flags = fcntl.fcntl(fd, fcntl.F_GETFL) + fcntl.fcntl(fd, fcntl.F_SETFL, flags & ~os.O_NONBLOCK) + + +def io_op(func, *args): + """ + Wrap `func(*args)` that may raise :class:`select.error`, :class:`IOError`, + or :class:`OSError`, trapping UNIX error codes relating to disconnection + and retry events in various subsystems: + + * When a signal is delivered to the process on Python 2, system call retry + is signalled through :data:`errno.EINTR`. The invocation is automatically + restarted. + * When performing IO against a TTY, disconnection of the remote end is + signalled by :data:`errno.EIO`. + * When performing IO against a socket, disconnection of the remote end is + signalled by :data:`errno.ECONNRESET`. + * When performing IO against a pipe, disconnection of the remote end is + signalled by :data:`errno.EPIPE`. + + :returns: + Tuple of `(return_value, disconnect_reason)`, where `return_value` is + the return value of `func(*args)`, and `disconnected` is an exception + instance when disconnection was detected, otherwise :data:`None`. 
+ """ + while True: + try: + return func(*args), None + except (select.error, OSError, IOError): + e = sys.exc_info()[1] + _vv and IOLOG.debug('io_op(%r) -> OSError: %s', func, e) + if e.args[0] == errno.EINTR: + continue + if e.args[0] in (errno.EIO, errno.ECONNRESET, errno.EPIPE): + return None, e + raise + + +class PidfulStreamHandler(logging.StreamHandler): + """ + A :class:`logging.StreamHandler` subclass used when + :meth:`Router.enable_debug() ` has been + called, or the `debug` parameter was specified during context construction. + Verifies the process ID has not changed on each call to :meth:`emit`, + reopening the associated log file when a change is detected. + + This ensures logging to the per-process output files happens correctly even + when uncooperative third party components call :func:`os.fork`. + """ + #: PID that last opened the log file. + open_pid = None + + #: Output path template. + template = '/tmp/mitogen.%s.%s.log' + + def _reopen(self): + self.acquire() + try: + if self.open_pid == os.getpid(): + return + ts = time.strftime('%Y%m%d_%H%M%S') + path = self.template % (os.getpid(), ts) + self.stream = open(path, 'w', 1) + set_cloexec(self.stream.fileno()) + self.stream.write('Parent PID: %s\n' % (os.getppid(),)) + self.stream.write('Created by:\n\n%s\n' % ( + ''.join(traceback.format_stack()), + )) + self.open_pid = os.getpid() + finally: + self.release() + + def emit(self, record): + if self.open_pid != os.getpid(): + self._reopen() + logging.StreamHandler.emit(self, record) + + +def enable_debug_logging(): + global _v, _vv + _v = True + _vv = True + root = logging.getLogger() + root.setLevel(logging.DEBUG) + IOLOG.setLevel(logging.DEBUG) + handler = PidfulStreamHandler() + handler.formatter = logging.Formatter( + '%(asctime)s %(levelname).1s %(name)s: %(message)s', + '%H:%M:%S' + ) + root.handlers.insert(0, handler) + + +_profile_hook = lambda name, func, *args: func(*args) +_profile_fmt = os.environ.get( + 'MITOGEN_PROFILE_FMT', + 
'/tmp/mitogen.stats.%(pid)s.%(identity)s.%(now)s.%(ext)s', +) + + +def _profile_hook(name, func, *args): + """ + Call `func(*args)` and return its result. This function is replaced by + :func:`_real_profile_hook` when :func:`enable_profiling` is called. This + interface is obsolete and will be replaced by a signals-based integration + later on. + """ + return func(*args) + + +def _real_profile_hook(name, func, *args): + profiler = cProfile.Profile() + profiler.enable() + try: + return func(*args) + finally: + path = _profile_fmt % { + 'now': int(1e6 * now()), + 'identity': name, + 'pid': os.getpid(), + 'ext': '%s' + } + profiler.dump_stats(path % ('pstats',)) + profiler.create_stats() + fp = open(path % ('log',), 'w') + try: + stats = pstats.Stats(profiler, stream=fp) + stats.sort_stats('cumulative') + stats.print_stats() + finally: + fp.close() + + +def enable_profiling(econtext=None): + global _profile_hook + _profile_hook = _real_profile_hook + + +def import_module(modname): + """ + Import `module` and return the attribute named `attr`. + """ + return __import__(modname, None, None, ['']) + + +def pipe(): + """ + Create a UNIX pipe pair using :func:`os.pipe`, wrapping the returned + descriptors in Python file objects in order to manage their lifetime and + ensure they are closed when their last reference is discarded and they have + not been closed explicitly. + """ + rfd, wfd = os.pipe() + return ( + os.fdopen(rfd, 'rb', 0), + os.fdopen(wfd, 'wb', 0) + ) + + +def iter_split(buf, delim, func): + """ + Invoke `func(s)` for each `delim`-delimited chunk in the potentially large + `buf`, avoiding intermediate lists and quadratic string operations. Return + the trailing undelimited portion of `buf`, or any unprocessed portion of + `buf` after `func(s)` returned :data:`False`. + + :returns: + `(trailer, cont)`, where `cont` is :data:`False` if the last call to + `func(s)` returned :data:`False`. 
+ """ + dlen = len(delim) + start = 0 + cont = True + while cont: + nl = buf.find(delim, start) + if nl == -1: + break + cont = not func(buf[start:nl]) is False + start = nl + dlen + return buf[start:], cont + + +class Py24Pickler(py_pickle.Pickler): + """ + Exceptions were classic classes until Python 2.5. Sadly for 2.4, cPickle + offers little control over how a classic instance is pickled. Therefore 2.4 + uses a pure-Python pickler, so CallError can be made to look as it does on + newer Pythons. + + This mess will go away once proper serialization exists. + """ + @classmethod + def dumps(cls, obj, protocol): + bio = BytesIO() + self = cls(bio, protocol=protocol) + self.dump(obj) + return bio.getvalue() + + def save_exc_inst(self, obj): + if isinstance(obj, CallError): + func, args = obj.__reduce__() + self.save(func) + self.save(args) + self.write(py_pickle.REDUCE) + else: + py_pickle.Pickler.save_inst(self, obj) + + if PY24: + dispatch = py_pickle.Pickler.dispatch.copy() + dispatch[py_pickle.InstanceType] = save_exc_inst + + +if PY3: + # In 3.x Unpickler is a class exposing find_class as an overridable, but it + # cannot be overridden without subclassing. + class _Unpickler(pickle.Unpickler): + def find_class(self, module, func): + return self.find_global(module, func) + pickle__dumps = pickle.dumps +elif PY24: + # On Python 2.4, we must use a pure-Python pickler. + pickle__dumps = Py24Pickler.dumps + _Unpickler = pickle.Unpickler +else: + pickle__dumps = pickle.dumps + # In 2.x Unpickler is a function exposing a writeable find_global + # attribute. + _Unpickler = pickle.Unpickler + + +class Message(object): + """ + Messages are the fundamental unit of communication, comprising fields from + the :ref:`stream-protocol` header, an optional reference to the receiving + :class:`mitogen.core.Router` for ingress messages, and helper methods for + deserialization and generating replies. + """ + #: Integer target context ID. 
:class:`Router` delivers messages locally + #: when their :attr:`dst_id` matches :data:`mitogen.context_id`, otherwise + #: they are routed up or downstream. + dst_id = None + + #: Integer source context ID. Used as the target of replies if any are + #: generated. + src_id = None + + #: Context ID under whose authority the message is acting. See + #: :ref:`source-verification`. + auth_id = None + + #: Integer target handle in the destination context. This is one of the + #: :ref:`standard-handles`, or a dynamically generated handle used to + #: receive a one-time reply, such as the return value of a function call. + handle = None + + #: Integer target handle to direct any reply to this message. Used to + #: receive a one-time reply, such as the return value of a function call. + #: :data:`IS_DEAD` has a special meaning when it appears in this field. + reply_to = None + + #: Raw message data bytes. + data = b('') + + _unpickled = object() + + #: The :class:`Router` responsible for routing the message. This is + #: :data:`None` for locally originated messages. + router = None + + #: The :class:`Receiver` over which the message was last received. Part of + #: the :class:`mitogen.select.Select` interface. Defaults to :data:`None`. + receiver = None + + HEADER_FMT = '>hLLLLLL' + HEADER_LEN = struct.calcsize(HEADER_FMT) + HEADER_MAGIC = 0x4d49 # 'MI' + + def __init__(self, **kwargs): + """ + Construct a message from from the supplied `kwargs`. :attr:`src_id` and + :attr:`auth_id` are always set to :data:`mitogen.context_id`. 
+ """ + self.src_id = mitogen.context_id + self.auth_id = mitogen.context_id + vars(self).update(kwargs) + assert isinstance(self.data, BytesType), 'Message data is not Bytes' + + def pack(self): + return ( + struct.pack(self.HEADER_FMT, self.HEADER_MAGIC, self.dst_id, + self.src_id, self.auth_id, self.handle, + self.reply_to or 0, len(self.data)) + + self.data + ) + + def _unpickle_context(self, context_id, name): + return _unpickle_context(context_id, name, router=self.router) + + def _unpickle_sender(self, context_id, dst_handle): + return _unpickle_sender(self.router, context_id, dst_handle) + + def _unpickle_bytes(self, s, encoding): + s, n = LATIN1_CODEC.encode(s) + return s + + def _find_global(self, module, func): + """ + Return the class implementing `module_name.class_name` or raise + `StreamError` if the module is not whitelisted. + """ + if module == __name__: + if func == '_unpickle_call_error' or func == 'CallError': + return _unpickle_call_error + elif func == '_unpickle_sender': + return self._unpickle_sender + elif func == '_unpickle_context': + return self._unpickle_context + elif func == 'Blob': + return Blob + elif func == 'Secret': + return Secret + elif func == 'Kwargs': + return Kwargs + elif module == '_codecs' and func == 'encode': + return self._unpickle_bytes + elif module == '__builtin__' and func == 'bytes': + return BytesType + raise StreamError('cannot unpickle %r/%r', module, func) + + @property + def is_dead(self): + """ + :data:`True` if :attr:`reply_to` is set to the magic value + :data:`IS_DEAD`, indicating the sender considers the channel dead. Dead + messages can be raised in a variety of circumstances, see + :data:`IS_DEAD` for more information. + """ + return self.reply_to == IS_DEAD + + @classmethod + def dead(cls, reason=None, **kwargs): + """ + Syntax helper to construct a dead message. 
+ """ + kwargs['data'], _ = encodings.utf_8.encode(reason or u'') + return cls(reply_to=IS_DEAD, **kwargs) + + @classmethod + def pickled(cls, obj, **kwargs): + """ + Construct a pickled message, setting :attr:`data` to the serialization + of `obj`, and setting remaining fields using `kwargs`. + + :returns: + The new message. + """ + self = cls(**kwargs) + try: + self.data = pickle__dumps(obj, protocol=2) + except pickle.PicklingError: + e = sys.exc_info()[1] + self.data = pickle__dumps(CallError(e), protocol=2) + return self + + def reply(self, msg, router=None, **kwargs): + """ + Compose a reply to this message and send it using :attr:`router`, or + `router` is :attr:`router` is :data:`None`. + + :param obj: + Either a :class:`Message`, or an object to be serialized in order + to construct a new message. + :param router: + Optional router to use if :attr:`router` is :data:`None`. + :param kwargs: + Optional keyword parameters overriding message fields in the reply. + """ + if not isinstance(msg, Message): + msg = Message.pickled(msg) + msg.dst_id = self.src_id + msg.handle = self.reply_to + vars(msg).update(kwargs) + if msg.handle: + (self.router or router).route(msg) + else: + LOG.debug('dropping reply to message with no return address: %r', + msg) + + if PY3: + UNPICKLER_KWARGS = {'encoding': 'bytes'} + else: + UNPICKLER_KWARGS = {} + + def _throw_dead(self): + if len(self.data): + raise ChannelError(self.data.decode('utf-8', 'replace')) + elif self.src_id == mitogen.context_id: + raise ChannelError(ChannelError.local_msg) + else: + raise ChannelError(ChannelError.remote_msg) + + def unpickle(self, throw=True, throw_dead=True): + """ + Unpickle :attr:`data`, optionally raising any exceptions present. + + :param bool throw_dead: + If :data:`True`, raise exceptions, otherwise it is the caller's + responsibility. + + :raises CallError: + The serialized data contained CallError exception. + :raises ChannelError: + The `is_dead` field was set. 
+ """ + _vv and IOLOG.debug('%r.unpickle()', self) + if throw_dead and self.is_dead: + self._throw_dead() + + obj = self._unpickled + if obj is Message._unpickled: + fp = BytesIO(self.data) + unpickler = _Unpickler(fp, **self.UNPICKLER_KWARGS) + unpickler.find_global = self._find_global + try: + # Must occur off the broker thread. + try: + obj = unpickler.load() + except: + LOG.error('raw pickle was: %r', self.data) + raise + self._unpickled = obj + except (TypeError, ValueError): + e = sys.exc_info()[1] + raise StreamError('invalid message: %s', e) + + if throw: + if isinstance(obj, CallError): + raise obj + + return obj + + def __repr__(self): + return 'Message(%r, %r, %r, %r, %r, %r..%d)' % ( + self.dst_id, self.src_id, self.auth_id, self.handle, + self.reply_to, (self.data or '')[:50], len(self.data) + ) + + +class Sender(object): + """ + Senders are used to send pickled messages to a handle in another context, + it is the inverse of :class:`mitogen.core.Receiver`. + + Senders may be serialized, making them convenient to wire up data flows. + See :meth:`mitogen.core.Receiver.to_sender` for more information. + + :param mitogen.core.Context context: + Context to send messages to. + :param int dst_handle: + Destination handle to send messages to. + """ + def __init__(self, context, dst_handle): + self.context = context + self.dst_handle = dst_handle + + def send(self, data): + """ + Send `data` to the remote end. + """ + _vv and IOLOG.debug('%r.send(%r..)', self, repr(data)[:100]) + self.context.send(Message.pickled(data, handle=self.dst_handle)) + + explicit_close_msg = 'Sender was explicitly closed' + + def close(self): + """ + Send a dead message to the remote, causing :meth:`ChannelError` to be + raised in any waiting thread. 
+ """ + _vv and IOLOG.debug('%r.close()', self) + self.context.send( + Message.dead( + reason=self.explicit_close_msg, + handle=self.dst_handle + ) + ) + + def __repr__(self): + return 'Sender(%r, %r)' % (self.context, self.dst_handle) + + def __reduce__(self): + return _unpickle_sender, (self.context.context_id, self.dst_handle) + + +def _unpickle_sender(router, context_id, dst_handle): + if not (isinstance(router, Router) and + isinstance(context_id, (int, long)) and context_id >= 0 and + isinstance(dst_handle, (int, long)) and dst_handle > 0): + raise TypeError('cannot unpickle Sender: bad input or missing router') + return Sender(Context(router, context_id), dst_handle) + + +class Receiver(object): + """ + Receivers maintain a thread-safe queue of messages sent to a handle of this + context from another context. + + :param mitogen.core.Router router: + Router to register the handler on. + + :param int handle: + If not :data:`None`, an explicit handle to register, otherwise an + unused handle is chosen. + + :param bool persist: + If :data:`False`, unregister the handler after one message is received. + Single-message receivers are intended for RPC-like transactions, such + as in the case of :meth:`mitogen.parent.Context.call_async`. + + :param mitogen.core.Context respondent: + Context this receiver is receiving from. If not :data:`None`, arranges + for the receiver to receive a dead message if messages can no longer be + routed to the context due to disconnection, and ignores messages that + did not originate from the respondent context. + """ + #: If not :data:`None`, a function invoked as `notify(receiver)` after a + #: message has been received. The function is invoked on :class:`Broker` + #: thread, therefore it must not block. Used by + #: :class:`mitogen.select.Select` to efficiently implement waiting on + #: multiple event sources. 
+ notify = None + + raise_channelerror = True + + def __init__(self, router, handle=None, persist=True, + respondent=None, policy=None, overwrite=False): + self.router = router + #: The handle. + self.handle = handle # Avoid __repr__ crash in add_handler() + self._latch = Latch() # Must exist prior to .add_handler() + self.handle = router.add_handler( + fn=self._on_receive, + handle=handle, + policy=policy, + persist=persist, + respondent=respondent, + overwrite=overwrite, + ) + + def __repr__(self): + return 'Receiver(%r, %r)' % (self.router, self.handle) + + def __enter__(self): + return self + + def __exit__(self, _1, _2, _3): + self.close() + + def to_sender(self): + """ + Return a :class:`Sender` configured to deliver messages to this + receiver. As senders are serializable, this makes it convenient to pass + `(context_id, handle)` pairs around:: + + def deliver_monthly_report(sender): + for line in open('monthly_report.txt'): + sender.send(line) + sender.close() + + @mitogen.main() + def main(router): + remote = router.ssh(hostname='mainframe') + recv = mitogen.core.Receiver(router) + remote.call(deliver_monthly_report, recv.to_sender()) + for msg in recv: + print(msg) + """ + return Sender(self.router.myself(), self.handle) + + def _on_receive(self, msg): + """ + Callback registered for the handle with :class:`Router`; appends data + to the internal queue. + """ + _vv and IOLOG.debug('%r._on_receive(%r)', self, msg) + self._latch.put(msg) + if self.notify: + self.notify(self) + + closed_msg = 'the Receiver has been closed' + + def close(self): + """ + Unregister the receiver's handle from its associated router, and cause + :class:`ChannelError` to be raised in any thread waiting in :meth:`get` + on this receiver. + """ + if self.handle: + self.router.del_handler(self.handle) + self.handle = None + self._latch.close() + + def size(self): + """ + Return the number of items currently buffered. 
A channel inherits from :class:`mitogen.core.Sender` and + :class:`mitogen.core.Receiver` to provide bidirectional functionality.
# On some Python releases (e.g. 3.8, 3.9) the subprocess module tries + # to import this Windows-only builtin module.
The Python 2.4 linecache module, used to fetch source code for + tracebacks and :func:`inspect.getsource`, does not support PEP-302, + meaning it needs extra help for Mitogen-loaded modules. Directly + populate its cache if a loaded module belongs to the Mitogen package.
Implements importlib.abc.MetaPathFinder.find_module(). + Deprecated in Python 3.4+, replaced by find_spec().
None for a top level module. + """ + if hasattr(_tls, 'running'): + return None + + _tls.running = True + try: + #_v and self._log.debug('Python requested %r', fullname) + fullname = to_text(fullname) + pkgname, _, suffix = str_rpartition(fullname, '.') + pkg = sys.modules.get(pkgname) + if pkgname and getattr(pkg, '__loader__', None) is not self: + self._log.debug('%s is submodule of a locally loaded package', + fullname) + return None + + if pkgname and suffix not in self._present.get(pkgname, ()): + self._log.debug('%s has no submodule %s', pkgname, suffix) + return None + + # #114: explicitly whitelisted prefixes override any + # system-installed package. + if self.whitelist != ['']: + if any(fullname.startswith(s) for s in self.whitelist): + return self + + try: + self.builtin_find_module(fullname) + _vv and self._log.debug('%r is available locally', fullname) + except ImportError: + _vv and self._log.debug('we will try to load %r', fullname) + return self + finally: + del _tls.running + + def find_spec(self, fullname, path, target=None): + """ + Return a `ModuleSpec` for module with `fullname` if we will load it. + Otherwise return `None`, allowing other finders to try. + + fullname Fully qualified name of the module (e.g. foo.bar.baz) + path Path entries to search. None for a top-level module. + target Existing module to be reloaded (if any). + + Implements importlib.abc.MetaPathFinder.find_spec() + Python 3.4+. + """ + # Presence of _tls.running indicates we've re-invoked importlib. + # Abort early to prevent infinite recursion. See below. + if hasattr(_tls, 'running'): + return None + + log = self._log.getChild('find_spec') + + if fullname.endswith('.'): + return None + + pkgname, _, modname = fullname.rpartition('.') + if pkgname and modname not in self._present.get(pkgname, ()): + log.debug('Skipping %s. 
Parent %s has no submodule %s', + fullname, pkgname, modname) + return None + + pkg = sys.modules.get(pkgname) + pkg_loader = self._loader_from_module(pkg) + if pkgname and pkg_loader is not self: + log.debug('Skipping %s. Parent %s was loaded by %r', + fullname, pkgname, pkg_loader) + return None + + # #114: whitelisted prefixes override any system-installed package. + if self.whitelist != ['']: + if any(s and fullname.startswith(s) for s in self.whitelist): + log.debug('Handling %s. It is whitelisted', fullname) + return importlib.machinery.ModuleSpec(fullname, loader=self) + + if fullname == '__main__': + log.debug('Handling %s. A special case', fullname) + return importlib.machinery.ModuleSpec(fullname, loader=self) + + # Re-invoke the import machinery to allow other finders to try. + # Set a guard, so we don't infinitely recurse. See top of this method. + _tls.running = True + try: + spec = importlib.util._find_spec(fullname, path, target) + finally: + del _tls.running + + if spec: + log.debug('Skipping %s. Available as %r', fullname, spec) + return spec + + log.debug('Handling %s. Unavailable locally', fullname) + return importlib.machinery.ModuleSpec(fullname, loader=self) + + blacklisted_msg = ( + '%r is present in the Mitogen importer blacklist, therefore this ' + 'context will not attempt to request it from the master, as the ' + 'request will always be refused.' + ) + pkg_resources_msg = ( + 'pkg_resources is prohibited from importing __main__, as it causes ' + 'problems in applications whose main module is not designed to be ' + 're-imported by children.' + ) + absent_msg = ( + 'The Mitogen master process was unable to serve %r. It may be a ' + 'native Python extension, or it may be missing entirely. Check the ' + 'importer debug logs on the master for more information.' 
+ ) + + def _refuse_imports(self, fullname): + if is_blacklisted_import(self, fullname): + raise ModuleNotFoundError(self.blacklisted_msg % (fullname,)) + + f = sys._getframe(2) + requestee = f.f_globals['__name__'] + + if fullname == '__main__' and requestee == 'pkg_resources': + # Anything that imports pkg_resources will eventually cause + # pkg_resources to try and scan __main__ for its __requires__ + # attribute (pkg_resources/__init__.py::_build_master()). This + # breaks any app that is not expecting its __main__ to suddenly be + # sucked over a network and injected into a remote process, like + # py.test. + raise ModuleNotFoundError(self.pkg_resources_msg) + + if fullname == 'pbr': + # It claims to use pkg_resources to read version information, which + # would result in PEP-302 being used, but it actually does direct + # filesystem access. So instead smodge the environment to override + # any version that was defined. This will probably break something + # later. + os.environ['PBR_VERSION'] = '0.0.0' + + def _on_load_module(self, msg): + if msg.is_dead: + return + + tup = msg.unpickle() + fullname = tup[0] + _v and self._log.debug('received %s', fullname) + + self._lock.acquire() + try: + self._cache[fullname] = tup + if tup[2] is not None and PY24: + self._update_linecache( + path='master:' + tup[2], + data=zlib.decompress(tup[3]) + ) + callbacks = self._callbacks.pop(fullname, []) + finally: + self._lock.release() + + for callback in callbacks: + callback() + + def _request_module(self, fullname, callback): + self._lock.acquire() + try: + present = fullname in self._cache + if not present: + funcs = self._callbacks.get(fullname) + if funcs is not None: + _v and self._log.debug('existing request for %s in flight', + fullname) + funcs.append(callback) + else: + _v and self._log.debug('sending new %s request to parent', + fullname) + self._callbacks[fullname] = [callback] + self._context.send( + Message(data=b(fullname), handle=GET_MODULE) + ) + finally: + 
self._lock.release() + + if present: + callback() + + def create_module(self, spec): + """ + Return a module object for the given ModuleSpec. + + Implements PEP-451 importlib.abc.Loader API introduced in Python 3.4. + Unlike Loader.load_module() this shouldn't populate sys.modules or + set module attributes. Both are done by Python. + """ + self._log.debug('Creating module for %r', spec) + + # FIXME Should this be done in find_spec()? Can it? + self._refuse_imports(spec.name) + + # FIXME "create_module() should properly handle the case where it is + # called more than once for the same spec/module." -- PEP-451 + event = threading.Event() + self._request_module(spec.name, callback=event.set) + event.wait() + + # 0:fullname 1:pkg_present 2:path 3:compressed 4:related + _, pkg_present, path, _, _ = self._cache[spec.name] + + if path is None: + raise ImportError(self.absent_msg % (spec.name)) + + spec.origin = self.get_filename(spec.name) + if pkg_present is not None: + # TODO Namespace packages + spec.submodule_search_locations = [] + self._present[spec.name] = pkg_present + + module = types.ModuleType(spec.name) + # FIXME create_module() shouldn't initialise module attributes + module.__file__ = spec.origin + return module + + def exec_module(self, module): + """ + Execute the module to initialise it. Don't return anything. + + Implements PEP-451 importlib.abc.Loader API, introduced in Python 3.4. + """ + name = module.__spec__.name + origin = module.__spec__.origin + self._log.debug('Executing %s from %s', name, origin) + source = self.get_source(name) + try: + # Compile the source into a code object. Don't add any __future__ + # flags and don't inherit any from this module. + # FIXME Should probably be exposed as get_code() + code = compile(source, origin, 'exec', flags=0, dont_inherit=True) + except SyntaxError: + # FIXME Why is this LOG, rather than self._log? 
+ LOG.exception('while importing %r', name) + raise + + exec(code, module.__dict__) + + def load_module(self, fullname): + """ + Return the loaded module specified by fullname. + + Implements importlib.abc.Loader.load_module(). + Deprecated in Python 3.4+, replaced by create_module() & exec_module(). + """ + fullname = to_text(fullname) + _v and self._log.debug('requesting %s', fullname) + self._refuse_imports(fullname) + + event = threading.Event() + self._request_module(fullname, event.set) + event.wait() + + # 0:fullname 1:pkg_present 2:path 3:compressed 4:related + _, pkg_present, path, _, _ = self._cache[fullname] + if path is None: + raise ModuleNotFoundError(self.absent_msg % (fullname,)) + + mod = sys.modules.setdefault(fullname, imp.new_module(fullname)) + mod.__file__ = self.get_filename(fullname) + mod.__loader__ = self + if pkg_present is not None: # it's a package. + mod.__path__ = [] + mod.__package__ = fullname + self._present[fullname] = pkg_present + else: + mod.__package__ = str_rpartition(fullname, '.')[0] or None + + if mod.__package__ and not PY3: + # 2.x requires __package__ to be exactly a string. + mod.__package__, _ = encodings.utf_8.encode(mod.__package__) + + source = self.get_source(fullname) + try: + code = compile(source, mod.__file__, 'exec', 0, 1) + except SyntaxError: + LOG.exception('while importing %r', fullname) + raise + + if PY3: + exec(code, vars(mod)) + else: + exec('exec code in vars(mod)') + + # #590: if a module replaces itself in sys.modules during import, below + # is necessary. This matches PyImport_ExecCodeModuleEx() + return sys.modules.get(fullname, mod) + + def get_filename(self, fullname): + if fullname in self._cache: + path = self._cache[fullname][2] + if path is None: + # If find_loader() returns self but a subsequent master RPC + # reveals the module can't be loaded, and so load_module() + # throws ImportError, on Python 3.x it is still possible for + # the loader to be called to fetch metadata. 
+ raise ModuleNotFoundError(self.absent_msg % (fullname,)) + return u'master:' + self._cache[fullname][2] + + def get_source(self, fullname): + if fullname in self._cache: + compressed = self._cache[fullname][3] + if compressed is None: + raise ModuleNotFoundError(self.absent_msg % (fullname,)) + + source = zlib.decompress(self._cache[fullname][3]) + if PY3: + return to_text(source) + return source + + +class LogHandler(logging.Handler): + """ + A :class:`logging.Handler` subclass that arranges for :data:`FORWARD_LOG` + messages to be sent to a parent context in response to logging messages + generated by the current context. This is installed by default in child + contexts during bootstrap, so that :mod:`logging` events can be viewed and + managed centrally in the master process. + + The handler is initially *corked* after construction, such that it buffers + messages until :meth:`uncork` is called. This allows logging to be + installed prior to communication with the target being available, and + avoids any possible race where early log messages might be dropped. + + :param mitogen.core.Context context: + The context to send log messages towards. At present this is always + the master process. + """ + def __init__(self, context): + logging.Handler.__init__(self) + self.context = context + self.local = threading.local() + self._buffer = [] + # Private synchronization is needed while corked, to ensure no + # concurrent call to _send() exists during uncork(). + self._buffer_lock = threading.Lock() + + def uncork(self): + """ + #305: during startup :class:`LogHandler` may be installed before it is + possible to route messages, therefore messages are buffered until + :meth:`uncork` is called by :class:`ExternalContext`. 
+ """ + self._buffer_lock.acquire() + try: + self._send = self.context.send + for msg in self._buffer: + self._send(msg) + self._buffer = None + finally: + self._buffer_lock.release() + + def _send(self, msg): + self._buffer_lock.acquire() + try: + if self._buffer is None: + # uncork() may run concurrent to _send() + self._send(msg) + else: + self._buffer.append(msg) + finally: + self._buffer_lock.release() + + def emit(self, rec): + """ + Send a :data:`FORWARD_LOG` message towards the target context. + """ + if rec.name == 'mitogen.io' or \ + getattr(self.local, 'in_emit', False): + return + + self.local.in_emit = True + try: + msg = self.format(rec) + encoded = '%s\x00%s\x00%s' % (rec.name, rec.levelno, msg) + if isinstance(encoded, UnicodeType): + # Logging package emits both :( + encoded = encoded.encode('utf-8') + self._send(Message(data=encoded, handle=FORWARD_LOG)) + finally: + self.local.in_emit = False + + +class Stream(object): + """ + A :class:`Stream` is one readable and optionally one writeable file + descriptor (represented by :class:`Side`) aggregated alongside an + associated :class:`Protocol` that knows how to respond to IO readiness + events for those descriptors. + + Streams are registered with :class:`Broker`, and callbacks are invoked on + the broker thread in response to IO activity. When registered using + :meth:`Broker.start_receive` or :meth:`Broker._start_transmit`, the broker + may call any of :meth:`on_receive`, :meth:`on_transmit`, + :meth:`on_shutdown` or :meth:`on_disconnect`. + + It is expected that the :class:`Protocol` associated with a stream will + change over its life. For example during connection setup, the initial + protocol may be :class:`mitogen.parent.BootstrapProtocol` that knows how to + enter SSH and sudo passwords and transmit the :mod:`mitogen.core` source to + the target, before handing off to :class:`MitogenProtocol` when the target + process is initialized. 
The same file object may be used for both sides. The default + :meth:`on_disconnect` handles the possibility that only one + descriptor may need to be closed.
Subclasses must implement this if they are ever registered with + :meth:`Broker._start_transmit`.
+ """ + fire(self, 'shutdown') + self.protocol.on_shutdown(broker) + + def on_disconnect(self, broker): + """ + Invoked by :class:`Broker` to force disconnect the stream during + shutdown, invoked by the default :meth:`on_shutdown` implementation, + and usually invoked by any subclass :meth:`on_receive` implementation + in response to a 0-byte read. + + The base implementation fires a ``disconnect`` event, then closes + :attr:`receive_side` and :attr:`transmit_side` after unregistering the + stream from the broker. + """ + fire(self, 'disconnect') + self.protocol.on_disconnect(broker) + + +class Protocol(object): + """ + Implement the program behaviour associated with activity on a + :class:`Stream`. The protocol in use may vary over a stream's life, for + example to allow :class:`mitogen.parent.BootstrapProtocol` to initialize + the connected child before handing it off to :class:`MitogenProtocol`. A + stream's active protocol is tracked in the :attr:`Stream.protocol` + attribute, and modified via :meth:`Stream.set_protocol`. + + Protocols do not handle IO, they are entirely reliant on the interface + provided by :class:`Stream` and :class:`Side`, allowing the underlying IO + implementation to be replaced without modifying behavioural logic. + """ + stream_class = Stream + + #: The :class:`Stream` this protocol is currently bound to, or + #: :data:`None`. + stream = None + + #: The size of the read buffer used by :class:`Stream` when this is the + #: active protocol for the stream. 
# Normally both sides share an FD, so it is important that transmit_side is + # deregistered from Poller before closing the receive side, as pollers + # like epoll and kqueue unregister all events on FD close, causing + # subsequent attempt to unregister the transmit side to fail.
+ delimiter = b('\n') + _trailer = b('') + + def on_receive(self, broker, buf): + _vv and IOLOG.debug('%r.on_receive()', self) + stream = self.stream + self._trailer, cont = mitogen.core.iter_split( + buf=self._trailer + buf, + delim=self.delimiter, + func=self.on_line_received, + ) + + if self._trailer: + if cont: + self.on_partial_line_received(self._trailer) + else: + assert stream.protocol is not self, \ + 'stream protocol is no longer %r' % (self,) + stream.protocol.on_receive(broker, self._trailer) + + def on_line_received(self, line): + """ + Receive a line from the stream. + + :param bytes line: + The encoded line, excluding the delimiter. + :returns: + :data:`False` to indicate this invocation modified the stream's + active protocol, and any remaining buffered data should be passed + to the new protocol's :meth:`on_receive` method. + + Any other return value is ignored. + """ + pass + + def on_partial_line_received(self, line): + """ + Receive a trailing unterminated partial line from the stream. + + :param bytes line: + The encoded partial line. + """ + pass + + +class BufferedWriter(object): + """ + Implement buffered output while avoiding quadratic string operations. This + is currently constructed by each protocol, in future it may become fixed + for each stream instead. + """ + def __init__(self, broker, protocol): + self._broker = broker + self._protocol = protocol + self._buf = collections.deque() + self._len = 0 + + def write(self, s): + """ + Transmit `s` immediately, falling back to enqueuing it and marking the + stream writeable if no OS buffer space is available. + """ + if not self._len: + # Modifying epoll/Kqueue state is expensive, as are needless broker + # loops. Rather than wait for writeability, just write immediately, + # and fall back to the broker loop on error or full buffer. 
+ try: + n = self._protocol.stream.transmit_side.write(s) + if n: + if n == len(s): + return + s = s[n:] + except OSError: + pass + + self._broker._start_transmit(self._protocol.stream) + self._buf.append(s) + self._len += len(s) + + def on_transmit(self, broker): + """ + Respond to stream writeability by retrying previously buffered + :meth:`write` calls. + """ + if self._buf: + buf = self._buf.popleft() + written = self._protocol.stream.transmit_side.write(buf) + if not written: + _v and LOG.debug('disconnected during write to %r', self) + self._protocol.stream.on_disconnect(broker) + return + elif written != len(buf): + self._buf.appendleft(BufferType(buf, written)) + + _vv and IOLOG.debug('transmitted %d bytes to %r', written, self) + self._len -= written + + if not self._buf: + broker._stop_transmit(self._protocol.stream) + + +class Side(object): + """ + Represent one side of a :class:`Stream`. This allows unidirectional (e.g. + pipe) and bidirectional (e.g. socket) streams to operate identically. + + Sides are also responsible for tracking the open/closed state of the + underlying FD, preventing erroneous duplicate calls to :func:`os.close` due + to duplicate :meth:`Stream.on_disconnect` calls, which would otherwise risk + silently succeeding by closing an unrelated descriptor. For this reason, it + is crucial only one file object exists per unique descriptor. + + :param mitogen.core.Stream stream: + The stream this side is associated with. + :param object fp: + The file or socket object managing the underlying file descriptor. Any + object may be used that supports `fileno()` and `close()` methods. + :param bool cloexec: + If :data:`True`, the descriptor has its :data:`fcntl.FD_CLOEXEC` flag + enabled using :func:`fcntl.fcntl`. + :param bool keep_alive: + If :data:`True`, the continued existence of this side will extend the + shutdown grace period until it has been unregistered from the broker. 
+    :param bool blocking:
+        If :data:`False`, the descriptor has its :data:`os.O_NONBLOCK` flag
+        enabled using :func:`fcntl.fcntl`.
+    """
+    _fork_refs = weakref.WeakValueDictionary()
+    closed = False
+
+    def __init__(self, stream, fp, cloexec=True, keep_alive=True, blocking=False):
+        #: The :class:`Stream` for which this is a read or write side.
+        self.stream = stream
+        # File or socket object responsible for the lifetime of its underlying
+        # file descriptor.
+        self.fp = fp
+        #: Integer file descriptor to perform IO on, or :data:`None` if
+        #: :meth:`close` has been called. This is saved separately from the
+        #: file object, since :meth:`file.fileno` cannot be called on it after
+        #: it has been closed.
+        self.fd = fp.fileno()
+        #: If :data:`True`, causes presence of this side in
+        #: :class:`Broker`'s active reader set to defer shutdown until the
+        #: side is disconnected.
+        self.keep_alive = keep_alive
+        self._fork_refs[id(self)] = self
+        if cloexec:
+            set_cloexec(self.fd)
+        if not blocking:
+            set_nonblock(self.fd)
+
+    def __repr__(self):
+        return '<Side of %s fd %s>' % (
+            self.stream.name or repr(self.stream),
+            self.fd
+        )
+
+    @classmethod
+    def _on_fork(cls):
+        while cls._fork_refs:
+            _, side = cls._fork_refs.popitem()
+            _vv and IOLOG.debug('Side._on_fork() closing %r', side)
+            side.close()
+
+    def close(self):
+        """
+        Call :meth:`file.close` on :attr:`fp` if it is not :data:`None`,
+        then set it to :data:`None`.
+        """
+        _vv and IOLOG.debug('%r.close()', self)
+        if not self.closed:
+            self.closed = True
+            self.fp.close()
+
+    def read(self, n=CHUNK_SIZE):
+        """
+        Read up to `n` bytes from the file descriptor, wrapping the underlying
+        :func:`os.read` call with :func:`io_op` to trap common disconnection
+        conditions.
+
+        :meth:`read` always behaves as if it is reading from a regular UNIX
+        file; socket, pipe, and TTY disconnection errors are masked and result
+        in a 0-sized read like a regular file.
+
+        :returns:
+            Bytes read, or the empty string to indicate disconnection was
+            detected.
+        """
+        if self.closed:
+            # Refuse to touch the handle after closed, it may have been reused
+            # by another thread. TODO: synchronize read()/write()/close().
+            return b('')
+        s, disconnected = io_op(os.read, self.fd, n)
+        if disconnected:
+            LOG.debug('%r: disconnected during read: %s', self, disconnected)
+            return b('')
+        return s
+
+    def write(self, s):
+        """
+        Write as much of the bytes from `s` as possible to the file descriptor,
+        wrapping the underlying :func:`os.write` call with :func:`io_op` to
+        trap common disconnection conditions.
+
+        :returns:
+            Number of bytes written, or :data:`None` if disconnection was
+            detected.
+        """
+        if self.closed:
+            # Don't touch the handle after close, it may be reused elsewhere.
+            return None
+
+        written, disconnected = io_op(os.write, self.fd, s)
+        if disconnected:
+            LOG.debug('%r: disconnected during write: %s', self, disconnected)
+            return None
+        return written
+
+
+class MitogenProtocol(Protocol):
+    """
+    :class:`Protocol` implementing mitogen's :ref:`stream protocol
+    <stream-protocol>`.
+    """
+    #: If not :data:`False`, indicates the stream has :attr:`auth_id` set and
+    #: its value is the same as :data:`mitogen.context_id` or appears in
+    #: :data:`mitogen.parent_ids`.
+    is_privileged = False
+
+    #: Invoked as `on_message(stream, msg)` each message received from the
+    #: peer.
+    on_message = None
+
+    def __init__(self, router, remote_id, auth_id=None,
+                 local_id=None, parent_ids=None):
+        self._router = router
+        self.remote_id = remote_id
+        #: If not :data:`None`, :class:`Router` stamps this into
+        #: :attr:`Message.auth_id` of every message received on this stream.
+ self.auth_id = auth_id + + if parent_ids is None: + parent_ids = mitogen.parent_ids + if local_id is None: + local_id = mitogen.context_id + + self.is_privileged = ( + (remote_id in parent_ids) or + auth_id in ([local_id] + parent_ids) + ) + self.sent_modules = set(['mitogen', 'mitogen.core']) + self._input_buf = collections.deque() + self._input_buf_len = 0 + self._writer = BufferedWriter(router.broker, self) + + #: Routing records the dst_id of every message arriving from this + #: stream. Any arriving DEL_ROUTE is rebroadcast for any such ID. + self.egress_ids = set() + + def on_receive(self, broker, buf): + """ + Handle the next complete message on the stream. Raise + :class:`StreamError` on failure. + """ + _vv and IOLOG.debug('%r.on_receive()', self) + if self._input_buf and self._input_buf_len < 128: + self._input_buf[0] += buf + else: + self._input_buf.append(buf) + + self._input_buf_len += len(buf) + while self._receive_one(broker): + pass + + corrupt_msg = ( + '%s: Corruption detected: frame signature incorrect. This likely means' + ' some external process is interfering with the connection. 
Received:' + '\n\n' + '%r' + ) + + def _receive_one(self, broker): + if self._input_buf_len < Message.HEADER_LEN: + return False + + msg = Message() + msg.router = self._router + (magic, msg.dst_id, msg.src_id, msg.auth_id, + msg.handle, msg.reply_to, msg_len) = struct.unpack( + Message.HEADER_FMT, + self._input_buf[0][:Message.HEADER_LEN], + ) + + if magic != Message.HEADER_MAGIC: + LOG.error(self.corrupt_msg, self.stream.name, self._input_buf[0][:2048]) + self.stream.on_disconnect(broker) + return False + + if msg_len > self._router.max_message_size: + LOG.error('%r: Maximum message size exceeded (got %d, max %d)', + self, msg_len, self._router.max_message_size) + self.stream.on_disconnect(broker) + return False + + total_len = msg_len + Message.HEADER_LEN + if self._input_buf_len < total_len: + _vv and IOLOG.debug( + '%r: Input too short (want %d, got %d)', + self, msg_len, self._input_buf_len - Message.HEADER_LEN + ) + return False + + start = Message.HEADER_LEN + prev_start = start + remain = total_len + bits = [] + while remain: + buf = self._input_buf.popleft() + bit = buf[start:remain] + bits.append(bit) + remain -= len(bit) + start + prev_start = start + start = 0 + + msg.data = b('').join(bits) + self._input_buf.appendleft(buf[prev_start+len(bit):]) + self._input_buf_len -= total_len + self._router._async_route(msg, self.stream) + return True + + def pending_bytes(self): + """ + Return the number of bytes queued for transmission on this stream. This + can be used to limit the amount of data buffered in RAM by an otherwise + unlimited consumer. + + For an accurate result, this method should be called from the Broker + thread, for example by using :meth:`Broker.defer_sync`. + """ + return self._writer._len + + def on_transmit(self, broker): + """ + Transmit buffered messages. 
+ """ + _vv and IOLOG.debug('%r.on_transmit()', self) + self._writer.on_transmit(broker) + + def _send(self, msg): + _vv and IOLOG.debug('%r._send(%r)', self, msg) + self._writer.write(msg.pack()) + + def send(self, msg): + """ + Send `data` to `handle`, and tell the broker we have output. May be + called from any thread. + """ + self._router.broker.defer(self._send, msg) + + def on_shutdown(self, broker): + """ + Disable :class:`Protocol` immediate disconnect behaviour. + """ + _v and LOG.debug('%r: shutting down', self) + + +class Context(object): + """ + Represent a remote context regardless of the underlying connection method. + Context objects are simple facades that emit messages through an + associated router, and have :ref:`signals` raised against them in response + to various events relating to the context. + + **Note:** This is the somewhat limited core version, used by child + contexts. The master subclass is documented below this one. + + Contexts maintain no internal state and are thread-safe. + + Prefer :meth:`Router.context_by_id` over constructing context objects + explicitly, as that method is deduplicating, and returns the only context + instance :ref:`signals` will be raised on. + + :param mitogen.core.Router router: + Router to emit messages through. + :param int context_id: + Context ID. + :param str name: + Context name. + """ + name = None + remote_name = None + + def __init__(self, router, context_id, name=None): + self.router = router + self.context_id = context_id + if name: + self.name = to_text(name) + + def __reduce__(self): + return _unpickle_context, (self.context_id, self.name) + + def on_disconnect(self): + _v and LOG.debug('%r: disconnecting', self) + fire(self, 'disconnect') + + def send_async(self, msg, persist=False): + """ + Arrange for `msg` to be delivered to this context, with replies + directed to a newly constructed receiver. 
:attr:`dst_id
+        <Message.dst_id>` is set to the target context ID, and :attr:`reply_to
+        <Message.reply_to>` is set to the newly constructed receiver's handle.
+
+        :param bool persist:
+            If :data:`False`, the handler will be unregistered after a single
+            message has been received.
+
+        :param mitogen.core.Message msg:
+            The message.
+
+        :returns:
+            :class:`Receiver` configured to receive any replies sent to the
+            message's `reply_to` handle.
+        """
+        receiver = Receiver(self.router, persist=persist, respondent=self)
+        msg.dst_id = self.context_id
+        msg.reply_to = receiver.handle
+
+        _v and LOG.debug('sending message to %r: %r', self, msg)
+        self.send(msg)
+        return receiver
+
+    def call_service_async(self, service_name, method_name, **kwargs):
+        if isinstance(service_name, BytesType):
+            service_name = service_name.encode('utf-8')
+        elif not isinstance(service_name, UnicodeType):
+            service_name = service_name.name()  # Service.name()
+        _v and LOG.debug('calling service %s.%s of %r, args: %r',
+                         service_name, method_name, self, kwargs)
+        tup = (service_name, to_text(method_name), Kwargs(kwargs))
+        msg = Message.pickled(tup, handle=CALL_SERVICE)
+        return self.send_async(msg)
+
+    def send(self, msg):
+        """
+        Arrange for `msg` to be delivered to this context. :attr:`dst_id
+        <Message.dst_id>` is set to the target context ID.
+
+        :param Message msg:
+            Message.
+        """
+        msg.dst_id = self.context_id
+        self.router.route(msg)
+
+    def call_service(self, service_name, method_name, **kwargs):
+        recv = self.call_service_async(service_name, method_name, **kwargs)
+        return recv.get().unpickle()
+
+    def send_await(self, msg, deadline=None):
+        """
+        Like :meth:`send_async`, but expect a single reply (`persist=False`)
+        delivered within `deadline` seconds.
+
+        :param mitogen.core.Message msg:
+            The message.
+        :param float deadline:
+            If not :data:`None`, seconds before timing out waiting for a reply.
+        :returns:
+            Deserialized reply.
+        :raises TimeoutError:
+            No message was received and `deadline` passed.
+ """ + receiver = self.send_async(msg) + response = receiver.get(deadline) + data = response.unpickle() + _vv and IOLOG.debug('%r._send_await() -> %r', self, data) + return data + + def __repr__(self): + return 'Context(%s, %r)' % (self.context_id, self.name) + + +def _unpickle_context(context_id, name, router=None): + if not (isinstance(context_id, (int, long)) and context_id >= 0 and ( + (name is None) or + (isinstance(name, UnicodeType) and len(name) < 100)) + ): + raise TypeError('cannot unpickle Context: bad input') + + if isinstance(router, Router): + return router.context_by_id(context_id, name=name) + return Context(None, context_id, name) # For plain Jane pickle. + + +class Poller(object): + """ + A poller manages OS file descriptors the user is waiting to become + available for IO. The :meth:`poll` method blocks the calling thread + until one or more become ready. + + Each descriptor has an associated `data` element, which is unique for each + readiness type, and defaults to being the same as the file descriptor. The + :meth:`poll` method yields the data associated with a descriptor, rather + than the descriptor itself, allowing concise loops like:: + + p = Poller() + p.start_receive(conn.fd, data=conn.on_read) + p.start_transmit(conn.fd, data=conn.on_write) + + for callback in p.poll(): + callback() # invoke appropriate bound instance method + + Pollers may be modified while :meth:`poll` is yielding results. Removals + are processed immediately, causing pending events for the descriptor to be + discarded. + + The :meth:`close` method must be called when a poller is discarded to avoid + a resource leak. + + Pollers may only be used by one thread at a time. + + This implementation uses :func:`select.select` for wider platform support. + That is considered an implementation detail. Previous versions have used + :func:`select.poll`. Future versions may decide at runtime. + """ + SUPPORTED = True + + #: Increments on every poll(). 
Used to version _rfds and _wfds. + _generation = 1 + + def __init__(self): + self._rfds = {} + self._wfds = {} + + def __repr__(self): + return '%s' % (type(self).__name__,) + + def _update(self, fd): + """ + Required by PollPoller subclass. + """ + pass + + @property + def readers(self): + """ + Return a list of `(fd, data)` tuples for every FD registered for + receive readiness. + """ + return list((fd, data) for fd, (data, gen) in self._rfds.items()) + + @property + def writers(self): + """ + Return a list of `(fd, data)` tuples for every FD registered for + transmit readiness. + """ + return list((fd, data) for fd, (data, gen) in self._wfds.items()) + + def close(self): + """ + Close any underlying OS resource used by the poller. + """ + pass + + def start_receive(self, fd, data=None): + """ + Cause :meth:`poll` to yield `data` when `fd` is readable. + """ + self._rfds[fd] = (data or fd, self._generation) + self._update(fd) + + def stop_receive(self, fd): + """ + Stop yielding readability events for `fd`. + + Redundant calls to :meth:`stop_receive` are silently ignored, this may + change in future. + """ + self._rfds.pop(fd, None) + self._update(fd) + + def start_transmit(self, fd, data=None): + """ + Cause :meth:`poll` to yield `data` when `fd` is writeable. + """ + self._wfds[fd] = (data or fd, self._generation) + self._update(fd) + + def stop_transmit(self, fd): + """ + Stop yielding writeability events for `fd`. + + Redundant calls to :meth:`stop_transmit` are silently ignored, this may + change in future. 
+ """ + self._wfds.pop(fd, None) + self._update(fd) + + def _poll(self, timeout): + (rfds, wfds, _), _ = io_op(select.select, + self._rfds, + self._wfds, + (), timeout + ) + + for fd in rfds: + _vv and IOLOG.debug('%r: POLLIN for %r', self, fd) + data, gen = self._rfds.get(fd, (None, None)) + if gen and gen < self._generation: + yield data + + for fd in wfds: + _vv and IOLOG.debug('%r: POLLOUT for %r', self, fd) + data, gen = self._wfds.get(fd, (None, None)) + if gen and gen < self._generation: + yield data + + def poll(self, timeout=None): + """ + Block the calling thread until one or more FDs are ready for IO. + + :param float timeout: + If not :data:`None`, seconds to wait without an event before + returning an empty iterable. + :returns: + Iterable of `data` elements associated with ready FDs. + """ + _vv and IOLOG.debug('%r.poll(%r)', self, timeout) + self._generation += 1 + return self._poll(timeout) + + +class Latch(object): + """ + A latch is a :class:`Queue.Queue`-like object that supports mutation and + waiting from multiple threads, however unlike :class:`Queue.Queue`, + waiting threads always remain interruptible, so CTRL+C always succeeds, and + waits where a timeout is set experience no wake up latency. These + properties are not possible in combination using the built-in threading + primitives available in Python 2.x. + + Latches implement queues using the UNIX self-pipe trick, and a per-thread + :func:`socket.socketpair` that is lazily created the first time any + latch attempts to sleep on a thread, and dynamically associated with the + waiting Latch only for duration of the wait. + + See :ref:`waking-sleeping-threads` for further discussion. + """ + #: The :class:`Poller` implementation to use. Instances are short lived so + #: prefer :class:`mitogen.parent.PollPoller` if it's available, otherwise + #: :class:`mitogen.core.Poller`. They don't need syscalls to create, + #: configure, or destroy. Replaced during import of :mod:`mitogen.parent`. 
+ poller_class = Poller + + #: If not :data:`None`, a function invoked as `notify(latch)` after a + #: successful call to :meth:`put`. The function is invoked on the + #: :meth:`put` caller's thread, which may be the :class:`Broker` thread, + #: therefore it must not block. Used by :class:`mitogen.select.Select` to + #: efficiently implement waiting on multiple event sources. + notify = None + + # The _cls_ prefixes here are to make it crystal clear in the code which + # state mutation isn't covered by :attr:`_lock`. + + #: List of reusable :func:`socket.socketpair` tuples. The list is mutated + #: from multiple threads, the only safe operations are `append()` and + #: `pop()`. + _cls_idle_socketpairs = [] + + #: List of every socket object that must be closed by :meth:`_on_fork`. + #: Inherited descriptors cannot be reused, as the duplicated handles + #: reference the same underlying kernel object in use by the parent. + _cls_all_sockets = [] + + def __init__(self): + self.closed = False + self._lock = threading.Lock() + #: List of unconsumed enqueued items. + self._queue = [] + #: List of `(wsock, cookie)` awaiting an element, where `wsock` is the + #: socketpair's write side, and `cookie` is the string to write. + self._sleeping = [] + #: Number of elements of :attr:`_sleeping` that have already been + #: woken, and have a corresponding element index from :attr:`_queue` + #: assigned to them. + self._waking = 0 + + @classmethod + def _on_fork(cls): + """ + Clean up any files belonging to the parent process after a fork. + """ + cls._cls_idle_socketpairs = [] + while cls._cls_all_sockets: + cls._cls_all_sockets.pop().close() + + def close(self): + """ + Mark the latch as closed, and cause every sleeping thread to be woken, + with :class:`mitogen.core.LatchError` raised in each thread. 
+ """ + self._lock.acquire() + try: + self.closed = True + while self._waking < len(self._sleeping): + wsock, cookie = self._sleeping[self._waking] + self._wake(wsock, cookie) + self._waking += 1 + finally: + self._lock.release() + + def size(self): + """ + Return the number of items currently buffered. + + As with :class:`Queue.Queue`, `0` may be returned even though a + subsequent call to :meth:`get` will succeed, since a message may be + posted at any moment between :meth:`size` and :meth:`get`. + + As with :class:`Queue.Queue`, `>0` may be returned even though a + subsequent call to :meth:`get` will block, since another waiting thread + may be woken at any moment between :meth:`size` and :meth:`get`. + + :raises LatchError: + The latch has already been marked closed. + """ + self._lock.acquire() + try: + if self.closed: + raise LatchError() + return len(self._queue) + finally: + self._lock.release() + + def empty(self): + """ + Return `size() == 0`. + + .. deprecated:: 0.2.8 + Use :meth:`size` instead. + + :raises LatchError: + The latch has already been marked closed. + """ + return self.size() == 0 + + def _get_socketpair(self): + """ + Return an unused socketpair, creating one if none exist. + """ + try: + return self._cls_idle_socketpairs.pop() # pop() must be atomic + except IndexError: + rsock, wsock = socket.socketpair() + rsock.setblocking(False) + set_cloexec(rsock.fileno()) + set_cloexec(wsock.fileno()) + self._cls_all_sockets.extend((rsock, wsock)) + return rsock, wsock + + COOKIE_MAGIC, = struct.unpack('L', b('LTCH') * (struct.calcsize('L')//4)) + COOKIE_FMT = '>Qqqq' # #545: id() and get_ident() may exceed long on armhfp. + COOKIE_SIZE = struct.calcsize(COOKIE_FMT) + + def _make_cookie(self): + """ + Return a string encoding the ID of the process, instance and thread. + This disambiguates legitimate wake-ups, accidental writes to the FD, + and buggy internal FD sharing. 
+ """ + return struct.pack(self.COOKIE_FMT, self.COOKIE_MAGIC, + os.getpid(), id(self), thread.get_ident()) + + def get(self, timeout=None, block=True): + """ + Return the next enqueued object, or sleep waiting for one. + + :param float timeout: + If not :data:`None`, specifies a timeout in seconds. + + :param bool block: + If :data:`False`, immediately raise + :class:`mitogen.core.TimeoutError` if the latch is empty. + + :raises mitogen.core.LatchError: + :meth:`close` has been called, and the object is no longer valid. + + :raises mitogen.core.TimeoutError: + Timeout was reached. + + :returns: + The de-queued object. + """ + _vv and IOLOG.debug('%r.get(timeout=%r, block=%r)', + self, timeout, block) + self._lock.acquire() + try: + if self.closed: + raise LatchError() + i = len(self._sleeping) + if len(self._queue) > i: + _vv and IOLOG.debug('%r.get() -> %r', self, self._queue[i]) + return self._queue.pop(i) + if not block: + raise TimeoutError() + rsock, wsock = self._get_socketpair() + cookie = self._make_cookie() + self._sleeping.append((wsock, cookie)) + finally: + self._lock.release() + + poller = self.poller_class() + poller.start_receive(rsock.fileno()) + try: + return self._get_sleep(poller, timeout, block, rsock, wsock, cookie) + finally: + poller.close() + + def _get_sleep(self, poller, timeout, block, rsock, wsock, cookie): + """ + When a result is not immediately available, sleep waiting for + :meth:`put` to write a byte to our socket pair. 
+ """ + _vv and IOLOG.debug( + '%r._get_sleep(timeout=%r, block=%r, fd=%d/%d)', + self, timeout, block, rsock.fileno(), wsock.fileno() + ) + + e = None + try: + list(poller.poll(timeout)) + except Exception: + e = sys.exc_info()[1] + + self._lock.acquire() + try: + i = self._sleeping.index((wsock, cookie)) + del self._sleeping[i] + + try: + got_cookie = rsock.recv(self.COOKIE_SIZE) + except socket.error: + e2 = sys.exc_info()[1] + if e2.args[0] == errno.EAGAIN: + e = TimeoutError() + else: + e = e2 + + self._cls_idle_socketpairs.append((rsock, wsock)) + if e: + raise e + + assert cookie == got_cookie, ( + "Cookie incorrect; got %r, expected %r" + % (binascii.hexlify(got_cookie), + binascii.hexlify(cookie)) + ) + assert i < self._waking, ( + "Cookie correct, but no queue element assigned." + ) + self._waking -= 1 + if self.closed: + raise LatchError() + _vv and IOLOG.debug('%r.get() wake -> %r', self, self._queue[i]) + return self._queue.pop(i) + finally: + self._lock.release() + + def put(self, obj=None): + """ + Enqueue an object, waking the first thread waiting for a result, if one + exists. + + :param obj: + Object to enqueue. Defaults to :data:`None` as a convenience when + using :class:`Latch` only for synchronization. + :raises mitogen.core.LatchError: + :meth:`close` has been called, and the object is no longer valid. 
+ """ + _vv and IOLOG.debug('%r.put(%r)', self, obj) + self._lock.acquire() + try: + if self.closed: + raise LatchError() + self._queue.append(obj) + + wsock = None + if self._waking < len(self._sleeping): + wsock, cookie = self._sleeping[self._waking] + self._waking += 1 + _vv and IOLOG.debug('%r.put() -> waking wfd=%r', + self, wsock.fileno()) + elif self.notify: + self.notify(self) + finally: + self._lock.release() + + if wsock: + self._wake(wsock, cookie) + + def _wake(self, wsock, cookie): + written, disconnected = io_op(os.write, wsock.fileno(), cookie) + assert written == len(cookie) and not disconnected + + def __repr__(self): + return 'Latch(%#x, size=%d, t=%r)' % ( + id(self), + len(self._queue), + threading__thread_name(threading__current_thread()), + ) + + +class Waker(Protocol): + """ + :class:`Protocol` implementing the `UNIX self-pipe trick`_. Used to wake + :class:`Broker` when another thread needs to modify its state, by enqueing + a function call to run on the :class:`Broker` thread. + + .. _UNIX self-pipe trick: https://cr.yp.to/docs/selfpipe.html + """ + read_size = 1 + broker_ident = None + + @classmethod + def build_stream(cls, broker): + stream = super(Waker, cls).build_stream(broker) + stream.accept(*pipe()) + return stream + + def __init__(self, broker): + self._broker = broker + self._deferred = collections.deque() + + def __repr__(self): + return 'Waker(fd=%r/%r)' % ( + self.stream.receive_side and self.stream.receive_side.fd, + self.stream.transmit_side and self.stream.transmit_side.fd, + ) + + @property + def keep_alive(self): + """ + Prevent immediate Broker shutdown while deferred functions remain. + """ + return len(self._deferred) + + def on_receive(self, broker, buf): + """ + Drain the pipe and fire callbacks. Since :attr:`_deferred` is + synchronized, :meth:`defer` and :meth:`on_receive` can conspire to + ensure only one byte needs to be pending regardless of queue length. 
+ """ + _vv and IOLOG.debug('%r.on_receive()', self) + while True: + try: + func, args, kwargs = self._deferred.popleft() + except IndexError: + return + + try: + func(*args, **kwargs) + except Exception: + LOG.exception('defer() crashed: %r(*%r, **%r)', + func, args, kwargs) + broker.shutdown() + + def _wake(self): + """ + Wake the multiplexer by writing a byte. If Broker is midway through + teardown, the FD may already be closed, so ignore EBADF. + """ + try: + self.stream.transmit_side.write(b(' ')) + except OSError: + e = sys.exc_info()[1] + if e.args[0] not in (errno.EBADF, errno.EWOULDBLOCK): + raise + + broker_shutdown_msg = ( + "An attempt was made to enqueue a message with a Broker that has " + "already exitted. It is likely your program called Broker.shutdown() " + "too early." + ) + + def defer(self, func, *args, **kwargs): + """ + Arrange for `func()` to execute on the broker thread. This function + returns immediately without waiting the result of `func()`. Use + :meth:`defer_sync` to block until a result is available. + + :raises mitogen.core.Error: + :meth:`defer` was called after :class:`Broker` has begun shutdown. + """ + if thread.get_ident() == self.broker_ident: + _vv and IOLOG.debug('%r.defer() [immediate]', self) + return func(*args, **kwargs) + if self._broker._exitted: + raise Error(self.broker_shutdown_msg) + + _vv and IOLOG.debug('%r.defer() [fd=%r]', self, + self.stream.transmit_side.fd) + self._deferred.append((func, args, kwargs)) + self._wake() + + +class IoLoggerProtocol(DelimitedProtocol): + """ + Attached to one end of a socket pair whose other end overwrites one of the + standard ``stdout`` or ``stderr`` file descriptors in a child context. + Received data is split up into lines, decoded as UTF-8 and logged to the + :mod:`logging` package as either the ``stdout`` or ``stderr`` logger. + + Logging in child contexts is in turn forwarded to the master process using + :class:`LogHandler`. 
+    """
+    @classmethod
+    def build_stream(cls, name, dest_fd):
+        """
+        Even though the file descriptor `dest_fd` will hold the opposite end of
+        the socket open, we must keep a separate dup() of it (i.e. wsock) in
+        case some code decides to overwrite `dest_fd` later, which would
+        prevent :meth:`on_shutdown` from calling :meth:`shutdown()
+        <socket.socket.shutdown>` on it.
+        """
+        rsock, wsock = socket.socketpair()
+        os.dup2(wsock.fileno(), dest_fd)
+        stream = super(IoLoggerProtocol, cls).build_stream(name)
+        stream.name = name
+        stream.accept(rsock, wsock)
+        return stream
+
+    def __init__(self, name):
+        self._log = logging.getLogger(name)
+        # #453: prevent accidental log initialization in a child creating a
+        # feedback loop.
+        self._log.propagate = False
+        self._log.handlers = logging.getLogger().handlers[:]
+
+    def on_shutdown(self, broker):
+        """
+        Shut down the write end of the socket, preventing any further writes to
+        it by this process, or subprocess that inherited it. This allows any
+        remaining kernel-buffered data to be drained during graceful shutdown
+        without the buffer continuously refilling due to some out of control
+        child process.
+        """
+        _v and LOG.debug('%r: shutting down', self)
+        if not IS_WSL:
+            # #333: WSL generates invalid readiness indication on shutdown().
+            # This modifies the *kernel object* inherited by children, causing
+            # EPIPE on subsequent writes to any dupped FD in any process. The
+            # read side can then drain completely of prior buffered data.
+            self.stream.transmit_side.fp.shutdown(socket.SHUT_WR)
+        self.stream.transmit_side.close()
+
+    def on_line_received(self, line):
+        """
+        Decode the received line as UTF-8 and pass it to the logging framework.
+        """
+        self._log.info('%s', line.decode('utf-8', 'replace'))
+
+
+class Router(object):
+    """
+    Route messages between contexts, and invoke local handlers for messages
+    addressed to this context. 
:meth:`Router.route() <route>` straddles the
+    :class:`Broker` thread and user threads, it is safe to call anywhere.
+
+    **Note:** This is the somewhat limited core version of the Router class
+    used by child contexts. The master subclass is documented below this one.
+    """
+    #: The :class:`mitogen.core.Context` subclass to use when constructing new
+    #: :class:`Context` objects in :meth:`myself` and :meth:`context_by_id`.
+    #: Permits :class:`Router` subclasses to extend the :class:`Context`
+    #: interface, as done in :class:`mitogen.parent.Router`.
+    context_class = Context
+
+    max_message_size = 128 * 1048576
+
+    #: When :data:`True`, permit children to only communicate with the current
+    #: context or a parent of the current context. Routing between siblings or
+    #: children of parents is prohibited, ensuring no communication is possible
+    #: between intentionally partitioned networks, such as when a program
+    #: simultaneously manipulates hosts spread across a corporate and a
+    #: production network, or production networks that are otherwise
+    #: air-gapped.
+    #:
+    #: Sending a prohibited message causes an error to be logged and a dead
+    #: message to be sent in reply to the errant message, if that message has
+    #: ``reply_to`` set.
+    #:
+    #: The value of :data:`unidirectional` becomes the default for the
+    #: :meth:`local() <mitogen.master.Router.local>` `unidirectional`
+    #: parameter.
+ unidirectional = False + + duplicate_handle_msg = 'cannot register a handle that already exists' + refused_msg = 'refused by policy' + invalid_handle_msg = 'invalid handle' + too_large_msg = 'message too large (max %d bytes)' + respondent_disconnect_msg = 'the respondent Context has disconnected' + broker_exit_msg = 'Broker has exitted' + no_route_msg = 'no route to %r, my ID is %r' + unidirectional_msg = ( + 'routing mode prevents forward of message from context %d to ' + 'context %d via context %d' + ) + + def __init__(self, broker): + self.broker = broker + listen(broker, 'exit', self._on_broker_exit) + self._setup_logging() + + self._write_lock = threading.Lock() + #: context ID -> Stream; must hold _write_lock to edit or iterate + self._stream_by_id = {} + #: List of contexts to notify of shutdown; must hold _write_lock + self._context_by_id = {} + self._last_handle = itertools.count(1000) + #: handle -> (persistent?, func(msg)) + self._handle_map = {} + #: Context -> set { handle, .. } + self._handles_by_respondent = {} + self.add_handler(self._on_del_route, DEL_ROUTE) + + def __repr__(self): + return 'Router(%r)' % (self.broker,) + + def _setup_logging(self): + """ + This is done in the :class:`Router` constructor for historical reasons. + It must be called before ExternalContext logs its first messages, but + after logging has been setup. It must also be called when any router is + constructed for a consumer app. + """ + # Here seems as good a place as any. + global _v, _vv + _v = logging.getLogger().level <= logging.DEBUG + _vv = IOLOG.level <= logging.DEBUG + + def _on_del_route(self, msg): + """ + Stub :data:`DEL_ROUTE` handler; fires 'disconnect' events on the + corresponding :attr:`_context_by_id` member. This is replaced by + :class:`mitogen.parent.RouteMonitor` in an upgraded context. 
+ """ + if msg.is_dead: + return + + target_id_s, _, name = bytes_partition(msg.data, b(':')) + target_id = int(target_id_s, 10) + LOG.error('%r: deleting route to %s (%d)', + self, to_text(name), target_id) + context = self._context_by_id.get(target_id) + if context: + fire(context, 'disconnect') + else: + LOG.debug('DEL_ROUTE for unknown ID %r: %r', target_id, msg) + + def _on_stream_disconnect(self, stream): + notify = [] + self._write_lock.acquire() + try: + for context in list(self._context_by_id.values()): + stream_ = self._stream_by_id.get(context.context_id) + if stream_ is stream: + del self._stream_by_id[context.context_id] + notify.append(context) + finally: + self._write_lock.release() + + # Happens outside lock as e.g. RouteMonitor wants the same lock. + for context in notify: + context.on_disconnect() + + def _on_broker_exit(self): + """ + Called prior to broker exit, informs callbacks registered with + :meth:`add_handler` the connection is dead. + """ + _v and LOG.debug('%r: broker has exitted', self) + while self._handle_map: + _, (_, func, _, _) = self._handle_map.popitem() + func(Message.dead(self.broker_exit_msg)) + + def myself(self): + """ + Return a :class:`Context` referring to the current process. Since + :class:`Context` is serializable, this is convenient to use in remote + function call parameter lists. + """ + return self.context_class( + router=self, + context_id=mitogen.context_id, + name='self', + ) + + def context_by_id(self, context_id, via_id=None, create=True, name=None): + """ + Return or construct a :class:`Context` given its ID. An internal + mapping of ID to the canonical :class:`Context` representing that ID, + so that :ref:`signals` can be raised. + + This may be called from any thread, lookup and construction are atomic. + + :param int context_id: + The context ID to look up. + :param int via_id: + If the :class:`Context` does not already exist, set its + :attr:`Context.via` to the :class:`Context` matching this ID. 
+ :param bool create: + If the :class:`Context` does not already exist, create it. + :param str name: + If the :class:`Context` does not already exist, set its name. + + :returns: + :class:`Context`, or return :data:`None` if `create` is + :data:`False` and no :class:`Context` previously existed. + """ + context = self._context_by_id.get(context_id) + if context: + return context + + if create and via_id is not None: + via = self.context_by_id(via_id) + else: + via = None + + self._write_lock.acquire() + try: + context = self._context_by_id.get(context_id) + if create and not context: + context = self.context_class(self, context_id, name=name) + context.via = via + self._context_by_id[context_id] = context + finally: + self._write_lock.release() + + return context + + def register(self, context, stream): + """ + Register a newly constructed context and its associated stream, and add + the stream's receive side to the I/O multiplexer. This method remains + public while the design has not yet settled. + """ + _v and LOG.debug('%s: registering %r to stream %r', + self, context, stream) + self._write_lock.acquire() + try: + self._stream_by_id[context.context_id] = stream + self._context_by_id[context.context_id] = context + finally: + self._write_lock.release() + + self.broker.start_receive(stream) + listen(stream, 'disconnect', lambda: self._on_stream_disconnect(stream)) + + def stream_by_id(self, dst_id): + """ + Return the :class:`Stream` that should be used to communicate with + `dst_id`. If a specific route for `dst_id` is not known, a reference to + the parent context's stream is returned. If the parent is disconnected, + or when running in the master context, return :data:`None` instead. + + This can be used from any thread, but its output is only meaningful + from the context of the :class:`Broker` thread, as disconnection or + replacement could happen in parallel on the broker thread at any + moment. 
+ """ + return ( + self._stream_by_id.get(dst_id) or + self._stream_by_id.get(mitogen.parent_id) + ) + + def del_handler(self, handle): + """ + Remove the handle registered for `handle` + + :raises KeyError: + The handle wasn't registered. + """ + _, _, _, respondent = self._handle_map.pop(handle) + if respondent: + self._handles_by_respondent[respondent].discard(handle) + + def add_handler(self, fn, handle=None, persist=True, + policy=None, respondent=None, + overwrite=False): + """ + Invoke `fn(msg)` on the :class:`Broker` thread for each Message sent to + `handle` from this context. Unregister after one invocation if + `persist` is :data:`False`. If `handle` is :data:`None`, a new handle + is allocated and returned. + + :param int handle: + If not :data:`None`, an explicit handle to register, usually one of + the ``mitogen.core.*`` constants. If unspecified, a new unused + handle will be allocated. + + :param bool persist: + If :data:`False`, the handler will be unregistered after a single + message has been received. + + :param mitogen.core.Context respondent: + Context that messages to this handle are expected to be sent from. + If specified, arranges for a dead message to be delivered to `fn` + when disconnection of the context is detected. + + In future `respondent` will likely also be used to prevent other + contexts from sending messages to the handle. + + :param function policy: + Function invoked as `policy(msg, stream)` where `msg` is a + :class:`mitogen.core.Message` about to be delivered, and `stream` + is the :class:`mitogen.core.Stream` on which it was received. The + function must return :data:`True`, otherwise an error is logged and + delivery is refused. + + Two built-in policy functions exist: + + * :func:`has_parent_authority`: requires the message arrived from a + parent context, or a context acting with a parent context's + authority (``auth_id``). 
+ + * :func:`mitogen.parent.is_immediate_child`: requires the + message arrived from an immediately connected child, for use in + messaging patterns where either something becomes buggy or + insecure by permitting indirect upstream communication. + + In case of refusal, and the message's ``reply_to`` field is + nonzero, a :class:`mitogen.core.CallError` is delivered to the + sender indicating refusal occurred. + + :param bool overwrite: + If :data:`True`, allow existing handles to be silently overwritten. + + :return: + `handle`, or if `handle` was :data:`None`, the newly allocated + handle. + :raises Error: + Attemp to register handle that was already registered. + """ + handle = handle or next(self._last_handle) + _vv and IOLOG.debug('%r.add_handler(%r, %r, %r)', self, fn, handle, persist) + if handle in self._handle_map and not overwrite: + raise Error(self.duplicate_handle_msg) + + self._handle_map[handle] = persist, fn, policy, respondent + if respondent: + if respondent not in self._handles_by_respondent: + self._handles_by_respondent[respondent] = set() + listen(respondent, 'disconnect', + lambda: self._on_respondent_disconnect(respondent)) + self._handles_by_respondent[respondent].add(handle) + + return handle + + def _on_respondent_disconnect(self, context): + for handle in self._handles_by_respondent.pop(context, ()): + _, fn, _, _ = self._handle_map[handle] + fn(Message.dead(self.respondent_disconnect_msg)) + del self._handle_map[handle] + + def _maybe_send_dead(self, unreachable, msg, reason, *args): + """ + Send a dead message to either the original sender or the intended + recipient of `msg`, if the original sender was expecting a reply + (because its `reply_to` was set), otherwise assume the message is a + reply of some sort, and send the dead message to the original + destination. 
+ + :param bool unreachable: + If :data:`True`, the recipient is known to be dead or routing + failed due to a security precaution, so don't attempt to fallback + to sending the dead message to the recipient if the original sender + did not include a reply address. + :param mitogen.core.Message msg: + Message that triggered the dead message. + :param str reason: + Human-readable error reason. + :param tuple args: + Elements to interpolate with `reason`. + """ + if args: + reason %= args + LOG.debug('%r: %r is dead: %r', self, msg, reason) + if msg.reply_to and not msg.is_dead: + msg.reply(Message.dead(reason=reason), router=self) + elif not unreachable: + self._async_route( + Message.dead( + dst_id=msg.dst_id, + handle=msg.handle, + reason=reason, + ) + ) + + def _invoke(self, msg, stream): + # IOLOG.debug('%r._invoke(%r)', self, msg) + try: + persist, fn, policy, respondent = self._handle_map[msg.handle] + except KeyError: + self._maybe_send_dead(True, msg, reason=self.invalid_handle_msg) + return + + if respondent and not (msg.is_dead or + msg.src_id == respondent.context_id): + self._maybe_send_dead(True, msg, 'reply from unexpected context') + return + + if policy and not policy(msg, stream): + self._maybe_send_dead(True, msg, self.refused_msg) + return + + if not persist: + self.del_handler(msg.handle) + + try: + fn(msg) + except Exception: + LOG.exception('%r._invoke(%r): %r crashed', self, msg, fn) + + def _async_route(self, msg, in_stream=None): + """ + Arrange for `msg` to be forwarded towards its destination. If its + destination is the local context, then arrange for it to be dispatched + using the local handlers. + + This is a lower overhead version of :meth:`route` that may only be + called from the :class:`Broker` thread. + + :param Stream in_stream: + If not :data:`None`, the stream the message arrived on. Used for + performing source route verification, to ensure sensitive messages + such as ``CALL_FUNCTION`` arrive only from trusted contexts. 
+ """ + _vv and IOLOG.debug('%r._async_route(%r, %r)', self, msg, in_stream) + + if len(msg.data) > self.max_message_size: + self._maybe_send_dead(False, msg, self.too_large_msg % ( + self.max_message_size, + )) + return + + parent_stream = self._stream_by_id.get(mitogen.parent_id) + src_stream = self._stream_by_id.get(msg.src_id, parent_stream) + + # When the ingress stream is known, verify the message was received on + # the same as the stream we would expect to receive messages from the + # src_id and auth_id. This is like Reverse Path Filtering in IP, and + # ensures messages from a privileged context cannot be spoofed by a + # child. + if in_stream: + auth_stream = self._stream_by_id.get(msg.auth_id, parent_stream) + if in_stream != auth_stream: + LOG.error('%r: bad auth_id: got %r via %r, not %r: %r', + self, msg.auth_id, in_stream, auth_stream, msg) + return + + if msg.src_id != msg.auth_id and in_stream != src_stream: + LOG.error('%r: bad src_id: got %r via %r, not %r: %r', + self, msg.src_id, in_stream, src_stream, msg) + return + + # If the stream's MitogenProtocol has auth_id set, copy it to the + # message. This allows subtrees to become privileged by stamping a + # parent's context ID. It is used by mitogen.unix to mark client + # streams (like Ansible WorkerProcess) as having the same rights as + # the parent. + if in_stream.protocol.auth_id is not None: + msg.auth_id = in_stream.protocol.auth_id + if in_stream.protocol.on_message is not None: + in_stream.protocol.on_message(in_stream, msg) + + # Record the IDs the source ever communicated with. + in_stream.protocol.egress_ids.add(msg.dst_id) + + if msg.dst_id == mitogen.context_id: + return self._invoke(msg, in_stream) + + out_stream = self._stream_by_id.get(msg.dst_id) + if (not out_stream) and (parent_stream != src_stream or not in_stream): + # No downstream route exists. 
The message could be from a child or + # ourselves for a parent, in which case we must forward it + # upstream, or it could be from a parent for a dead child, in which + # case its src_id/auth_id would fail verification if returned to + # the parent, so in that case reply with a dead message instead. + out_stream = parent_stream + + if out_stream is None: + self._maybe_send_dead(True, msg, self.no_route_msg, + msg.dst_id, mitogen.context_id) + return + + if in_stream and self.unidirectional and not \ + (in_stream.protocol.is_privileged or + out_stream.protocol.is_privileged): + self._maybe_send_dead(True, msg, self.unidirectional_msg, + in_stream.protocol.remote_id, + out_stream.protocol.remote_id, + mitogen.context_id) + return + + out_stream.protocol._send(msg) + + def route(self, msg): + """ + Arrange for the :class:`Message` `msg` to be delivered to its + destination using any relevant downstream context, or if none is found, + by forwarding the message upstream towards the master context. If `msg` + is destined for the local context, it is dispatched using the handles + registered with :meth:`add_handler`. + + This may be called from any thread. + """ + self.broker.defer(self._async_route, msg) + + +class NullTimerList(object): + def get_timeout(self): + return None + + +class Broker(object): + """ + Responsible for handling I/O multiplexing in a private thread. + + **Note:** This somewhat limited core version is used by children. The + master subclass is documented below. + """ + poller_class = Poller + _waker = None + _thread = None + + # :func:`mitogen.parent._upgrade_broker` replaces this with + # :class:`mitogen.parent.TimerList` during upgrade. + timers = NullTimerList() + + #: Seconds grace to allow :class:`streams ` to shutdown gracefully + #: before force-disconnecting them during :meth:`shutdown`. 
+ shutdown_timeout = 3.0 + + def __init__(self, poller_class=None, activate_compat=True): + self._alive = True + self._exitted = False + self._waker = Waker.build_stream(self) + #: Arrange for `func(\*args, \**kwargs)` to be executed on the broker + #: thread, or immediately if the current thread is the broker thread. + #: Safe to call from any thread. + self.defer = self._waker.protocol.defer + self.poller = self.poller_class() + self.poller.start_receive( + self._waker.receive_side.fd, + (self._waker.receive_side, self._waker.on_receive) + ) + self._thread = threading.Thread( + target=self._broker_main, + name='mitogen.broker' + ) + self._thread.start() + if activate_compat: + self._py24_25_compat() + + def _py24_25_compat(self): + """ + Python 2.4/2.5 have grave difficulties with threads/fork. We + mandatorily quiesce all running threads during fork using a + monkey-patch there. + """ + if sys.version_info < (2, 6): + # import_module() is used to avoid dep scanner. + os_fork = import_module('mitogen.os_fork') + os_fork._notice_broker_or_pool(self) + + def start_receive(self, stream): + """ + Mark the :attr:`receive_side ` on `stream` as + ready for reading. Safe to call from any thread. When the associated + file descriptor becomes ready for reading, + :meth:`BasicStream.on_receive` will be called. + """ + _vv and IOLOG.debug('%r.start_receive(%r)', self, stream) + side = stream.receive_side + assert side and not side.closed + self.defer(self.poller.start_receive, + side.fd, (side, stream.on_receive)) + + def stop_receive(self, stream): + """ + Mark the :attr:`receive_side ` on `stream` as not + ready for reading. Safe to call from any thread. + """ + _vv and IOLOG.debug('%r.stop_receive(%r)', self, stream) + self.defer(self.poller.stop_receive, stream.receive_side.fd) + + def _start_transmit(self, stream): + """ + Mark the :attr:`transmit_side ` on `stream` as + ready for writing. Must only be called from the Broker thread. 
When the + associated file descriptor becomes ready for writing, + :meth:`BasicStream.on_transmit` will be called. + """ + _vv and IOLOG.debug('%r._start_transmit(%r)', self, stream) + side = stream.transmit_side + assert side and not side.closed + self.poller.start_transmit(side.fd, (side, stream.on_transmit)) + + def _stop_transmit(self, stream): + """ + Mark the :attr:`transmit_side ` on `stream` as not + ready for writing. + """ + _vv and IOLOG.debug('%r._stop_transmit(%r)', self, stream) + self.poller.stop_transmit(stream.transmit_side.fd) + + def keep_alive(self): + """ + Return :data:`True` if any reader's :attr:`Side.keep_alive` attribute + is :data:`True`, or any :class:`Context` is still registered that is + not the master. Used to delay shutdown while some important work is in + progress (e.g. log draining). + """ + it = (side.keep_alive for (_, (side, _)) in self.poller.readers) + return sum(it, 0) > 0 or self.timers.get_timeout() is not None + + def defer_sync(self, func): + """ + Arrange for `func()` to execute on :class:`Broker` thread, blocking the + current thread until a result or exception is available. + + :returns: + Return value of `func()`. + """ + latch = Latch() + def wrapper(): + try: + latch.put(func()) + except Exception: + latch.put(sys.exc_info()[1]) + self.defer(wrapper) + res = latch.get() + if isinstance(res, Exception): + raise res + return res + + def _call(self, stream, func): + """ + Call `func(self)`, catching any exception that might occur, logging it, + and force-disconnecting the related `stream`. + """ + try: + func(self) + except Exception: + LOG.exception('%r crashed', stream) + stream.on_disconnect(self) + + def _loop_once(self, timeout=None): + """ + Execute a single :class:`Poller` wait, dispatching any IO events that + caused the wait to complete. + + :param float timeout: + If not :data:`None`, maximum time in seconds to wait for events. 
+ """ + _vv and IOLOG.debug('%r._loop_once(%r, %r)', + self, timeout, self.poller) + + timer_to = self.timers.get_timeout() + if timeout is None: + timeout = timer_to + elif timer_to is not None and timer_to < timeout: + timeout = timer_to + + #IOLOG.debug('readers =\n%s', pformat(self.poller.readers)) + #IOLOG.debug('writers =\n%s', pformat(self.poller.writers)) + for side, func in self.poller.poll(timeout): + self._call(side.stream, func) + if timer_to is not None: + self.timers.expire() + + def _broker_exit(self): + """ + Forcefully call :meth:`Stream.on_disconnect` on any streams that failed + to shut down gracefully, then discard the :class:`Poller`. + """ + for _, (side, _) in self.poller.readers + self.poller.writers: + LOG.debug('%r: force disconnecting %r', self, side) + side.stream.on_disconnect(self) + + self.poller.close() + + def _broker_shutdown(self): + """ + Invoke :meth:`Stream.on_shutdown` for every active stream, then allow + up to :attr:`shutdown_timeout` seconds for the streams to unregister + themselves, logging an error if any did not unregister during the grace + period. + """ + for _, (side, _) in self.poller.readers + self.poller.writers: + self._call(side.stream, side.stream.on_shutdown) + + deadline = now() + self.shutdown_timeout + while self.keep_alive() and now() < deadline: + self._loop_once(max(0, deadline - now())) + + if self.keep_alive(): + LOG.error('%r: pending work still existed %d seconds after ' + 'shutdown began. This may be due to a timer that is yet ' + 'to expire, or a child connection that did not fully ' + 'shut down.', self, self.shutdown_timeout) + + def _do_broker_main(self): + """ + Broker thread main function. Dispatches IO events until + :meth:`shutdown` is called. + """ + # For Python 2.4, no way to retrieve ident except on thread. 
+ self._waker.protocol.broker_ident = thread.get_ident() + try: + while self._alive: + self._loop_once() + + fire(self, 'before_shutdown') + fire(self, 'shutdown') + self._broker_shutdown() + except Exception: + e = sys.exc_info()[1] + LOG.exception('broker crashed') + syslog.syslog(syslog.LOG_ERR, 'broker crashed: %s' % (e,)) + syslog.closelog() # prevent test 'fd leak'. + + self._alive = False # Ensure _alive is consistent on crash. + self._exitted = True + self._broker_exit() + + def _broker_main(self): + try: + _profile_hook('mitogen.broker', self._do_broker_main) + finally: + # 'finally' to ensure _on_broker_exit() can always SIGTERM. + fire(self, 'exit') + + def shutdown(self): + """ + Request broker gracefully disconnect streams and stop. Safe to call + from any thread. + """ + _v and LOG.debug('%r: shutting down', self) + def _shutdown(): + self._alive = False + if self._alive and not self._exitted: + self.defer(_shutdown) + + def join(self): + """ + Wait for the broker to stop, expected to be called after + :meth:`shutdown`. + """ + self._thread.join() + + def __repr__(self): + return 'Broker(%04x)' % (id(self) & 0xffff,) + + +class Dispatcher(object): + """ + Implementation of the :data:`CALL_FUNCTION` handle for a child context. + Listens on the child's main thread for messages sent by + :class:`mitogen.parent.CallChain` and dispatches the function calls they + describe. + + If a :class:`mitogen.parent.CallChain` sending a message is in pipelined + mode, any exception that occurs is recorded, and causes all subsequent + calls with the same `chain_id` to fail with the same exception. + """ + _service_recv = None + + def __repr__(self): + return 'Dispatcher' + + def __init__(self, econtext): + self.econtext = econtext + #: Chain ID -> CallError if prior call failed. 
+ self._error_by_chain_id = {} + self.recv = Receiver( + router=econtext.router, + handle=CALL_FUNCTION, + policy=has_parent_authority, + ) + #: The :data:`CALL_SERVICE` :class:`Receiver` that will eventually be + #: reused by :class:`mitogen.service.Pool`, should it ever be loaded. + #: This is necessary for race-free reception of all service requests + #: delivered regardless of whether the stub or real service pool are + #: loaded. See #547 for related sorrows. + Dispatcher._service_recv = Receiver( + router=econtext.router, + handle=CALL_SERVICE, + policy=has_parent_authority, + ) + self._service_recv.notify = self._on_call_service + listen(econtext.broker, 'shutdown', self._on_broker_shutdown) + + def _on_broker_shutdown(self): + if self._service_recv.notify == self._on_call_service: + self._service_recv.notify = None + self.recv.close() + + @classmethod + @takes_econtext + def forget_chain(cls, chain_id, econtext): + econtext.dispatcher._error_by_chain_id.pop(chain_id, None) + + def _parse_request(self, msg): + data = msg.unpickle(throw=False) + _v and LOG.debug('%r: dispatching %r', self, data) + + chain_id, modname, klass, func, args, kwargs = data + obj = import_module(modname) + if klass: + obj = getattr(obj, klass) + fn = getattr(obj, func) + if getattr(fn, 'mitogen_takes_econtext', None): + kwargs.setdefault('econtext', self.econtext) + if getattr(fn, 'mitogen_takes_router', None): + kwargs.setdefault('router', self.econtext.router) + + return chain_id, fn, args, kwargs + + def _dispatch_one(self, msg): + try: + chain_id, fn, args, kwargs = self._parse_request(msg) + except Exception: + return None, CallError(sys.exc_info()[1]) + + if chain_id in self._error_by_chain_id: + return chain_id, self._error_by_chain_id[chain_id] + + try: + return chain_id, fn(*args, **kwargs) + except Exception: + e = CallError(sys.exc_info()[1]) + if chain_id is not None: + self._error_by_chain_id[chain_id] = e + return chain_id, e + + def _on_call_service(self, recv): + """ 
+ Notifier for the :data:`CALL_SERVICE` receiver. This is called on the + :class:`Broker` thread for any service messages arriving at this + context, for as long as no real service pool implementation is loaded. + + In order to safely bootstrap the service pool implementation a sentinel + message is enqueued on the :data:`CALL_FUNCTION` receiver in order to + wake the main thread, where the importer can run without any + possibility of suffering deadlock due to concurrent uses of the + importer. + + Should the main thread be blocked indefinitely, preventing the import + from ever running, if it is blocked waiting on a service call, then it + means :mod:`mitogen.service` has already been imported and + :func:`mitogen.service.get_or_create_pool` has already run, meaning the + service pool is already active and the duplicate initialization was not + needed anyway. + + #547: This trickery is needed to avoid the alternate option of spinning + a temporary thread to import the service pool, which could deadlock if + a custom import hook executing on the main thread (under the importer + lock) would block waiting for some data that was in turn received by a + service. Main thread import lock can't be released until service is + running, service cannot satisfy request until import lock is released. 
+ """ + self.recv._on_receive(Message(handle=STUB_CALL_SERVICE)) + + def _init_service_pool(self): + import mitogen.service + mitogen.service.get_or_create_pool(router=self.econtext.router) + + def _dispatch_calls(self): + for msg in self.recv: + if msg.handle == STUB_CALL_SERVICE: + if msg.src_id == mitogen.context_id: + self._init_service_pool() + continue + + chain_id, ret = self._dispatch_one(msg) + _v and LOG.debug('%r: %r -> %r', self, msg, ret) + if msg.reply_to: + msg.reply(ret) + elif isinstance(ret, CallError) and chain_id is None: + LOG.error('No-reply function call failed: %s', ret) + + def run(self): + if self.econtext.config.get('on_start'): + self.econtext.config['on_start'](self.econtext) + + _profile_hook('mitogen.child_main', self._dispatch_calls) + + +class ExternalContext(object): + """ + External context implementation. + + This class contains the main program implementation for new children. It is + responsible for setting up everything about the process environment, import + hooks, standard IO redirection, logging, configuring a :class:`Router` and + :class:`Broker`, and finally arranging for :class:`Dispatcher` to take over + the main thread after initialization is complete. + + .. attribute:: broker + + The :class:`mitogen.core.Broker` instance. + + .. attribute:: context + + The :class:`mitogen.core.Context` instance. + + .. attribute:: channel + + The :class:`mitogen.core.Channel` over which :data:`CALL_FUNCTION` + requests are received. + + .. attribute:: importer + + The :class:`mitogen.core.Importer` instance. + + .. attribute:: stdout_log + + The :class:`IoLogger` connected to :data:`sys.stdout`. + + .. attribute:: stderr_log + + The :class:`IoLogger` connected to :data:`sys.stderr`. 
+ """ + detached = False + + def __init__(self, config): + self.config = config + + def _on_broker_exit(self): + if not self.config['profiling']: + os.kill(os.getpid(), signal.SIGTERM) + + def _on_shutdown_msg(self, msg): + if not msg.is_dead: + _v and LOG.debug('shutdown request from context %d', msg.src_id) + self.broker.shutdown() + + def _on_parent_disconnect(self): + if self.detached: + mitogen.parent_ids = [] + mitogen.parent_id = None + LOG.info('Detachment complete') + else: + _v and LOG.debug('parent stream is gone, dying.') + self.broker.shutdown() + + def detach(self): + self.detached = True + stream = self.router.stream_by_id(mitogen.parent_id) + if stream: # not double-detach()'d + os.setsid() + self.parent.send_await(Message(handle=DETACHING)) + LOG.info('Detaching from %r; parent is %s', stream, self.parent) + for x in range(20): + pending = self.broker.defer_sync(stream.protocol.pending_bytes) + if not pending: + break + time.sleep(0.05) + if pending: + LOG.error('Stream had %d bytes after 2000ms', pending) + self.broker.defer(stream.on_disconnect, self.broker) + + def _setup_master(self): + Router.max_message_size = self.config['max_message_size'] + if self.config['profiling']: + enable_profiling() + self.broker = Broker(activate_compat=False) + self.router = Router(self.broker) + self.router.debug = self.config.get('debug', False) + self.router.unidirectional = self.config['unidirectional'] + self.router.add_handler( + fn=self._on_shutdown_msg, + handle=SHUTDOWN, + policy=has_parent_authority, + ) + self.master = Context(self.router, 0, 'master') + parent_id = self.config['parent_ids'][0] + if parent_id == 0: + self.parent = self.master + else: + self.parent = Context(self.router, parent_id, 'parent') + + in_fd = self.config.get('in_fd', 100) + in_fp = os.fdopen(os.dup(in_fd), 'rb', 0) + os.close(in_fd) + + out_fp = os.fdopen(os.dup(self.config.get('out_fd', 1)), 'wb', 0) + self.stream = MitogenProtocol.build_stream( + self.router, + parent_id, + 
local_id=self.config['context_id'], + parent_ids=self.config['parent_ids'] + ) + self.stream.accept(in_fp, out_fp) + self.stream.name = 'parent' + self.stream.receive_side.keep_alive = False + + listen(self.stream, 'disconnect', self._on_parent_disconnect) + listen(self.broker, 'exit', self._on_broker_exit) + + def _reap_first_stage(self): + try: + os.wait() # Reap first stage. + except OSError: + pass # No first stage exists (e.g. fakessh) + + def _setup_logging(self): + self.log_handler = LogHandler(self.master) + root = logging.getLogger() + root.setLevel(self.config['log_level']) + root.handlers = [self.log_handler] + if self.config['debug']: + enable_debug_logging() + + def _setup_importer(self): + importer = self.config.get('importer') + if importer: + importer._install_handler(self.router) + importer._context = self.parent + else: + core_src_fd = self.config.get('core_src_fd', 101) + if core_src_fd: + fp = os.fdopen(core_src_fd, 'rb', 0) + try: + core_src = fp.read() + # Strip "ExternalContext.main()" call from last line. 
+ core_src = b('\n').join(core_src.splitlines()[:-1]) + finally: + fp.close() + else: + core_src = None + + importer = Importer( + self.router, + self.parent, + core_src, + self.config.get('whitelist', ()), + self.config.get('blacklist', ()), + ) + + self.importer = importer + self.router.importer = importer + sys.meta_path.insert(0, self.importer) + + def _setup_package(self): + global mitogen + mitogen = types.ModuleType('mitogen') + mitogen.__package__ = 'mitogen' + mitogen.__path__ = [] + mitogen.__loader__ = self.importer + mitogen.main = lambda *args, **kwargs: (lambda func: None) + mitogen.core = sys.modules['__main__'] + mitogen.core.__file__ = 'x/mitogen/core.py' # For inspect.getsource() + mitogen.core.__loader__ = self.importer + sys.modules['mitogen'] = mitogen + sys.modules['mitogen.core'] = mitogen.core + del sys.modules['__main__'] + + def _setup_globals(self): + mitogen.is_master = False + mitogen.__version__ = self.config['version'] + mitogen.context_id = self.config['context_id'] + mitogen.parent_ids = self.config['parent_ids'][:] + mitogen.parent_id = mitogen.parent_ids[0] + + def _nullify_stdio(self): + """ + Open /dev/null to replace stdio temporarily. In case of odd startup, + assume we may be allocated a standard handle. + """ + for stdfd, mode in ((0, os.O_RDONLY), (1, os.O_RDWR), (2, os.O_RDWR)): + fd = os.open('/dev/null', mode) + if fd != stdfd: + os.dup2(fd, stdfd) + os.close(fd) + + def _preserve_tty_fp(self): + """ + #481: when stderr is a TTY due to being started via tty_create_child() + or hybrid_tty_create_child(), and some privilege escalation tool like + prehistoric versions of sudo exec this process over the top of itself, + there is nothing left to keep the slave PTY open after we replace our + stdio. Therefore if stderr is a TTY, keep around a permanent dup() to + avoid receiving SIGHUP. 
+ """ + try: + if os.isatty(2): + self.reserve_tty_fp = os.fdopen(os.dup(2), 'r+b', 0) + set_cloexec(self.reserve_tty_fp.fileno()) + except OSError: + pass + + def _setup_stdio(self): + self._preserve_tty_fp() + # When sys.stdout was opened by the runtime, overwriting it will not + # close FD 1. However when forking from a child that previously used + # fdopen(), overwriting it /will/ close FD 1. So we must swallow the + # close before IoLogger overwrites FD 1, otherwise its new FD 1 will be + # clobbered. Additionally, stdout must be replaced with /dev/null prior + # to stdout.close(), since if block buffering was active in the parent, + # any pre-fork buffered data will be flushed on close(), corrupting the + # connection to the parent. + self._nullify_stdio() + sys.stdout.close() + self._nullify_stdio() + + self.loggers = [] + for name, fd in (('stdout', 1), ('stderr', 2)): + log = IoLoggerProtocol.build_stream(name, fd) + self.broker.start_receive(log) + self.loggers.append(log) + + # Reopen with line buffering. 
+ sys.stdout = os.fdopen(1, 'w', 1) + + def main(self): + self._setup_master() + try: + try: + self._setup_logging() + self._setup_importer() + self._reap_first_stage() + if self.config.get('setup_package', True): + self._setup_package() + self._setup_globals() + if self.config.get('setup_stdio', True): + self._setup_stdio() + + self.dispatcher = Dispatcher(self) + self.router.register(self.parent, self.stream) + self.router._setup_logging() + + _v and LOG.debug('Python version is %s', sys.version) + _v and LOG.debug('Parent is context %r (%s); my ID is %r', + self.parent.context_id, self.parent.name, + mitogen.context_id) + _v and LOG.debug('pid:%r ppid:%r uid:%r/%r, gid:%r/%r host:%r', + os.getpid(), os.getppid(), os.geteuid(), + os.getuid(), os.getegid(), os.getgid(), + socket.gethostname()) + + sys.executable = os.environ.pop('ARGV0', sys.executable) + _v and LOG.debug('Recovered sys.executable: %r', sys.executable) + + if self.config.get('send_ec2', True): + self.stream.transmit_side.write(b('MITO002\n')) + self.broker._py24_25_compat() + self.log_handler.uncork() + self.dispatcher.run() + _v and LOG.debug('ExternalContext.main() normal exit') + except KeyboardInterrupt: + LOG.debug('KeyboardInterrupt received, exiting gracefully.') + except BaseException: + LOG.exception('ExternalContext.main() crashed') + raise + finally: + self.broker.shutdown() diff --git a/mitogen-0.3.9/mitogen/debug.py b/mitogen-0.3.9/mitogen/debug.py new file mode 100644 index 0000000..dbab550 --- /dev/null +++ b/mitogen-0.3.9/mitogen/debug.py @@ -0,0 +1,236 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. 
Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +# !mitogen: minify_safe + +""" +Basic signal handler for dumping thread stacks. 
+""" + +import difflib +import logging +import os +import gc +import signal +import sys +import threading +import time +import traceback + +import mitogen.core +import mitogen.parent + + +LOG = logging.getLogger(__name__) +_last = None + + +def enable_evil_interrupts(): + signal.signal(signal.SIGALRM, (lambda a, b: None)) + signal.setitimer(signal.ITIMER_REAL, 0.01, 0.01) + + +def disable_evil_interrupts(): + signal.setitimer(signal.ITIMER_REAL, 0, 0) + + +def _hex(n): + return '%08x' % n + + +def get_subclasses(klass): + """ + Rather than statically import every interesting subclass, forcing it all to + be transferred and potentially disrupting the debugged environment, + enumerate only those loaded in memory. Also returns the original class. + """ + stack = [klass] + seen = set() + while stack: + klass = stack.pop() + seen.add(klass) + stack.extend(klass.__subclasses__()) + return seen + + +def get_routers(): + return dict( + (_hex(id(router)), router) + for klass in get_subclasses(mitogen.core.Router) + for router in gc.get_referrers(klass) + if isinstance(router, mitogen.core.Router) + ) + + +def get_router_info(): + return { + 'routers': dict( + (id_, { + 'id': id_, + 'streams': len(set(router._stream_by_id.values())), + 'contexts': len(set(router._context_by_id.values())), + 'handles': len(router._handle_map), + }) + for id_, router in get_routers().items() + ) + } + + +def get_stream_info(router_id): + router = get_routers().get(router_id) + return { + 'streams': dict( + (_hex(id(stream)), ({ + 'name': stream.name, + 'remote_id': stream.remote_id, + 'sent_module_count': len(getattr(stream, 'sent_modules', [])), + 'routes': sorted(getattr(stream, 'routes', [])), + 'type': type(stream).__module__, + })) + for via_id, stream in router._stream_by_id.items() + ) + } + + +def format_stacks(): + name_by_id = dict( + (t.ident, t.name) + for t in threading.enumerate() + ) + + l = ['', ''] + for threadId, stack in sys._current_frames().items(): + l += ["# PID %d 
ThreadID: (%s) %s; %r" % ( + os.getpid(), + name_by_id.get(threadId, ''), + threadId, + stack, + )] + #stack = stack.f_back.f_back + + for filename, lineno, name, line in traceback.extract_stack(stack): + l += [ + 'File: "%s", line %d, in %s' % ( + filename, + lineno, + name + ) + ] + if line: + l += [' ' + line.strip()] + l += [''] + + l += ['', ''] + return '\n'.join(l) + + +def get_snapshot(): + global _last + + s = format_stacks() + snap = s + if _last: + snap += '\n' + diff = list(difflib.unified_diff( + a=_last.splitlines(), + b=s.splitlines(), + fromfile='then', + tofile='now' + )) + + if diff: + snap += '\n'.join(diff) + '\n' + else: + snap += '(no change since last time)\n' + _last = s + return snap + + +def _handler(*_): + fp = open('/dev/tty', 'w', 1) + fp.write(get_snapshot()) + fp.close() + + +def install_handler(): + signal.signal(signal.SIGUSR2, _handler) + + +def _logging_main(secs): + while True: + time.sleep(secs) + LOG.info('PERIODIC THREAD DUMP\n\n%s', get_snapshot()) + + +def dump_to_logger(secs=5): + th = threading.Thread( + target=_logging_main, + kwargs={'secs': secs}, + name='mitogen.debug.dump_to_logger', + ) + th.setDaemon(True) + th.start() + + +class ContextDebugger(object): + @classmethod + @mitogen.core.takes_econtext + def _configure_context(cls, econtext): + mitogen.parent.upgrade_router(econtext) + econtext.debugger = cls(econtext.router) + + def __init__(self, router): + self.router = router + self.router.add_handler( + func=self._on_debug_msg, + handle=mitogen.core.DEBUG, + persist=True, + policy=mitogen.core.has_parent_authority, + ) + mitogen.core.listen(router, 'register', self._on_stream_register) + LOG.debug('Context debugging configured.') + + def _on_stream_register(self, context, stream): + LOG.debug('_on_stream_register: sending configure() to %r', stream) + context.call_async(ContextDebugger._configure_context) + + def _on_debug_msg(self, msg): + if msg != mitogen.core._DEAD: + threading.Thread( + 
target=self._handle_debug_msg, + name='ContextDebuggerHandler', + args=(msg,) + ).start() + + def _handle_debug_msg(self, msg): + try: + method, args, kwargs = msg.unpickle() + msg.reply(getattr(self, method)(*args, **kwargs)) + except Exception: + e = sys.exc_info()[1] + msg.reply(mitogen.core.CallError(e)) diff --git a/mitogen-0.3.9/mitogen/doas.py b/mitogen-0.3.9/mitogen/doas.py new file mode 100644 index 0000000..5b212b9 --- /dev/null +++ b/mitogen-0.3.9/mitogen/doas.py @@ -0,0 +1,142 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +# !mitogen: minify_safe + +import logging +import re + +import mitogen.core +import mitogen.parent + + +LOG = logging.getLogger(__name__) + +password_incorrect_msg = 'doas password is incorrect' +password_required_msg = 'doas password is required' + + +class PasswordError(mitogen.core.StreamError): + pass + + +class Options(mitogen.parent.Options): + username = u'root' + password = None + doas_path = 'doas' + password_prompt = u'Password:' + incorrect_prompts = ( + u'doas: authentication failed', # slicer69/doas + u'doas: Authorization failed', # openbsd/src + ) + + def __init__(self, username=None, password=None, doas_path=None, + password_prompt=None, incorrect_prompts=None, **kwargs): + super(Options, self).__init__(**kwargs) + if username is not None: + self.username = mitogen.core.to_text(username) + if password is not None: + self.password = mitogen.core.to_text(password) + if doas_path is not None: + self.doas_path = doas_path + if password_prompt is not None: + self.password_prompt = mitogen.core.to_text(password_prompt) + if incorrect_prompts is not None: + self.incorrect_prompts = [ + mitogen.core.to_text(p) + for p in incorrect_prompts + ] + + +class BootstrapProtocol(mitogen.parent.RegexProtocol): + password_sent = False + + def setup_patterns(self, conn): + prompt_pattern = re.compile( + re.escape(conn.options.password_prompt).encode('utf-8'), + re.I + ) + incorrect_prompt_pattern = re.compile( + u'|'.join( + re.escape(s) + 
for s in conn.options.incorrect_prompts + ).encode('utf-8'), + re.I + ) + + self.PATTERNS = [ + (incorrect_prompt_pattern, type(self)._on_incorrect_password), + ] + self.PARTIAL_PATTERNS = [ + (prompt_pattern, type(self)._on_password_prompt), + ] + + def _on_incorrect_password(self, line, match): + if self.password_sent: + self.stream.conn._fail_connection( + PasswordError(password_incorrect_msg) + ) + + def _on_password_prompt(self, line, match): + if self.stream.conn.options.password is None: + self.stream.conn._fail_connection( + PasswordError(password_required_msg) + ) + return + + if self.password_sent: + self.stream.conn._fail_connection( + PasswordError(password_incorrect_msg) + ) + return + + LOG.debug('sending password') + self.stream.transmit_side.write( + (self.stream.conn.options.password + '\n').encode('utf-8') + ) + self.password_sent = True + + +class Connection(mitogen.parent.Connection): + options_class = Options + diag_protocol_class = BootstrapProtocol + + create_child = staticmethod(mitogen.parent.hybrid_tty_create_child) + child_is_immediate_subprocess = False + + def _get_name(self): + return u'doas.' + self.options.username + + def stderr_stream_factory(self): + stream = super(Connection, self).stderr_stream_factory() + stream.protocol.setup_patterns(self) + return stream + + def get_boot_command(self): + bits = [self.options.doas_path, '-u', self.options.username, '--'] + return bits + super(Connection, self).get_boot_command() diff --git a/mitogen-0.3.9/mitogen/docker.py b/mitogen-0.3.9/mitogen/docker.py new file mode 100644 index 0000000..48848c8 --- /dev/null +++ b/mitogen-0.3.9/mitogen/docker.py @@ -0,0 +1,83 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. 
Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+ +# !mitogen: minify_safe + +import logging + +import mitogen.core +import mitogen.parent + + +LOG = logging.getLogger(__name__) + + +class Options(mitogen.parent.Options): + container = None + image = None + username = None + docker_path = u'docker' + + def __init__(self, container=None, image=None, docker_path=None, + username=None, **kwargs): + super(Options, self).__init__(**kwargs) + assert container or image + if container: + self.container = mitogen.core.to_text(container) + if image: + self.image = mitogen.core.to_text(image) + if docker_path: + self.docker_path = mitogen.core.to_text(docker_path) + if username: + self.username = mitogen.core.to_text(username) + + +class Connection(mitogen.parent.Connection): + options_class = Options + child_is_immediate_subprocess = False + + # TODO: better way of capturing errors such as "No such container." + create_child_args = { + 'merge_stdio': True + } + + def _get_name(self): + return u'docker.' + (self.options.container or self.options.image) + + def get_boot_command(self): + args = ['--interactive'] + if self.options.username: + args += ['--user=' + self.options.username] + + bits = [self.options.docker_path] + if self.options.container: + bits += ['exec'] + args + [self.options.container] + elif self.options.image: + bits += ['run'] + args + ['--rm', self.options.image] + + return bits + super(Connection, self).get_boot_command() diff --git a/mitogen-0.3.9/mitogen/fakessh.py b/mitogen-0.3.9/mitogen/fakessh.py new file mode 100644 index 0000000..2d66024 --- /dev/null +++ b/mitogen-0.3.9/mitogen/fakessh.py @@ -0,0 +1,455 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. 
Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +# !mitogen: minify_safe + +""" +:mod:`mitogen.fakessh` is a stream implementation that starts a subprocess with +its environment modified such that ``PATH`` searches for `ssh` return a Mitogen +implementation of SSH. When invoked, this implementation arranges for the +command line supplied by the caller to be executed in a remote context, reusing +the parent context's (possibly proxied) connection to that remote context. 
+ +This allows tools like `rsync` and `scp` to transparently reuse the connections +and tunnels already established by the host program to connect to a target +machine, without wasteful redundant SSH connection setup, 3-way handshakes, or +firewall hopping configurations, and enables these tools to be used in +impossible scenarios, such as over `sudo` with ``requiretty`` enabled. + +The fake `ssh` command source is written to a temporary file on disk, and +consists of a copy of the :py:mod:`mitogen.core` source code (just like any +other child context), with a line appended to cause it to connect back to the +host process over an FD it inherits. As there is no reliance on an existing +filesystem file, it is possible for child contexts to use fakessh. + +As a consequence of connecting back through an inherited FD, only one SSH +invocation is possible, which is fine for tools like `rsync`, however in future +this restriction will be lifted. + +Sequence: + + 1. ``fakessh`` Context and Stream created by parent context. The stream's + buffer has a :py:func:`_fakessh_main` :py:data:`CALL_FUNCTION + ` enqueued. + 2. Target program (`rsync/scp/sftp`) invoked, which internally executes + `ssh` from ``PATH``. + 3. :py:mod:`mitogen.core` bootstrap begins, recovers the stream FD + inherited via the target program, established itself as the fakessh + context. + 4. :py:func:`_fakessh_main` :py:data:`CALL_FUNCTION + ` is read by fakessh context, + + a. sets up :py:class:`IoPump` for stdio, registers + stdin_handle for local context. + b. Enqueues :py:data:`CALL_FUNCTION ` for + :py:func:`_start_slave` invoked in target context, + + i. the program from the `ssh` command line is started + ii. sets up :py:class:`IoPump` for `ssh` command line process's + stdio pipes + iii. returns `(control_handle, stdin_handle)` to + :py:func:`_fakessh_main` + + 5. :py:func:`_fakessh_main` receives control/stdin handles from from + :py:func:`_start_slave`, + + a. 
registers remote's stdin_handle with local :py:class:`IoPump`. + b. sends `("start", local_stdin_handle)` to remote's control_handle + c. registers local :py:class:`IoPump` with + :py:class:`mitogen.core.Broker`. + d. loops waiting for `local stdout closed && remote stdout closed` + + 6. :py:func:`_start_slave` control channel receives `("start", stdin_handle)`, + + a. registers remote's stdin_handle with local :py:class:`IoPump` + b. registers local :py:class:`IoPump` with + :py:class:`mitogen.core.Broker`. + c. loops waiting for `local stdout closed && remote stdout closed` +""" + +import getopt +import inspect +import os +import shutil +import socket +import subprocess +import sys +import tempfile +import threading + +import mitogen.core +import mitogen.parent + +from mitogen.core import LOG, IOLOG + + +SSH_GETOPTS = ( + "1246ab:c:e:fgi:kl:m:no:p:qstvx" + "ACD:E:F:I:KL:MNO:PQ:R:S:TVw:W:XYy" +) + +_mitogen = None + + +class IoPump(mitogen.core.Protocol): + _output_buf = '' + _closed = False + + def __init__(self, broker): + self._broker = broker + + def write(self, s): + self._output_buf += s + self._broker._start_transmit(self) + + def close(self): + self._closed = True + # If local process hasn't exitted yet, ensure its write buffer is + # drained before lazily triggering disconnect in on_transmit. 
+ if self.transmit_side.fp.fileno() is not None: + self._broker._start_transmit(self) + + def on_shutdown(self, stream, broker): + self.close() + + def on_transmit(self, stream, broker): + written = self.transmit_side.write(self._output_buf) + IOLOG.debug('%r.on_transmit() -> len %r', self, written) + if written is None: + self.on_disconnect(broker) + else: + self._output_buf = self._output_buf[written:] + + if not self._output_buf: + broker._stop_transmit(self) + if self._closed: + self.on_disconnect(broker) + + def on_receive(self, stream, broker): + s = stream.receive_side.read() + IOLOG.debug('%r.on_receive() -> len %r', self, len(s)) + if s: + mitogen.core.fire(self, 'receive', s) + else: + self.on_disconnect(broker) + + def __repr__(self): + return 'IoPump(%r, %r)' % ( + self.receive_side.fp.fileno(), + self.transmit_side.fp.fileno(), + ) + + +class Process(object): + """ + Manages the lifetime and pipe connections of the SSH command running in the + slave. + """ + def __init__(self, router, stdin, stdout, proc=None): + self.router = router + self.stdin = stdin + self.stdout = stdout + self.proc = proc + self.control_handle = router.add_handler(self._on_control) + self.stdin_handle = router.add_handler(self._on_stdin) + self.pump = IoPump.build_stream(router.broker) + self.pump.accept(stdin, stdout) + self.stdin = None + self.control = None + self.wake_event = threading.Event() + + mitogen.core.listen(self.pump, 'disconnect', self._on_pump_disconnect) + mitogen.core.listen(self.pump, 'receive', self._on_pump_receive) + + if proc: + pmon = mitogen.parent.ProcessMonitor.instance() + pmon.add(proc.pid, self._on_proc_exit) + + def __repr__(self): + return 'Process(%r, %r)' % (self.stdin, self.stdout) + + def _on_proc_exit(self, status): + LOG.debug('%r._on_proc_exit(%r)', self, status) + self.control.put(('exit', status)) + + def _on_stdin(self, msg): + if msg.is_dead: + IOLOG.debug('%r._on_stdin() -> %r', self, msg) + self.pump.protocol.close() + return + + data 
= msg.unpickle() + IOLOG.debug('%r._on_stdin() -> len %d', self, len(data)) + self.pump.protocol.write(data) + + def _on_control(self, msg): + if not msg.is_dead: + command, arg = msg.unpickle(throw=False) + LOG.debug('%r._on_control(%r, %s)', self, command, arg) + + func = getattr(self, '_on_%s' % (command,), None) + if func: + return func(msg, arg) + + LOG.warning('%r: unknown command %r', self, command) + + def _on_start(self, msg, arg): + dest = mitogen.core.Context(self.router, msg.src_id) + self.control = mitogen.core.Sender(dest, arg[0]) + self.stdin = mitogen.core.Sender(dest, arg[1]) + self.router.broker.start_receive(self.pump) + + def _on_exit(self, msg, arg): + LOG.debug('on_exit: proc = %r', self.proc) + if self.proc: + self.proc.terminate() + else: + self.router.broker.shutdown() + + def _on_pump_receive(self, s): + IOLOG.info('%r._on_pump_receive(len %d)', self, len(s)) + self.stdin.put(s) + + def _on_pump_disconnect(self): + LOG.debug('%r._on_pump_disconnect()', self) + mitogen.core.fire(self, 'disconnect') + self.stdin.close() + self.wake_event.set() + + def start_master(self, stdin, control): + self.stdin = stdin + self.control = control + control.put(('start', (self.control_handle, self.stdin_handle))) + self.router.broker.start_receive(self.pump) + + def wait(self): + while not self.wake_event.isSet(): + # Timeout is used so that sleep is interruptible, as blocking + # variants of libc thread operations cannot be interrupted e.g. via + # KeyboardInterrupt. isSet() test and wait() are separate since in + # <2.7 wait() always returns None. + self.wake_event.wait(0.1) + + +@mitogen.core.takes_router +def _start_slave(src_id, cmdline, router): + """ + This runs in the target context, it is invoked by _fakessh_main running in + the fakessh context immediately after startup. It starts the slave process + (the the point where it has a stdin_handle to target but not stdout_chan to + write to), and waits for main to. 
+ """ + LOG.debug('_start_slave(%r, %r)', router, cmdline) + + proc = subprocess.Popen( + cmdline, + # SSH server always uses user's shell. + shell=True, + # SSH server always executes new commands in the user's HOME. + cwd=os.path.expanduser('~'), + + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + ) + + process = Process(router, proc.stdin, proc.stdout, proc) + return process.control_handle, process.stdin_handle + + +# +# SSH client interface. +# + + +def exit(): + _mitogen.broker.shutdown() + + +def die(msg, *args): + if args: + msg %= args + sys.stderr.write('%s\n' % (msg,)) + exit() + + +def parse_args(): + hostname = None + remain = sys.argv[1:] + allopts = [] + restarted = 0 + + while remain and restarted < 2: + opts, args = getopt.getopt(remain, SSH_GETOPTS) + remain = remain[:] # getopt bug! + allopts += opts + if not args: + break + + if not hostname: + hostname = args.pop(0) + remain = remain[remain.index(hostname) + 1:] + + restarted += 1 + + return hostname, allopts, args + + +@mitogen.core.takes_econtext +def _fakessh_main(dest_context_id, econtext): + hostname, opts, args = parse_args() + if not hostname: + die('Missing hostname') + + subsystem = False + for opt, optarg in opts: + if opt == '-s': + subsystem = True + else: + LOG.debug('Warning option %s %s is ignored.', opt, optarg) + + LOG.debug('hostname: %r', hostname) + LOG.debug('opts: %r', opts) + LOG.debug('args: %r', args) + + if subsystem: + die('-s is not yet supported') + + if not args: + die('fakessh: login mode not supported and no command specified') + + dest = mitogen.parent.Context(econtext.router, dest_context_id) + + # Even though SSH receives an argument vector, it still cats the vector + # together before sending to the server, the server just uses /bin/sh -c to + # run the command. We must remain puke-for-puke compatible. 
+ control_handle, stdin_handle = dest.call(_start_slave, + mitogen.context_id, ' '.join(args)) + + LOG.debug('_fakessh_main: received control_handle=%r, stdin_handle=%r', + control_handle, stdin_handle) + + process = Process(econtext.router, + stdin=os.fdopen(1, 'w+b', 0), + stdout=os.fdopen(0, 'r+b', 0)) + process.start_master( + stdin=mitogen.core.Sender(dest, stdin_handle), + control=mitogen.core.Sender(dest, control_handle), + ) + process.wait() + process.control.put(('exit', None)) + + +def _get_econtext_config(context, sock2): + parent_ids = mitogen.parent_ids[:] + parent_ids.insert(0, mitogen.context_id) + return { + 'context_id': context.context_id, + 'core_src_fd': None, + 'debug': getattr(context.router, 'debug', False), + 'in_fd': sock2.fileno(), + 'log_level': mitogen.parent.get_log_level(), + 'max_message_size': context.router.max_message_size, + 'out_fd': sock2.fileno(), + 'parent_ids': parent_ids, + 'profiling': getattr(context.router, 'profiling', False), + 'unidirectional': getattr(context.router, 'unidirectional', False), + 'setup_stdio': False, + 'version': mitogen.__version__, + } + + +# +# Public API. +# + +@mitogen.core.takes_econtext +@mitogen.core.takes_router +def run(dest, router, args, deadline=None, econtext=None): + """ + Run the command specified by `args` such that ``PATH`` searches for SSH by + the command will cause its attempt to use SSH to execute a remote program + to be redirected to use mitogen to execute that program using the context + `dest` instead. + + :param list args: + Argument vector. + :param mitogen.core.Context dest: + The destination context to execute the SSH command line in. + + :param mitogen.core.Router router: + + :param list[str] args: + Command line arguments for local program, e.g. + ``['rsync', '/tmp', 'remote:/tmp']`` + + :returns: + Exit status of the child process. 
+ """ + if econtext is not None: + mitogen.parent.upgrade_router(econtext) + + context_id = router.allocate_id() + fakessh = mitogen.parent.Context(router, context_id) + fakessh.name = u'fakessh.%d' % (context_id,) + + sock1, sock2 = socket.socketpair() + + stream = mitogen.core.Stream(router, context_id) + stream.name = u'fakessh' + stream.accept(sock1, sock1) + router.register(fakessh, stream) + + # Held in socket buffer until process is booted. + fakessh.call_async(_fakessh_main, dest.context_id) + + tmp_path = tempfile.mkdtemp(prefix='mitogen_fakessh') + try: + ssh_path = os.path.join(tmp_path, 'ssh') + fp = open(ssh_path, 'w') + try: + fp.write('#!%s\n' % (mitogen.parent.get_sys_executable(),)) + fp.write(inspect.getsource(mitogen.core)) + fp.write('\n') + fp.write('ExternalContext(%r).main()\n' % ( + _get_econtext_config(econtext, sock2), + )) + finally: + fp.close() + + os.chmod(ssh_path, int('0755', 8)) + env = os.environ.copy() + env.update({ + 'PATH': '%s:%s' % (tmp_path, env.get('PATH', '')), + 'ARGV0': mitogen.parent.get_sys_executable(), + 'SSH_PATH': ssh_path, + }) + + proc = subprocess.Popen(args, env=env) + return proc.wait() + finally: + shutil.rmtree(tmp_path) diff --git a/mitogen-0.3.9/mitogen/fork.py b/mitogen-0.3.9/mitogen/fork.py new file mode 100644 index 0000000..f0c2d7e --- /dev/null +++ b/mitogen-0.3.9/mitogen/fork.py @@ -0,0 +1,250 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. 
Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +# !mitogen: minify_safe + +import errno +import logging +import os +import random +import sys +import threading +import traceback + +import mitogen.core +import mitogen.parent +from mitogen.core import b + + +LOG = logging.getLogger(__name__) + +# Python 2.4/2.5 cannot support fork+threads whatsoever, it doesn't even fix up +# interpreter state. So 2.4/2.5 interpreters start .local() contexts for +# isolation instead. Since we don't have any crazy memory sharing problems to +# avoid, there is no virginal fork parent either. The child is started directly +# from the login/become process. In future this will be default everywhere, +# fork is brainwrong from the stone age. +FORK_SUPPORTED = sys.version_info >= (2, 6) + + +class Error(mitogen.core.StreamError): + pass + + +def fixup_prngs(): + """ + Add 256 bits of /dev/urandom to OpenSSL's PRNG in the child, and re-seed + the random package with the same data. 
+ """ + s = os.urandom(256 // 8) + random.seed(s) + if 'ssl' in sys.modules: + sys.modules['ssl'].RAND_add(s, 75.0) + + +def reset_logging_framework(): + """ + After fork, ensure any logging.Handler locks are recreated, as a variety of + threads in the parent may have been using the logging package at the moment + of fork. + + It is not possible to solve this problem in general; see :gh:issue:`150` + for a full discussion. + """ + logging._lock = threading.RLock() + + # The root logger does not appear in the loggerDict. + logging.Logger.manager.loggerDict = {} + logging.getLogger().handlers = [] + + +def on_fork(): + """ + Should be called by any program integrating Mitogen each time the process + is forked, in the context of the new child. + """ + reset_logging_framework() # Must be first! + fixup_prngs() + mitogen.core.Latch._on_fork() + mitogen.core.Side._on_fork() + mitogen.core.ExternalContext.service_stub_lock = threading.Lock() + + mitogen__service = sys.modules.get('mitogen.service') + if mitogen__service: + mitogen__service._pool_lock = threading.Lock() + + +def handle_child_crash(): + """ + Respond to _child_main() crashing by ensuring the relevant exception is + logged to /dev/tty. + """ + tty = open('/dev/tty', 'wb') + tty.write('\n\nFORKED CHILD PID %d CRASHED\n%s\n\n' % ( + os.getpid(), + traceback.format_exc(), + )) + tty.close() + os._exit(1) + + +def _convert_exit_status(status): + """ + Convert a :func:`os.waitpid`-style exit status to a :mod:`subprocess` style + exit status. 
+ """ + if os.WIFEXITED(status): + return os.WEXITSTATUS(status) + elif os.WIFSIGNALED(status): + return -os.WTERMSIG(status) + elif os.WIFSTOPPED(status): + return -os.WSTOPSIG(status) + + +class Process(mitogen.parent.Process): + def poll(self): + try: + pid, status = os.waitpid(self.pid, os.WNOHANG) + except OSError: + e = sys.exc_info()[1] + if e.args[0] == errno.ECHILD: + LOG.warn('%r: waitpid(%r) produced ECHILD', self, self.pid) + return + raise + + if not pid: + return + return _convert_exit_status(status) + + +class Options(mitogen.parent.Options): + #: Reference to the importer, if any, recovered from the parent. + importer = None + + #: User-supplied function for cleaning up child process state. + on_fork = None + + def __init__(self, old_router, max_message_size, on_fork=None, debug=False, + profiling=False, unidirectional=False, on_start=None, + name=None): + if not FORK_SUPPORTED: + raise Error(self.python_version_msg) + + # fork method only supports a tiny subset of options. + super(Options, self).__init__( + max_message_size=max_message_size, debug=debug, + profiling=profiling, unidirectional=unidirectional, name=name, + ) + self.on_fork = on_fork + self.on_start = on_start + + responder = getattr(old_router, 'responder', None) + if isinstance(responder, mitogen.parent.ModuleForwarder): + self.importer = responder.importer + + +class Connection(mitogen.parent.Connection): + options_class = Options + child_is_immediate_subprocess = True + + python_version_msg = ( + "The mitogen.fork method is not supported on Python versions " + "prior to 2.6, since those versions made no attempt to repair " + "critical interpreter state following a fork. Please use the " + "local() method instead." 
+ ) + + name_prefix = u'fork' + + def start_child(self): + parentfp, childfp = mitogen.parent.create_socketpair() + pid = os.fork() + if pid: + childfp.close() + return Process(pid, stdin=parentfp, stdout=parentfp) + else: + parentfp.close() + self._wrap_child_main(childfp) + + def _wrap_child_main(self, childfp): + try: + self._child_main(childfp) + except BaseException: + handle_child_crash() + + def get_econtext_config(self): + config = super(Connection, self).get_econtext_config() + config['core_src_fd'] = None + config['importer'] = self.options.importer + config['send_ec2'] = False + config['setup_package'] = False + if self.options.on_start: + config['on_start'] = self.options.on_start + return config + + def _child_main(self, childfp): + on_fork() + if self.options.on_fork: + self.options.on_fork() + mitogen.core.set_block(childfp.fileno()) + + childfp.send(b('MITO002\n')) + + # Expected by the ExternalContext.main(). + os.dup2(childfp.fileno(), 1) + os.dup2(childfp.fileno(), 100) + + # Overwritten by ExternalContext.main(); we must replace the + # parent-inherited descriptors that were closed by Side._on_fork() to + # avoid ExternalContext.main() accidentally allocating new files over + # the standard handles. + os.dup2(childfp.fileno(), 0) + + # Avoid corrupting the stream on fork crash by dupping /dev/null over + # stderr. Instead, handle_child_crash() uses /dev/tty to log errors. + devnull = os.open('/dev/null', os.O_WRONLY) + if devnull != 2: + os.dup2(devnull, 2) + os.close(devnull) + + # If we're unlucky, childfp.fileno() may coincidentally be one of our + # desired FDs. In that case closing it breaks ExternalContext.main(). + if childfp.fileno() not in (0, 1, 100): + childfp.close() + + mitogen.core.IOLOG.setLevel(logging.INFO) + + try: + try: + mitogen.core.ExternalContext(self.get_econtext_config()).main() + except Exception: + # TODO: report exception somehow. 
+ os._exit(72) + finally: + # Don't trigger atexit handlers, they were copied from the parent. + os._exit(0) diff --git a/mitogen-0.3.9/mitogen/jail.py b/mitogen-0.3.9/mitogen/jail.py new file mode 100644 index 0000000..4da7eb0 --- /dev/null +++ b/mitogen-0.3.9/mitogen/jail.py @@ -0,0 +1,65 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+ +# !mitogen: minify_safe + +import mitogen.core +import mitogen.parent + + +class Options(mitogen.parent.Options): + container = None + username = None + jexec_path = u'/usr/sbin/jexec' + + def __init__(self, container, jexec_path=None, username=None, **kwargs): + super(Options, self).__init__(**kwargs) + self.container = mitogen.core.to_text(container) + if username: + self.username = mitogen.core.to_text(username) + if jexec_path: + self.jexec_path = jexec_path + + +class Connection(mitogen.parent.Connection): + options_class = Options + + child_is_immediate_subprocess = False + create_child_args = { + 'merge_stdio': True + } + + def _get_name(self): + return u'jail.' + self.options.container + + def get_boot_command(self): + bits = [self.options.jexec_path] + if self.options.username: + bits += ['-U', self.options.username] + bits += [self.options.container] + return bits + super(Connection, self).get_boot_command() diff --git a/mitogen-0.3.9/mitogen/kubectl.py b/mitogen-0.3.9/mitogen/kubectl.py new file mode 100644 index 0000000..5d3994a --- /dev/null +++ b/mitogen-0.3.9/mitogen/kubectl.py @@ -0,0 +1,66 @@ +# Copyright 2018, Yannig Perre +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +# !mitogen: minify_safe + +import mitogen.parent + + +class Options(mitogen.parent.Options): + pod = None + kubectl_path = 'kubectl' + kubectl_args = None + + def __init__(self, pod, kubectl_path=None, kubectl_args=None, **kwargs): + super(Options, self).__init__(**kwargs) + assert pod + self.pod = pod + if kubectl_path: + self.kubectl_path = kubectl_path + self.kubectl_args = kubectl_args or [] + + +class Connection(mitogen.parent.Connection): + options_class = Options + child_is_immediate_subprocess = True + + # TODO: better way of capturing errors such as "No such container." 
+ create_child_args = { + 'merge_stdio': True + } + + def _get_name(self): + return u'kubectl.%s%s' % (self.options.pod, self.options.kubectl_args) + + def get_boot_command(self): + bits = [ + self.options.kubectl_path + ] + self.options.kubectl_args + [ + 'exec', '-it', self.options.pod + ] + return bits + ["--"] + super(Connection, self).get_boot_command() diff --git a/mitogen-0.3.9/mitogen/lxc.py b/mitogen-0.3.9/mitogen/lxc.py new file mode 100644 index 0000000..21dfef5 --- /dev/null +++ b/mitogen-0.3.9/mitogen/lxc.py @@ -0,0 +1,73 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +# !mitogen: minify_safe + +import mitogen.parent + + +class Options(mitogen.parent.Options): + container = None + lxc_attach_path = 'lxc-attach' + + def __init__(self, container, lxc_attach_path=None, **kwargs): + super(Options, self).__init__(**kwargs) + self.container = container + if lxc_attach_path: + self.lxc_attach_path = lxc_attach_path + + +class Connection(mitogen.parent.Connection): + options_class = Options + + child_is_immediate_subprocess = False + create_child_args = { + # If lxc-attach finds any of stdin, stdout, stderr connected to a TTY, + # to prevent input injection it creates a proxy pty, forcing all IO to + # be buffered in <4KiB chunks. So ensure stderr is also routed to the + # socketpair. + 'merge_stdio': True + } + + eof_error_hint = ( + 'Note: many versions of LXC do not report program execution failure ' + 'meaningfully. Please check the host logs (/var/log) for more ' + 'information.' + ) + + def _get_name(self): + return u'lxc.' 
+ self.options.container + + def get_boot_command(self): + bits = [ + self.options.lxc_attach_path, + '--clear-env', + '--name', self.options.container, + '--', + ] + return bits + super(Connection, self).get_boot_command() diff --git a/mitogen-0.3.9/mitogen/lxd.py b/mitogen-0.3.9/mitogen/lxd.py new file mode 100644 index 0000000..09034ab --- /dev/null +++ b/mitogen-0.3.9/mitogen/lxd.py @@ -0,0 +1,75 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+ +# !mitogen: minify_safe + +import mitogen.parent + + +class Options(mitogen.parent.Options): + container = None + lxc_path = 'lxc' + python_path = 'python' + + def __init__(self, container, lxc_path=None, **kwargs): + super(Options, self).__init__(**kwargs) + self.container = container + if lxc_path: + self.lxc_path = lxc_path + + +class Connection(mitogen.parent.Connection): + options_class = Options + + child_is_immediate_subprocess = False + create_child_args = { + # If lxc finds any of stdin, stdout, stderr connected to a TTY, to + # prevent input injection it creates a proxy pty, forcing all IO to be + # buffered in <4KiB chunks. So ensure stderr is also routed to the + # socketpair. + 'merge_stdio': True + } + + eof_error_hint = ( + 'Note: many versions of LXC do not report program execution failure ' + 'meaningfully. Please check the host logs (/var/log) for more ' + 'information.' + ) + + def _get_name(self): + return u'lxd.' + self.options.container + + def get_boot_command(self): + bits = [ + self.options.lxc_path, + 'exec', + '--mode=noninteractive', + self.options.container, + '--', + ] + return bits + super(Connection, self).get_boot_command() diff --git a/mitogen-0.3.9/mitogen/master.py b/mitogen-0.3.9/mitogen/master.py new file mode 100644 index 0000000..b1e0a1d --- /dev/null +++ b/mitogen-0.3.9/mitogen/master.py @@ -0,0 +1,1572 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. 
Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +# !mitogen: minify_safe + +""" +This module implements functionality required by master processes, such as +starting new contexts via SSH. Its size is also restricted, since it must +be sent to any context that will be used to establish additional child +contexts. +""" + +import dis +import errno +import inspect +import itertools +import logging +import os +import pkgutil +import re +import string +import sys +import threading +import types +import zlib + +try: + # Python >= 3.4, PEP 451 ModuleSpec API + import importlib.machinery + import importlib.util + from _imp import is_builtin as _is_builtin +except ImportError: + # Python < 3.4, PEP 302 Import Hooks + import imp + from imp import is_builtin as _is_builtin + +try: + import sysconfig +except ImportError: + sysconfig = None + +if not hasattr(pkgutil, 'find_loader'): + # find_loader() was new in >=2.5, but the modern pkgutil.py syntax has + # been kept intentionally 2.3 compatible so we can reuse it. 
+ from mitogen.compat import pkgutil + +import mitogen +import mitogen.core +import mitogen.minify +import mitogen.parent + +from mitogen.core import b +from mitogen.core import IOLOG +from mitogen.core import LOG +from mitogen.core import str_partition +from mitogen.core import str_rpartition +from mitogen.core import to_text + +imap = getattr(itertools, 'imap', map) +izip = getattr(itertools, 'izip', zip) + +try: + any +except NameError: + from mitogen.core import any + +try: + next +except NameError: + from mitogen.core import next + + +RLOG = logging.getLogger('mitogen.ctx') + + +# there are some cases where modules are loaded in memory only, such as +# ansible collections, and the module "filename" doesn't actually exist +SPECIAL_FILE_PATHS = { + "__synthetic__", + "" +} + + +def _stdlib_paths(): + """ + Return a set of paths from which Python imports the standard library. + """ + attr_candidates = [ + 'prefix', + 'real_prefix', # virtualenv: only set inside a virtual environment. + 'base_prefix', # venv: always set, equal to prefix if outside. + ] + prefixes = (getattr(sys, a, None) for a in attr_candidates) + version = 'python%s.%s' % sys.version_info[0:2] + s = set(os.path.realpath(os.path.join(p, 'lib', version)) + for p in prefixes if p is not None) + + # When running 'unit2 tests/module_finder_test.py' in a Py2 venv on Ubuntu + # 18.10, above is insufficient to catch the real directory. + if sysconfig is not None: + s.add(sysconfig.get_config_var('DESTLIB')) + return s + + +def is_stdlib_name(modname): + """ + Return :data:`True` if `modname` appears to come from the standard library. + """ + # `(_imp|imp).is_builtin()` isn't a documented part of Python's stdlib. + # Returns 1 if modname names a module that is "builtin" to the the Python + # interpreter (e.g. '_sre'). Otherwise 0 (e.g. 're', 'netifaces'). 
+ # + # """ + # Main is a little special - imp.is_builtin("__main__") will return False, + # but BuiltinImporter is still the most appropriate initial setting for + # its __loader__ attribute. + # """ -- comment in CPython pylifecycle.c:add_main_module() + if _is_builtin(modname) != 0: + return True + + module = sys.modules.get(modname) + if module is None: + return False + + # six installs crap with no __file__ + modpath = os.path.abspath(getattr(module, '__file__', '')) + return is_stdlib_path(modpath) + + +_STDLIB_PATHS = _stdlib_paths() + + +def is_stdlib_path(path): + return any( + os.path.commonprefix((libpath, path)) == libpath + and 'site-packages' not in path + and 'dist-packages' not in path + for libpath in _STDLIB_PATHS + ) + + +def get_child_modules(path, fullname): + """ + Return the suffixes of submodules directly neated beneath of the package + directory at `path`. + + :param str path: + Path to the module's source code on disk, or some PEP-302-recognized + equivalent. Usually this is the module's ``__file__`` attribute, but + is specified explicitly to avoid loading the module. + :param str fullname: + Name of the package we're trying to get child modules for + + :return: + List of submodule name suffixes. + """ + mod_path = os.path.dirname(path) + if mod_path != '': + return [to_text(name) for _, name, _ in pkgutil.iter_modules([mod_path])] + else: + # we loaded some weird package in memory, so we'll see if it has a custom loader we can use + loader = pkgutil.find_loader(fullname) + return [to_text(name) for name, _ in loader.iter_modules(None)] if loader else [] + + +def _looks_like_script(path): + """ + Return :data:`True` if the (possibly extensionless) file at `path` + resembles a Python script. For now we simply verify the file contains + ASCII text. 
+ """ + try: + fp = open(path, 'rb') + except IOError: + e = sys.exc_info()[1] + if e.args[0] == errno.EISDIR: + return False + raise + + try: + sample = fp.read(512).decode('latin-1') + return not set(sample).difference(string.printable) + finally: + fp.close() + + +def _py_filename(path): + """ + Returns a tuple of a Python path (if the file looks Pythonic) and whether or not + the Python path is special. Special file paths/modules might only exist in memory + """ + if not path: + return None, False + + if path[-4:] in ('.pyc', '.pyo'): + path = path.rstrip('co') + + if path.endswith('.py'): + return path, False + + if os.path.exists(path) and _looks_like_script(path): + return path, False + + basepath = os.path.basename(path) + if basepath in SPECIAL_FILE_PATHS: + return path, True + + # return None, False means that the filename passed to _py_filename does not appear + # to be python, and code later will handle when this function returns None + # see https://github.com/dw/mitogen/pull/715#discussion_r532380528 for how this + # decision was made to handle non-python files in this manner + return None, False + + +def _get_core_source(): + """ + Master version of parent.get_core_source(). + """ + source = inspect.getsource(mitogen.core) + return mitogen.minify.minimize_source(source) + + +if mitogen.is_master: + # TODO: find a less surprising way of installing this. + mitogen.parent._get_core_source = _get_core_source + + +LOAD_CONST = dis.opname.index('LOAD_CONST') +IMPORT_NAME = dis.opname.index('IMPORT_NAME') + + +def _getarg(nextb, c): + if c >= dis.HAVE_ARGUMENT: + return nextb() | (nextb() << 8) + + +if sys.version_info < (3, 0): + def iter_opcodes(co): + # Yield `(op, oparg)` tuples from the code object `co`. + ordit = imap(ord, co.co_code) + nextb = ordit.next + return ((c, _getarg(nextb, c)) for c in ordit) +elif sys.version_info < (3, 6): + def iter_opcodes(co): + # Yield `(op, oparg)` tuples from the code object `co`. 
+ ordit = iter(co.co_code) + nextb = ordit.__next__ + return ((c, _getarg(nextb, c)) for c in ordit) +else: + def iter_opcodes(co): + # Yield `(op, oparg)` tuples from the code object `co`. + ordit = iter(co.co_code) + nextb = ordit.__next__ + # https://github.com/abarnert/cpython/blob/c095a32f/Python/wordcode.md + return ((c, nextb()) for c in ordit) + + +def scan_code_imports(co): + """ + Given a code object `co`, scan its bytecode yielding any ``IMPORT_NAME`` + and associated prior ``LOAD_CONST`` instructions representing an `Import` + statement or `ImportFrom` statement. + + :return: + Generator producing `(level, modname, namelist)` tuples, where: + + * `level`: -1 for normal import, 0, for absolute import, and >0 for + relative import. + * `modname`: Name of module to import, or from where `namelist` names + are imported. + * `namelist`: for `ImportFrom`, the list of names to be imported from + `modname`. + """ + opit = iter_opcodes(co) + opit, opit2, opit3 = itertools.tee(opit, 3) + + try: + next(opit2) + next(opit3) + next(opit3) + except StopIteration: + return + + if sys.version_info >= (2, 5): + for oparg1, oparg2, (op3, arg3) in izip(opit, opit2, opit3): + if op3 == IMPORT_NAME: + op2, arg2 = oparg2 + op1, arg1 = oparg1 + if op1 == op2 == LOAD_CONST: + yield (co.co_consts[arg1], + co.co_names[arg3], + co.co_consts[arg2] or ()) + else: + # Python 2.4 did not yet have 'level', so stack format differs. + for oparg1, (op2, arg2) in izip(opit, opit2): + if op2 == IMPORT_NAME: + op1, arg1 = oparg1 + if op1 == LOAD_CONST: + yield (-1, co.co_names[arg2], co.co_consts[arg1] or ()) + + +class ThreadWatcher(object): + """ + Manage threads that wait for another thread to shut down, before invoking + `on_join()` for each associated ThreadWatcher. + + In CPython it seems possible to use this method to ensure a non-main thread + is signalled when the main thread has exited, using a third thread as a + proxy. + """ + #: Protects remaining _cls_* members. 
+ _cls_lock = threading.Lock() + + #: PID of the process that last modified the class data. If the PID + #: changes, it means the thread watch dict refers to threads that no longer + #: exist in the current process (since it forked), and so must be reset. + _cls_pid = None + + #: Map watched Thread -> list of ThreadWatcher instances. + _cls_instances_by_target = {} + + #: Map watched Thread -> watcher Thread for each watched thread. + _cls_thread_by_target = {} + + @classmethod + def _reset(cls): + """ + If we have forked since the watch dictionaries were initialized, all + that has is garbage, so clear it. + """ + if os.getpid() != cls._cls_pid: + cls._cls_pid = os.getpid() + cls._cls_instances_by_target.clear() + cls._cls_thread_by_target.clear() + + def __init__(self, target, on_join): + self.target = target + self.on_join = on_join + + @classmethod + def _watch(cls, target): + target.join() + for watcher in cls._cls_instances_by_target[target]: + watcher.on_join() + + def install(self): + self._cls_lock.acquire() + try: + self._reset() + lst = self._cls_instances_by_target.setdefault(self.target, []) + lst.append(self) + if self.target not in self._cls_thread_by_target: + self._cls_thread_by_target[self.target] = threading.Thread( + name='mitogen.master.join_thread_async', + target=self._watch, + args=(self.target,) + ) + self._cls_thread_by_target[self.target].start() + finally: + self._cls_lock.release() + + def remove(self): + self._cls_lock.acquire() + try: + self._reset() + lst = self._cls_instances_by_target.get(self.target, []) + if self in lst: + lst.remove(self) + finally: + self._cls_lock.release() + + @classmethod + def watch(cls, target, on_join): + watcher = cls(target, on_join) + watcher.install() + return watcher + + +class LogForwarder(object): + """ + Install a :data:`mitogen.core.FORWARD_LOG` handler that delivers forwarded + log events into the local logging framework. This is used by the master's + :class:`Router`. 
+ + The forwarded :class:`logging.LogRecord` objects are delivered to loggers + under ``mitogen.ctx.*`` corresponding to their + :attr:`mitogen.core.Context.name`, with the message prefixed with the + logger name used in the child. The records include some extra attributes: + + * ``mitogen_message``: Unicode original message without the logger name + prepended. + * ``mitogen_context``: :class:`mitogen.parent.Context` reference to the + source context. + * ``mitogen_name``: Original logger name. + + :param mitogen.master.Router router: + Router to install the handler on. + """ + def __init__(self, router): + self._router = router + self._cache = {} + router.add_handler( + fn=self._on_forward_log, + handle=mitogen.core.FORWARD_LOG, + ) + + def _on_forward_log(self, msg): + if msg.is_dead: + return + + context = self._router.context_by_id(msg.src_id) + if context is None: + LOG.error('%s: dropping log from unknown context %d', + self, msg.src_id) + return + + name, level_s, s = msg.data.decode('utf-8', 'replace').split('\x00', 2) + + logger_name = '%s.[%s]' % (name, context.name) + logger = self._cache.get(logger_name) + if logger is None: + self._cache[logger_name] = logger = logging.getLogger(logger_name) + + # See logging.Handler.makeRecord() + record = logging.LogRecord( + name=logger.name, + level=int(level_s), + pathname='(unknown file)', + lineno=0, + msg=s, + args=(), + exc_info=None, + ) + record.mitogen_message = s + record.mitogen_context = self._router.context_by_id(msg.src_id) + record.mitogen_name = name + logger.handle(record) + + def __repr__(self): + return 'LogForwarder(%r)' % (self._router,) + + +class FinderMethod(object): + """ + Interface to a method for locating a Python module or package given its + name according to the running Python interpreter. You'd think this was a + simple task, right? Naive young fellow, welcome to the real world. 
+ """ + def __init__(self): + self.log = LOG.getChild(self.__class__.__name__) + + def __repr__(self): + return '%s()' % (type(self).__name__,) + + def find(self, fullname): + """ + Accept a canonical module name as would be found in :data:`sys.modules` + and return a `(path, source, is_pkg)` tuple, where: + + * `path`: Unicode string containing path to source file. + * `source`: Bytestring containing source file's content. + * `is_pkg`: :data:`True` if `fullname` is a package. + + :returns: + :data:`None` if not found, or tuple as described above. + """ + raise NotImplementedError() + + +class DefectivePython3xMainMethod(FinderMethod): + """ + Recent versions of Python 3.x introduced an incomplete notion of + importer specs, and in doing so created permanent asymmetry in the + :mod:`pkgutil` interface handling for the :mod:`__main__` module. Therefore + we must handle :mod:`__main__` specially. + """ + def find(self, fullname): + """ + Find :mod:`__main__` using its :data:`__file__` attribute. + """ + if fullname != '__main__': + return None + + mod = sys.modules.get(fullname) + if not mod: + return None + + path = getattr(mod, '__file__', None) + if not (path is not None and os.path.exists(path) and _looks_like_script(path)): + return None + + fp = open(path, 'rb') + try: + source = fp.read() + finally: + fp.close() + + return path, source, False + + +class PkgutilMethod(FinderMethod): + """ + Attempt to fetch source code via pkgutil. In an ideal world, this would + be the only required implementation of get_module(). + """ + def find(self, fullname): + """ + Find `fullname` using :func:`pkgutil.find_loader`. + """ + try: + # If fullname refers to a submodule that's not already imported + # then the containing package is imported. + # Pre-'import spec' this returned None, in Python3.6 it raises + # ImportError. 
+ loader = pkgutil.find_loader(fullname) + except ImportError: + e = sys.exc_info()[1] + LOG.debug('%r: find_loader(%r) failed: %s', self, fullname, e) + return None + + if not loader: + LOG.debug('%r: find_loader(%r) returned %r, aborting', + self, fullname, loader) + return + + try: + path = loader.get_filename(fullname) + except (AttributeError, ImportError, ValueError): + # - get_filename() may throw ImportError if pkgutil.find_loader() + # picks a "parent" package's loader for some crap that's been + # stuffed in sys.modules, for example in the case of urllib3: + # "loader for urllib3.contrib.pyopenssl cannot handle + # requests.packages.urllib3.contrib.pyopenssl" + e = sys.exc_info()[1] + LOG.debug('%r: %r.get_file_name(%r) failed: %r', self, loader, fullname, e) + return + + path, is_special = _py_filename(path) + + try: + source = loader.get_source(fullname) + except AttributeError: + # Per PEP-302, get_source() is optional, + e = sys.exc_info()[1] + LOG.debug('%r: %r.get_source() failed: %r', self, loader, fullname, e) + return + + try: + is_pkg = loader.is_package(fullname) + except AttributeError: + # Per PEP-302, is_package() is optional, + e = sys.exc_info()[1] + LOG.debug('%r: %r.is_package(%r) failed: %r', self, loader, fullname, e) + return + + # workaround for special python modules that might only exist in memory + if is_special and is_pkg and not source: + source = '\n' + + if path is None or source is None: + LOG.debug('%r: path=%r, source=%r, aborting', self, path, source) + return + + if isinstance(source, mitogen.core.UnicodeType): + # get_source() returns "string" according to PEP-302, which was + # reinterpreted for Python 3 to mean a Unicode string. + source = source.encode('utf-8') + + return path, source, is_pkg + + +class SysModulesMethod(FinderMethod): + """ + Attempt to fetch source code via :data:`sys.modules`. This was originally + specifically to support :mod:`__main__`, but it may catch a few more cases. 
+ """ + def find(self, fullname): + """ + Find `fullname` using its :data:`__file__` attribute. + """ + try: + module = sys.modules[fullname] + except KeyError: + LOG.debug('%r: sys.modules[%r] absent, aborting', self, fullname) + return + + if not isinstance(module, types.ModuleType): + LOG.debug('%r: sys.modules[%r] is %r, aborting', + self, fullname, module) + return + + try: + resolved_name = module.__name__ + except AttributeError: + LOG.debug('%r: %r has no __name__, aborting', self, module) + return + + if resolved_name != fullname: + LOG.debug('%r: %r.__name__ is %r, aborting', + self, module, resolved_name) + return + + try: + path = module.__file__ + except AttributeError: + LOG.debug('%r: %r has no __file__, aborting', self, module) + return + + path, _ = _py_filename(path) + if not path: + LOG.debug('%r: %r.__file__ is %r, aborting', self, module, path) + return + + LOG.debug('%r: sys.modules[%r]: found %s', self, fullname, path) + is_pkg = hasattr(module, '__path__') + try: + source = inspect.getsource(module) + except IOError: + # Work around inspect.getsourcelines() bug for 0-byte __init__.py + # files. + if not is_pkg: + raise + source = '\n' + + if isinstance(source, mitogen.core.UnicodeType): + # get_source() returns "string" according to PEP-302, which was + # reinterpreted for Python 3 to mean a Unicode string. + source = source.encode('utf-8') + + return path, source, is_pkg + + +class ParentImpEnumerationMethod(FinderMethod): + """ + Attempt to fetch source code by examining the module's (hopefully less + insane) parent package, and if no insane parents exist, simply use + :mod:`sys.path` to search for it from scratch on the filesystem using the + normal Python lookup mechanism. + + This is required for older versions of :mod:`ansible.compat.six`, + :mod:`plumbum.colors`, Ansible 2.8 :mod:`ansible.module_utils.distro` and + its submodule :mod:`ansible.module_utils.distro._distro`. 
+
+    When some package dynamically replaces itself in :data:`sys.modules`, but
+    only conditionally according to some program logic, it is possible that
+    children may attempt to load modules and subpackages from it that can no
+    longer be resolved by examining a (corrupted) parent.
+
+    For cases like :mod:`ansible.module_utils.distro`, this must handle cases
+    where a package transmuted itself into a totally unrelated module during
+    import and vice versa, where :data:`sys.modules` is replaced with junk that
+    makes it impossible to discover the loaded module using the in-memory
+    module object or any parent package's :data:`__path__`, since they have all
+    been overwritten. Some men just want to watch the world burn.
+    """
+
+    @staticmethod
+    def _iter_parents(fullname):
+        """
+        >>> list(ParentImpEnumerationMethod._iter_parents('a'))
+        [('', 'a')]
+        >>> list(ParentImpEnumerationMethod._iter_parents('a.b.c'))
+        [('a.b', 'c'), ('a', 'b'), ('', 'a')]
+        """
+        while fullname:
+            fullname, _, modname = str_rpartition(fullname, u'.')
+            yield fullname, modname
+
+    def _find_sane_parent(self, fullname):
+        """
+        Iteratively search :data:`sys.modules` for the least indirect parent of
+        `fullname` that's from the same package and has a :data:`__path__`
+        attribute.
+
+        :return:
+            `(modname, search_path, modpath)` tuple, where:
+
+            * `modname`: canonical name of the found package, or the empty
+              string if none is found.
+            * `search_path`: :data:`__path__` attribute of the least
+              indirect parent found, or :data:`None` if no indirect parent
+              was found.
+            * `modpath`: list of module name components leading from `path`
+              to the target module.
+ """ + modpath = [] + for pkgname, modname in self._iter_parents(fullname): + modpath.insert(0, modname) + if not pkgname: + return [], None, modpath + + try: + pkg = sys.modules[pkgname] + except KeyError: + LOG.debug('%r: sys.modules[%r] absent, skipping', self, pkgname) + continue + + try: + resolved_pkgname = pkg.__name__ + except AttributeError: + LOG.debug('%r: %r has no __name__, skipping', self, pkg) + continue + + if resolved_pkgname != pkgname: + LOG.debug('%r: %r.__name__ is %r, skipping', + self, pkg, resolved_pkgname) + continue + + try: + path = pkg.__path__ + except AttributeError: + LOG.debug('%r: %r has no __path__, skipping', self, pkg) + continue + + if not path: + LOG.debug('%r: %r.__path__ is %r, skipping', self, pkg, path) + continue + + return pkgname.split('.'), path, modpath + + def _found_package(self, fullname, path): + path = os.path.join(path, '__init__.py') + LOG.debug('%r: %r is PKG_DIRECTORY: %r', self, fullname, path) + return self._found_module( + fullname=fullname, + path=path, + fp=open(path, 'rb'), + is_pkg=True, + ) + + def _found_module(self, fullname, path, fp, is_pkg=False): + try: + path, _ = _py_filename(path) + if not path: + return + + source = fp.read() + finally: + if fp: + fp.close() + + if isinstance(source, mitogen.core.UnicodeType): + # get_source() returns "string" according to PEP-302, which was + # reinterpreted for Python 3 to mean a Unicode string. + source = source.encode('utf-8') + return path, source, is_pkg + + def _find_one_component(self, modname, search_path): + try: + #fp, path, (suffix, _, kind) = imp.find_module(modname, search_path) + # FIXME The imp module was removed in Python 3.12. + return imp.find_module(modname, search_path) + except ImportError: + e = sys.exc_info()[1] + LOG.debug('%r: imp.find_module(%r, %r) -> %s', + self, modname, [search_path], e) + return None + + def find(self, fullname): + """ + See implementation for a description of how this works. 
+ """ + if sys.version_info >= (3, 4): + return None + + #if fullname not in sys.modules: + # Don't attempt this unless a module really exists in sys.modules, + # else we could return junk. + #return + + fullname = to_text(fullname) + modname, search_path, modpath = self._find_sane_parent(fullname) + while True: + tup = self._find_one_component(modpath.pop(0), search_path) + if tup is None: + return None + + fp, path, (suffix, _, kind) = tup + if modpath: + # Still more components to descent. Result must be a package + if fp: + fp.close() + if kind != imp.PKG_DIRECTORY: + LOG.debug('%r: %r appears to be child of non-package %r', + self, fullname, path) + return None + search_path = [path] + elif kind == imp.PKG_DIRECTORY: + return self._found_package(fullname, path) + else: + return self._found_module(fullname, path, fp) + + +class ParentSpecEnumerationMethod(ParentImpEnumerationMethod): + def _find_parent_spec(self, fullname): + #history = [] + debug = self.log.debug + children = [] + for parent_name, child_name in self._iter_parents(fullname): + children.insert(0, child_name) + if not parent_name: + debug('abandoning %r, reached top-level', fullname) + return None, children + + try: + parent = sys.modules[parent_name] + except KeyError: + debug('skipping %r, not in sys.modules', parent_name) + continue + + try: + spec = parent.__spec__ + except AttributeError: + debug('skipping %r: %r.__spec__ is absent', + parent_name, parent) + continue + + if not spec: + debug('skipping %r: %r.__spec__=%r', + parent_name, parent, spec) + continue + + if spec.name != parent_name: + debug('skipping %r: %r.__spec__.name=%r does not match', + parent_name, parent, spec.name) + continue + + if not spec.submodule_search_locations: + debug('skipping %r: %r.__spec__.submodule_search_locations=%r', + parent_name, parent, spec.submodule_search_locations) + continue + + return spec, children + + raise ValueError('%s._find_parent_spec(%r) unexpectedly reached bottom' + % 
(self.__class__.__name__, fullname)) + + def find(self, fullname): + # Returns absolute path, ParentImpEnumerationMethod returns relative + # >>> spec_pem.find('six_brokenpkg._six')[::2] + # ('/Users/alex/src/mitogen/tests/data/importer/six_brokenpkg/_six.py', False) + + if sys.version_info < (3, 4): + return None + + fullname = to_text(fullname) + spec, children = self._find_parent_spec(fullname) + for child_name in children: + if spec: + name = '%s.%s' % (spec.name, child_name) + submodule_search_locations = spec.submodule_search_locations + else: + name = child_name + submodule_search_locations = None + spec = importlib.util._find_spec(name, submodule_search_locations) + if spec is None: + self.log.debug('%r spec unavailable from %s', fullname, spec) + return None + + is_package = spec.submodule_search_locations is not None + if name != fullname: + if not is_package: + self.log.debug('%r appears to be child of non-package %r', + fullname, spec) + return None + continue + + if not spec.has_location: + self.log.debug('%r.origin cannot be read as a file', spec) + return None + + if os.path.splitext(spec.origin)[1] != '.py': + self.log.debug('%r.origin does not contain Python source code', + spec) + return None + + # FIXME This should use loader.get_source() + with open(spec.origin, 'rb') as f: + source = f.read() + + return spec.origin, source, is_package + + raise ValueError('%s.find(%r) unexpectedly reached bottom' + % (self.__class__.__name__, fullname)) + + +class ModuleFinder(object): + """ + Given the name of a loaded module, make a best-effort attempt at finding + related modules likely needed by a child context requesting the original + module. + """ + def __init__(self): + #: Import machinery is expensive, keep :py:meth`:get_module_source` + #: results around. + self._found_cache = {} + + #: Avoid repeated dependency scanning, which is expensive. 
+ self._related_cache = {} + + def __repr__(self): + return 'ModuleFinder()' + + def add_source_override(self, fullname, path, source, is_pkg): + """ + Explicitly install a source cache entry, preventing usual lookup + methods from being used. + + Beware the value of `path` is critical when `is_pkg` is specified, + since it directs where submodules are searched for. + + :param str fullname: + Name of the module to override. + :param str path: + Module's path as it will appear in the cache. + :param bytes source: + Module source code as a bytestring. + :param bool is_pkg: + :data:`True` if the module is a package. + """ + self._found_cache[fullname] = (path, source, is_pkg) + + get_module_methods = [ + DefectivePython3xMainMethod(), + PkgutilMethod(), + SysModulesMethod(), + ParentSpecEnumerationMethod(), + ParentImpEnumerationMethod(), + ] + + def get_module_source(self, fullname): + """ + Given the name of a loaded module `fullname`, attempt to find its + source code. + + :returns: + Tuple of `(module path, source text, is package?)`, or :data:`None` + if the source cannot be found. + """ + tup = self._found_cache.get(fullname) + if tup: + return tup + + for method in self.get_module_methods: + tup = method.find(fullname) + if tup: + #LOG.debug('%r returned %r', method, tup) + break + else: + tup = None, None, None + LOG.debug('get_module_source(%r): cannot find source', fullname) + + self._found_cache[fullname] = tup + return tup + + def resolve_relpath(self, fullname, level): + """ + Given an ImportFrom AST node, guess the prefix that should be tacked on + to an alias name to produce a canonical name. `fullname` is the name of + the module in which the ImportFrom appears. + """ + mod = sys.modules.get(fullname, None) + if hasattr(mod, '__path__'): + fullname += '.__init__' + + if level == 0 or not fullname: + return '' + + bits = fullname.split('.') + if len(bits) <= level: + # This would be an ImportError in real code. 
+ return '' + + return '.'.join(bits[:-level]) + '.' + + def generate_parent_names(self, fullname): + while '.' in fullname: + fullname, _, _ = str_rpartition(to_text(fullname), u'.') + yield fullname + + def find_related_imports(self, fullname): + """ + Return a list of non-stdlib modules that are directly imported by + `fullname`, plus their parents. + + The list is determined by retrieving the source code of + `fullname`, compiling it, and examining all IMPORT_NAME ops. + + :param fullname: Fully qualified name of an *already imported* module + for which source code can be retrieved + :type fullname: str + """ + related = self._related_cache.get(fullname) + if related is not None: + return related + + modpath, src, _ = self.get_module_source(fullname) + if src is None: + return [] + + maybe_names = list(self.generate_parent_names(fullname)) + + co = compile(src, modpath, 'exec') + for level, modname, namelist in scan_code_imports(co): + if level == -1: + modnames = [modname, '%s.%s' % (fullname, modname)] + else: + modnames = [ + '%s%s' % (self.resolve_relpath(fullname, level), modname) + ] + + maybe_names.extend(modnames) + maybe_names.extend( + '%s.%s' % (mname, name) + for mname in modnames + for name in namelist + ) + + return self._related_cache.setdefault(fullname, sorted( + set( + mitogen.core.to_text(name) + for name in maybe_names + if sys.modules.get(name) is not None + and not is_stdlib_name(name) + and u'six.moves' not in name # TODO: crap + ) + )) + + def find_related(self, fullname): + """ + Return a list of non-stdlib modules that are imported directly or + indirectly by `fullname`, plus their parents. + + This method is like :py:meth:`find_related_imports`, but also + recursively searches any modules which are imported by `fullname`. 
+ + :param fullname: Fully qualified name of an *already imported* module + for which source code can be retrieved + :type fullname: str + """ + stack = [fullname] + found = set() + + while stack: + name = stack.pop(0) + names = self.find_related_imports(name) + stack.extend(set(names).difference(set(found).union(stack))) + found.update(names) + + found.discard(fullname) + return sorted(found) + + +class ModuleResponder(object): + def __init__(self, router): + self._log = logging.getLogger('mitogen.responder') + self._router = router + self._finder = ModuleFinder() + self._cache = {} # fullname -> pickled + self.blacklist = [] + self.whitelist = [''] + + #: Context -> set([fullname, ..]) + self._forwarded_by_context = {} + + #: Number of GET_MODULE messages received. + self.get_module_count = 0 + #: Total time spent in uncached GET_MODULE. + self.get_module_secs = 0.0 + #: Total time spent minifying modules. + self.minify_secs = 0.0 + #: Number of successful LOAD_MODULE messages sent. + self.good_load_module_count = 0 + #: Total bytes in successful LOAD_MODULE payloads. + self.good_load_module_size = 0 + #: Number of negative LOAD_MODULE messages sent. + self.bad_load_module_count = 0 + + router.add_handler( + fn=self._on_get_module, + handle=mitogen.core.GET_MODULE, + ) + + def __repr__(self): + return 'ModuleResponder' + + def add_source_override(self, fullname, path, source, is_pkg): + """ + See :meth:`ModuleFinder.add_source_override`. + """ + self._finder.add_source_override(fullname, path, source, is_pkg) + + MAIN_RE = re.compile(b(r'^if\s+__name__\s*==\s*.__main__.\s*:'), re.M) + main_guard_msg = ( + "A child context attempted to import __main__, however the main " + "module present in the master process lacks an execution guard. 
" + "Update %r to prevent unintended execution, using a guard like:\n" + "\n" + " if __name__ == '__main__':\n" + " # your code here.\n" + ) + + def whitelist_prefix(self, fullname): + if self.whitelist == ['']: + self.whitelist = ['mitogen'] + self.whitelist.append(fullname) + + def blacklist_prefix(self, fullname): + self.blacklist.append(fullname) + + def neutralize_main(self, path, src): + """ + Given the source for the __main__ module, try to find where it begins + conditional execution based on a "if __name__ == '__main__'" guard, and + remove any code after that point. + """ + match = self.MAIN_RE.search(src) + if match: + return src[:match.start()] + + if b('mitogen.main(') in src: + return src + + self._log.error(self.main_guard_msg, path) + raise ImportError('refused') + + def _make_negative_response(self, fullname): + return (fullname, None, None, None, ()) + + minify_safe_re = re.compile(b(r'\s+#\s*!mitogen:\s*minify_safe')) + + def _build_tuple(self, fullname): + if fullname in self._cache: + return self._cache[fullname] + + if mitogen.core.is_blacklisted_import(self, fullname): + raise ImportError('blacklisted') + + path, source, is_pkg = self._finder.get_module_source(fullname) + if path and is_stdlib_path(path): + # Prevent loading of 2.x<->3.x stdlib modules! This costs one + # RTT per hit, so a client-side solution is also required. + self._log.debug('refusing to serve stdlib module %r', fullname) + tup = self._make_negative_response(fullname) + self._cache[fullname] = tup + return tup + + if source is None: + # TODO: make this .warning() or similar again once importer has its + # own logging category. + self._log.debug('could not find source for %r', fullname) + tup = self._make_negative_response(fullname) + self._cache[fullname] = tup + return tup + + if self.minify_safe_re.search(source): + # If the module contains a magic marker, it's safe to minify. 
+ t0 = mitogen.core.now() + source = mitogen.minify.minimize_source(source).encode('utf-8') + self.minify_secs += mitogen.core.now() - t0 + + if is_pkg: + pkg_present = get_child_modules(path, fullname) + self._log.debug('%s is a package at %s with submodules %r', + fullname, path, pkg_present) + else: + pkg_present = None + + if fullname == '__main__': + source = self.neutralize_main(path, source) + compressed = mitogen.core.Blob(zlib.compress(source, 9)) + related = [ + to_text(name) + for name in self._finder.find_related(fullname) + if not mitogen.core.is_blacklisted_import(self, name) + ] + # 0:fullname 1:pkg_present 2:path 3:compressed 4:related + tup = ( + to_text(fullname), + pkg_present, + to_text(path), + compressed, + related + ) + self._cache[fullname] = tup + return tup + + def _send_load_module(self, stream, fullname): + if fullname not in stream.protocol.sent_modules: + tup = self._build_tuple(fullname) + msg = mitogen.core.Message.pickled( + tup, + dst_id=stream.protocol.remote_id, + handle=mitogen.core.LOAD_MODULE, + ) + self._log.debug('sending %s (%.2f KiB) to %s', + fullname, len(msg.data) / 1024.0, stream.name) + self._router._async_route(msg) + stream.protocol.sent_modules.add(fullname) + if tup[2] is not None: + self.good_load_module_count += 1 + self.good_load_module_size += len(msg.data) + else: + self.bad_load_module_count += 1 + + def _send_module_load_failed(self, stream, fullname): + self.bad_load_module_count += 1 + stream.protocol.send( + mitogen.core.Message.pickled( + self._make_negative_response(fullname), + dst_id=stream.protocol.remote_id, + handle=mitogen.core.LOAD_MODULE, + ) + ) + + def _send_module_and_related(self, stream, fullname): + if fullname in stream.protocol.sent_modules: + return + + try: + tup = self._build_tuple(fullname) + for name in tup[4]: # related + parent, _, _ = str_partition(name, '.') + if parent != fullname and parent not in stream.protocol.sent_modules: + # Parent hasn't been sent, so don't load 
submodule yet. + continue + + self._send_load_module(stream, name) + self._send_load_module(stream, fullname) + except Exception: + LOG.debug('While importing %r', fullname, exc_info=True) + self._send_module_load_failed(stream, fullname) + + def _on_get_module(self, msg): + if msg.is_dead: + return + + stream = self._router.stream_by_id(msg.src_id) + if stream is None: + return + + fullname = msg.data.decode() + self._log.debug('%s requested module %s', stream.name, fullname) + self.get_module_count += 1 + if fullname in stream.protocol.sent_modules: + LOG.warning('_on_get_module(): dup request for %r from %r', + fullname, stream) + + t0 = mitogen.core.now() + try: + self._send_module_and_related(stream, fullname) + finally: + self.get_module_secs += mitogen.core.now() - t0 + + def _send_forward_module(self, stream, context, fullname): + if stream.protocol.remote_id != context.context_id: + stream.protocol._send( + mitogen.core.Message( + data=b('%s\x00%s' % (context.context_id, fullname)), + handle=mitogen.core.FORWARD_MODULE, + dst_id=stream.protocol.remote_id, + ) + ) + + def _forward_one_module(self, context, fullname): + forwarded = self._forwarded_by_context.get(context) + if forwarded is None: + forwarded = set() + self._forwarded_by_context[context] = forwarded + + if fullname in forwarded: + return + + path = [] + while fullname: + path.append(fullname) + fullname, _, _ = str_rpartition(fullname, u'.') + + stream = self._router.stream_by_id(context.context_id) + if stream is None: + LOG.debug('%r: dropping forward of %s to no longer existent ' + '%r', self, path[0], context) + return + + for fullname in reversed(path): + self._send_module_and_related(stream, fullname) + self._send_forward_module(stream, context, fullname) + + def _forward_modules(self, context, fullnames): + IOLOG.debug('%r._forward_modules(%r, %r)', self, context, fullnames) + for fullname in fullnames: + self._forward_one_module(context, mitogen.core.to_text(fullname)) + + def 
forward_modules(self, context, fullnames): + self._router.broker.defer(self._forward_modules, context, fullnames) + + +class Broker(mitogen.core.Broker): + """ + .. note:: + + You may construct as many brokers as desired, and use the same broker + for multiple routers, however usually only one broker need exist. + Multiple brokers may be useful when dealing with sets of children with + differing lifetimes. For example, a subscription service where + non-payment results in termination for one customer. + + :param bool install_watcher: + If :data:`True`, an additional thread is started to monitor the + lifetime of the main thread, triggering :meth:`shutdown` + automatically in case the user forgets to call it, or their code + crashed. + + You should not rely on this functionality in your program, it is only + intended as a fail-safe and to simplify the API for new users. In + particular, alternative Python implementations may not be able to + support watching the main thread. + """ + shutdown_timeout = 5.0 + _watcher = None + poller_class = mitogen.parent.PREFERRED_POLLER + + def __init__(self, install_watcher=True): + if install_watcher: + self._watcher = ThreadWatcher.watch( + target=mitogen.core.threading__current_thread(), + on_join=self.shutdown, + ) + super(Broker, self).__init__() + self.timers = mitogen.parent.TimerList() + + def shutdown(self): + super(Broker, self).shutdown() + if self._watcher: + self._watcher.remove() + + +class Router(mitogen.parent.Router): + """ + Extend :class:`mitogen.core.Router` with functionality useful to masters, + and child contexts who later become masters. Currently when this class is + required, the target context's router is upgraded at runtime. + + .. note:: + + You may construct as many routers as desired, and use the same broker + for multiple routers, however usually only one broker and router need + exist. 
Multiple routers may be useful when dealing with separate trust + domains, for example, manipulating infrastructure belonging to separate + customers or projects. + + :param mitogen.master.Broker broker: + Broker to use. If not specified, a private :class:`Broker` is created. + + :param int max_message_size: + Override the maximum message size this router is willing to receive or + transmit. Any value set here is automatically inherited by any children + created by the router. + + This has a liberal default of 128 MiB, but may be set much lower. + Beware that setting it below 64KiB may encourage unexpected failures as + parents and children can no longer route large Python modules that may + be required by your application. + """ + + broker_class = Broker + + #: When :data:`True`, cause the broker thread and any subsequent broker and + #: main threads existing in any child to write + #: ``/tmp/mitogen.stats...log`` containing a + #: :mod:`cProfile` dump on graceful exit. Must be set prior to construction + #: of any :class:`Broker`, e.g. via:: + #: + #: mitogen.master.Router.profiling = True + profiling = os.environ.get('MITOGEN_PROFILING') is not None + + def __init__(self, broker=None, max_message_size=None): + if broker is None: + broker = self.broker_class() + if max_message_size: + self.max_message_size = max_message_size + super(Router, self).__init__(broker) + self.upgrade() + + def upgrade(self): + self.id_allocator = IdAllocator(self) + self.responder = ModuleResponder(self) + self.log_forwarder = LogForwarder(self) + self.route_monitor = mitogen.parent.RouteMonitor(router=self) + self.add_handler( # TODO: cutpaste. 
+ fn=self._on_detaching, + handle=mitogen.core.DETACHING, + persist=True, + ) + + def _on_broker_exit(self): + super(Router, self)._on_broker_exit() + dct = self.get_stats() + dct['self'] = self + dct['minify_ms'] = 1000 * dct['minify_secs'] + dct['get_module_ms'] = 1000 * dct['get_module_secs'] + dct['good_load_module_size_kb'] = dct['good_load_module_size'] / 1024.0 + dct['good_load_module_size_avg'] = ( + ( + dct['good_load_module_size'] / + (float(dct['good_load_module_count']) or 1.0) + ) / 1024.0 + ) + + LOG.debug( + '%(self)r: stats: ' + '%(get_module_count)d module requests in ' + '%(get_module_ms)d ms, ' + '%(good_load_module_count)d sent ' + '(%(minify_ms)d ms minify time), ' + '%(bad_load_module_count)d negative responses. ' + 'Sent %(good_load_module_size_kb).01f kb total, ' + '%(good_load_module_size_avg).01f kb avg.' + % dct + ) + + def get_stats(self): + """ + Return performance data for the module responder. + + :returns: + + Dict containing keys: + + * `get_module_count`: Integer count of + :data:`mitogen.core.GET_MODULE` messages received. + * `get_module_secs`: Floating point total seconds spent servicing + :data:`mitogen.core.GET_MODULE` requests. + * `good_load_module_count`: Integer count of successful + :data:`mitogen.core.LOAD_MODULE` messages sent. + * `good_load_module_size`: Integer total bytes sent in + :data:`mitogen.core.LOAD_MODULE` message payloads. + * `bad_load_module_count`: Integer count of negative + :data:`mitogen.core.LOAD_MODULE` messages sent. + * `minify_secs`: CPU seconds spent minifying modules marked + minify-safe. 
+ """ + return { + 'get_module_count': self.responder.get_module_count, + 'get_module_secs': self.responder.get_module_secs, + 'good_load_module_count': self.responder.good_load_module_count, + 'good_load_module_size': self.responder.good_load_module_size, + 'bad_load_module_count': self.responder.bad_load_module_count, + 'minify_secs': self.responder.minify_secs, + } + + def enable_debug(self): + """ + Cause this context and any descendant child contexts to write debug + logs to ``/tmp/mitogen..log``. + """ + mitogen.core.enable_debug_logging() + self.debug = True + + def __enter__(self): + return self + + def __exit__(self, e_type, e_val, tb): + self.broker.shutdown() + self.broker.join() + + def disconnect_stream(self, stream): + self.broker.defer(stream.on_disconnect, self.broker) + + def disconnect_all(self): + # making stream_by_id python3-safe by converting stream_by_id values iter to list + for stream in list(self._stream_by_id.values()): + self.disconnect_stream(stream) + + +class IdAllocator(object): + """ + Allocate IDs for new contexts constructed locally, and blocks of IDs for + children to allocate their own IDs using + :class:`mitogen.parent.ChildIdAllocator` without risk of conflict, and + without necessitating network round-trips for each new context. + + This class responds to :data:`mitogen.core.ALLOCATE_ID` messages received + from children by replying with fresh block ID allocations. + + The master's :class:`IdAllocator` instance can be accessed via + :attr:`mitogen.master.Router.id_allocator`. + """ + #: Block allocations are made in groups of 1000 by default. + BLOCK_SIZE = 1000 + + def __init__(self, router): + self.router = router + self.next_id = 1 + self.lock = threading.Lock() + router.add_handler( + fn=self.on_allocate_id, + handle=mitogen.core.ALLOCATE_ID, + ) + + def __repr__(self): + return 'IdAllocator(%r)' % (self.router,) + + def allocate(self): + """ + Allocate a context ID by directly incrementing an internal counter. 
+ + :returns: + The new context ID. + """ + self.lock.acquire() + try: + id_ = self.next_id + self.next_id += 1 + return id_ + finally: + self.lock.release() + + def allocate_block(self): + """ + Allocate a block of IDs for use in a child context. + + This function is safe to call from any thread. + + :returns: + Tuple of the form `(id, end_id)` where `id` is the first usable ID + and `end_id` is the last usable ID. + """ + self.lock.acquire() + try: + id_ = self.next_id + self.next_id += self.BLOCK_SIZE + end_id = id_ + self.BLOCK_SIZE + LOG.debug('%r: allocating [%d..%d)', self, id_, end_id) + return id_, end_id + finally: + self.lock.release() + + def on_allocate_id(self, msg): + if msg.is_dead: + return + + id_, last_id = self.allocate_block() + requestee = self.router.context_by_id(msg.src_id) + LOG.debug('%r: allocating [%r..%r) to %r', + self, id_, last_id, requestee) + msg.reply((id_, last_id)) diff --git a/mitogen-0.3.9/mitogen/minify.py b/mitogen-0.3.9/mitogen/minify.py new file mode 100644 index 0000000..09fdc4e --- /dev/null +++ b/mitogen-0.3.9/mitogen/minify.py @@ -0,0 +1,143 @@ +# Copyright 2017, Alex Willmer +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +# !mitogen: minify_safe + +import sys + +try: + from io import StringIO +except ImportError: + from StringIO import StringIO + +import mitogen.core + +if sys.version_info < (2, 7, 11): + from mitogen.compat import tokenize +else: + import tokenize + + +def minimize_source(source): + """ + Remove comments and docstrings from Python `source`, preserving line + numbers and syntax of empty blocks. + + :param str source: + The source to minimize. + + :returns str: + The minimized source. + """ + source = mitogen.core.to_text(source) + tokens = tokenize.generate_tokens(StringIO(source).readline) + tokens = strip_comments(tokens) + tokens = strip_docstrings(tokens) + tokens = reindent(tokens) + return tokenize.untokenize(tokens) + + +def strip_comments(tokens): + """ + Drop comment tokens from a `tokenize` stream. + + Comments on lines 1-2 are kept, to preserve hashbang and encoding. + Trailing whitespace is remove from all lines. 
+ """ + prev_typ = None + prev_end_col = 0 + for typ, tok, (start_row, start_col), (end_row, end_col), line in tokens: + if typ in (tokenize.NL, tokenize.NEWLINE): + if prev_typ in (tokenize.NL, tokenize.NEWLINE): + start_col = 0 + else: + start_col = prev_end_col + end_col = start_col + 1 + elif typ == tokenize.COMMENT and start_row > 2: + continue + prev_typ = typ + prev_end_col = end_col + yield typ, tok, (start_row, start_col), (end_row, end_col), line + + +def strip_docstrings(tokens): + """ + Replace docstring tokens with NL tokens in a `tokenize` stream. + + Any STRING token not part of an expression is deemed a docstring. + Indented docstrings are not yet recognised. + """ + stack = [] + state = 'wait_string' + for t in tokens: + typ = t[0] + if state == 'wait_string': + if typ in (tokenize.NL, tokenize.COMMENT): + yield t + elif typ in (tokenize.DEDENT, tokenize.INDENT, tokenize.STRING): + stack.append(t) + elif typ == tokenize.NEWLINE: + stack.append(t) + start_line, end_line = stack[0][2][0], stack[-1][3][0]+1 + for i in range(start_line, end_line): + yield tokenize.NL, '\n', (i, 0), (i,1), '\n' + for t in stack: + if t[0] in (tokenize.DEDENT, tokenize.INDENT): + yield t[0], t[1], (i+1, t[2][1]), (i+1, t[3][1]), t[4] + del stack[:] + else: + stack.append(t) + for t in stack: yield t + del stack[:] + state = 'wait_newline' + elif state == 'wait_newline': + if typ == tokenize.NEWLINE: + state = 'wait_string' + yield t + + +def reindent(tokens, indent=' '): + """ + Replace existing indentation in a token steam, with `indent`. 
+ """ + old_levels = [] + old_level = 0 + new_level = 0 + for typ, tok, (start_row, start_col), (end_row, end_col), line in tokens: + if typ == tokenize.INDENT: + old_levels.append(old_level) + old_level = len(tok) + new_level += 1 + tok = indent * new_level + elif typ == tokenize.DEDENT: + old_level = old_levels.pop() + new_level -= 1 + start_col = max(0, start_col - old_level + new_level) + if start_row == end_row: + end_col = start_col + len(tok) + yield typ, tok, (start_row, start_col), (end_row, end_col), line diff --git a/mitogen-0.3.9/mitogen/os_fork.py b/mitogen-0.3.9/mitogen/os_fork.py new file mode 100644 index 0000000..9c649d0 --- /dev/null +++ b/mitogen-0.3.9/mitogen/os_fork.py @@ -0,0 +1,186 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +# !mitogen: minify_safe + +""" +Support for operating in a mixed threading/forking environment. +""" + +import os +import socket +import sys +import weakref + +import mitogen.core + + +# List of weakrefs. On Python 2.4, mitogen.core registers its Broker on this +# list and mitogen.service registers its Pool too. +_brokers = weakref.WeakKeyDictionary() +_pools = weakref.WeakKeyDictionary() + + +def _notice_broker_or_pool(obj): + """ + Used by :mod:`mitogen.core` and :mod:`mitogen.service` to automatically + register every broker and pool on Python 2.4/2.5. + """ + if isinstance(obj, mitogen.core.Broker): + _brokers[obj] = True + else: + _pools[obj] = True + + +def wrap_os__fork(): + corker = Corker( + brokers=list(_brokers), + pools=list(_pools), + ) + try: + corker.cork() + return os__fork() + finally: + corker.uncork() + + +# If Python 2.4/2.5 where threading state is not fixed up, subprocess.Popen() +# may still deadlock due to the broker thread. In this case, pause os.fork() so +# that all active threads are paused during fork. +if sys.version_info < (2, 6): + os__fork = os.fork + os.fork = wrap_os__fork + + +class Corker(object): + """ + Arrange for :class:`mitogen.core.Broker` and optionally + :class:`mitogen.service.Pool` to be temporarily "corked" while fork + operations may occur. 
+ + In a mixed threading/forking environment, it is critical no threads are + active at the moment of fork, as they could hold mutexes whose state is + unrecoverably snapshotted in the locked state in the fork child, causing + deadlocks at random future moments. + + To ensure a target thread has all locks dropped, it is made to write a + large string to a socket with a small buffer that has :data:`os.O_NONBLOCK` + disabled. CPython will drop the GIL and enter the ``write()`` system call, + where it will block until the socket buffer is drained, or the write side + is closed. + + :class:`mitogen.core.Poller` is used to ensure the thread really has + blocked outside any Python locks, by checking if the socket buffer has + started to fill. + + Since this necessarily involves posting a message to every existent thread + and verifying acknowledgement, it will never be a fast operation. + + This does not yet handle the case of corking being initiated from within a + thread that is also a cork target. + + :param brokers: + Sequence of :class:`mitogen.core.Broker` instances to cork. + :param pools: + Sequence of :class:`mitogen.core.Pool` instances to cork. + """ + def __init__(self, brokers=(), pools=()): + self.brokers = brokers + self.pools = pools + + def _do_cork(self, s, wsock): + try: + try: + while True: + # at least EINTR is possible. Do our best to keep handling + # outside the GIL in this case using sendall(). + wsock.sendall(s) + except socket.error: + pass + finally: + wsock.close() + + def _cork_one(self, s, obj): + """ + Construct a socketpair, saving one side of it, and passing the other to + `obj` to be written to by one of its threads. 
+ """ + rsock, wsock = mitogen.parent.create_socketpair(size=4096) + mitogen.core.set_cloexec(rsock.fileno()) + mitogen.core.set_cloexec(wsock.fileno()) + mitogen.core.set_block(wsock) # gevent + self._rsocks.append(rsock) + obj.defer(self._do_cork, s, wsock) + + def _verify_one(self, rsock): + """ + Pause until the socket `rsock` indicates readability, due to + :meth:`_do_cork` triggering a blocking write on another thread. + """ + poller = mitogen.core.Poller() + poller.start_receive(rsock.fileno()) + try: + while True: + for fd in poller.poll(): + return + finally: + poller.close() + + def cork(self): + """ + Arrange for any associated brokers and pools to be paused with no locks + held. This will not return until each thread acknowledges it has ceased + execution. + """ + current = mitogen.core.threading__current_thread() + s = mitogen.core.b('CORK') * ((128 // 4) * 1024) + self._rsocks = [] + + # Pools must be paused first, as existing work may require the + # participation of a broker in order to complete. + for pool in self.pools: + if not pool.closed: + for th in pool._threads: + if th != current: + self._cork_one(s, pool) + + for broker in self.brokers: + if broker._alive: + if broker._thread != current: + self._cork_one(s, broker) + + # Pause until we can detect every thread has entered write(). + for rsock in self._rsocks: + self._verify_one(rsock) + + def uncork(self): + """ + Arrange for paused threads to resume operation. + """ + for rsock in self._rsocks: + rsock.close() diff --git a/mitogen-0.3.9/mitogen/parent.py b/mitogen-0.3.9/mitogen/parent.py new file mode 100644 index 0000000..4b96dcf --- /dev/null +++ b/mitogen-0.3.9/mitogen/parent.py @@ -0,0 +1,2771 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. 
Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +# !mitogen: minify_safe + +""" +This module defines functionality common to master and parent processes. It is +sent to any child context that is due to become a parent, due to recursive +connection. +""" + +import binascii +import errno +import fcntl +import getpass +import heapq +import inspect +import logging +import os +import re +import signal +import socket +import struct +import subprocess +import sys +import termios +import textwrap +import threading +import zlib + +# Absolute imports for <2.5. 
+select = __import__('select') + +try: + import thread +except ImportError: + import threading as thread + +import mitogen.core +from mitogen.core import b +from mitogen.core import bytes_partition +from mitogen.core import IOLOG + + +LOG = logging.getLogger(__name__) + +# #410: we must avoid the use of socketpairs if SELinux is enabled. +try: + fp = open('/sys/fs/selinux/enforce', 'rb') + try: + SELINUX_ENABLED = bool(int(fp.read())) + finally: + fp.close() +except IOError: + SELINUX_ENABLED = False + + +try: + next +except NameError: + # Python 2.4/2.5 + from mitogen.core import next + + +itervalues = getattr(dict, 'itervalues', dict.values) + +if mitogen.core.PY3: + xrange = range + closure_attr = '__closure__' + IM_SELF_ATTR = '__self__' +else: + closure_attr = 'func_closure' + IM_SELF_ATTR = 'im_self' + + +try: + SC_OPEN_MAX = os.sysconf('SC_OPEN_MAX') +except ValueError: + SC_OPEN_MAX = 1024 + +BROKER_SHUTDOWN_MSG = ( + 'Connection cancelled because the associated Broker began to shut down.' +) + +OPENPTY_MSG = ( + "Failed to create a PTY: %s. It is likely the maximum number of PTYs has " + "been reached. Consider increasing the 'kern.tty.ptmx_max' sysctl on OS " + "X, the 'kernel.pty.max' sysctl on Linux, or modifying your configuration " + "to avoid PTY use." +) + +SYS_EXECUTABLE_MSG = ( + "The Python sys.executable variable is unset, indicating Python was " + "unable to determine its original program name. Unless explicitly " + "configured otherwise, child contexts will be started using " + "'/usr/bin/python'" +) +_sys_executable_warning_logged = False + + +def _ioctl_cast(n): + """ + Linux ioctl() request parameter is unsigned, whereas on BSD/Darwin it is + signed. Until 2.5 Python exclusively implemented the BSD behaviour, + preventing use of large unsigned int requests like the TTY layer uses + below. So on 2.4, we cast our unsigned to look like signed for Python. 
+ """ + if sys.version_info < (2, 5): + n, = struct.unpack('i', struct.pack('I', n)) + return n + + +# If not :data:`None`, called prior to exec() of any new child process. Used by +# :func:`mitogen.utils.reset_affinity` to allow the child to be freely +# scheduled. +_preexec_hook = None + +# Get PTY number; asm-generic/ioctls.h +LINUX_TIOCGPTN = _ioctl_cast(2147767344) + +# Lock/unlock PTY; asm-generic/ioctls.h +LINUX_TIOCSPTLCK = _ioctl_cast(1074025521) + +IS_LINUX = os.uname()[0] == 'Linux' + +SIGNAL_BY_NUM = dict( + (getattr(signal, name), name) + for name in sorted(vars(signal), reverse=True) + if name.startswith('SIG') and not name.startswith('SIG_') +) + +_core_source_lock = threading.Lock() +_core_source_partial = None + + +def get_log_level(): + return (LOG.getEffectiveLevel() or logging.INFO) + + +def get_sys_executable(): + """ + Return :data:`sys.executable` if it is set, otherwise return + ``"/usr/bin/python"`` and log a warning. + """ + if sys.executable: + return sys.executable + + global _sys_executable_warning_logged + if not _sys_executable_warning_logged: + LOG.warn(SYS_EXECUTABLE_MSG) + _sys_executable_warning_logged = True + + return '/usr/bin/python' + + +def _get_core_source(): + """ + In non-masters, simply fetch the cached mitogen.core source code via the + import mechanism. In masters, this function is replaced with a version that + performs minification directly. + """ + return inspect.getsource(mitogen.core) + + +def get_core_source_partial(): + """ + _get_core_source() is expensive, even with @lru_cache in minify.py, threads + can enter it simultaneously causing severe slowdowns. 
+ """ + global _core_source_partial + + if _core_source_partial is None: + _core_source_lock.acquire() + try: + if _core_source_partial is None: + _core_source_partial = PartialZlib( + _get_core_source().encode('utf-8') + ) + finally: + _core_source_lock.release() + + return _core_source_partial + + +def get_default_remote_name(): + """ + Return the default name appearing in argv[0] of remote machines. + """ + s = u'%s@%s:%d' + s %= (getpass.getuser(), socket.gethostname(), os.getpid()) + # In mixed UNIX/Windows environments, the username may contain slashes. + return s.translate({ + ord(u'\\'): ord(u'_'), + ord(u'/'): ord(u'_') + }) + + +def is_immediate_child(msg, stream): + """ + Handler policy that requires messages to arrive only from immediately + connected children. + """ + return msg.src_id == stream.protocol.remote_id + + +def flags(names): + """ + Return the result of ORing a set of (space separated) :py:mod:`termios` + module constants together. + """ + return sum(getattr(termios, name, 0) + for name in names.split()) + + +def cfmakeraw(tflags): + """ + Given a list returned by :py:func:`termios.tcgetattr`, return a list + modified in a manner similar to the `cfmakeraw()` C library function, but + additionally disabling local echo. 
+ """ + # BSD: github.com/freebsd/freebsd/blob/master/lib/libc/gen/termios.c#L162 + # Linux: github.com/lattera/glibc/blob/master/termios/cfmakeraw.c#L20 + iflag, oflag, cflag, lflag, ispeed, ospeed, cc = tflags + iflag &= ~flags('IMAXBEL IXOFF INPCK BRKINT PARMRK ' + 'ISTRIP INLCR ICRNL IXON IGNPAR') + iflag &= ~flags('IGNBRK BRKINT PARMRK') + oflag &= ~flags('OPOST') + lflag &= ~flags('ECHO ECHOE ECHOK ECHONL ICANON ISIG ' + 'IEXTEN NOFLSH TOSTOP PENDIN') + cflag &= ~flags('CSIZE PARENB') + cflag |= flags('CS8 CREAD') + return [iflag, oflag, cflag, lflag, ispeed, ospeed, cc] + + +def disable_echo(fd): + old = termios.tcgetattr(fd) + new = cfmakeraw(old) + flags = getattr(termios, 'TCSASOFT', 0) + if not mitogen.core.IS_WSL: + # issue #319: Windows Subsystem for Linux as of July 2018 throws EINVAL + # if TCSAFLUSH is specified. + flags |= termios.TCSAFLUSH + termios.tcsetattr(fd, flags, new) + + +def create_socketpair(size=None): + """ + Create a :func:`socket.socketpair` for use as a child's UNIX stdio + channels. As socketpairs are bidirectional, they are economical on file + descriptor usage as one descriptor can be used for ``stdin`` and + ``stdout``. As they are sockets their buffers are tunable, allowing large + buffers to improve file transfer throughput and reduce IO loop iterations. + """ + if size is None: + size = mitogen.core.CHUNK_SIZE + + parentfp, childfp = socket.socketpair() + for fp in parentfp, childfp: + fp.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, size) + + return parentfp, childfp + + +def create_best_pipe(escalates_privilege=False): + """ + By default we prefer to communicate with children over a UNIX socket, as a + single file descriptor can represent bidirectional communication, and a + cross-platform API exists to align buffer sizes with the needs of the + library. 
+ + SELinux prevents us setting up a privileged process to inherit an AF_UNIX + socket, a facility explicitly designed as a better replacement for pipes, + because at some point in the mid 90s it might have been commonly possible + for AF_INET sockets to end up undesirably connected to a privileged + process, so let's make up arbitrary rules breaking all sockets instead. + + If SELinux is detected, fall back to using pipes. + + :param bool escalates_privilege: + If :data:`True`, the target program may escalate privileges, causing + SELinux to disconnect AF_UNIX sockets, so avoid those. + :returns: + `(parent_rfp, child_wfp, child_rfp, parent_wfp)` + """ + if (not escalates_privilege) or (not SELINUX_ENABLED): + parentfp, childfp = create_socketpair() + return parentfp, childfp, childfp, parentfp + + parent_rfp, child_wfp = mitogen.core.pipe() + try: + child_rfp, parent_wfp = mitogen.core.pipe() + return parent_rfp, child_wfp, child_rfp, parent_wfp + except: + parent_rfp.close() + child_wfp.close() + raise + + +def popen(**kwargs): + """ + Wrap :class:`subprocess.Popen` to ensure any global :data:`_preexec_hook` + is invoked in the child. + """ + real_preexec_fn = kwargs.pop('preexec_fn', None) + def preexec_fn(): + if _preexec_hook: + _preexec_hook() + if real_preexec_fn: + real_preexec_fn() + return subprocess.Popen(preexec_fn=preexec_fn, **kwargs) + + +def create_child(args, merge_stdio=False, stderr_pipe=False, + escalates_privilege=False, preexec_fn=None): + """ + Create a child process whose stdin/stdout is connected to a socket. + + :param list args: + Program argument vector. + :param bool merge_stdio: + If :data:`True`, arrange for `stderr` to be connected to the `stdout` + socketpair, rather than inherited from the parent process. This may be + necessary to ensure that no TTY is connected to any stdio handle, for + instance when using LXC. 
+ :param bool stderr_pipe: + If :data:`True` and `merge_stdio` is :data:`False`, arrange for + `stderr` to be connected to a separate pipe, to allow any ongoing debug + logs generated by e.g. SSH to be output as the session progresses, + without interfering with `stdout`. + :param bool escalates_privilege: + If :data:`True`, the target program may escalate privileges, causing + SELinux to disconnect AF_UNIX sockets, so avoid those. + :param function preexec_fn: + If not :data:`None`, a function to run within the post-fork child + before executing the target program. + :returns: + :class:`Process` instance. + """ + parent_rfp, child_wfp, child_rfp, parent_wfp = create_best_pipe( + escalates_privilege=escalates_privilege + ) + + stderr = None + stderr_r = None + if merge_stdio: + stderr = child_wfp + elif stderr_pipe: + stderr_r, stderr = mitogen.core.pipe() + mitogen.core.set_cloexec(stderr_r.fileno()) + + try: + proc = popen( + args=args, + stdin=child_rfp, + stdout=child_wfp, + stderr=stderr, + close_fds=True, + preexec_fn=preexec_fn, + ) + except: + child_rfp.close() + child_wfp.close() + parent_rfp.close() + parent_wfp.close() + if stderr_pipe: + stderr.close() + stderr_r.close() + raise + + child_rfp.close() + child_wfp.close() + if stderr_pipe: + stderr.close() + + return PopenProcess( + proc=proc, + stdin=parent_wfp, + stdout=parent_rfp, + stderr=stderr_r, + ) + + +def _acquire_controlling_tty(): + os.setsid() + if sys.platform in ('linux', 'linux2'): + # On Linux, the controlling tty becomes the first tty opened by a + # process lacking any prior tty. + os.close(os.open(os.ttyname(2), os.O_RDWR)) + if hasattr(termios, 'TIOCSCTTY') and not mitogen.core.IS_WSL: + # #550: prehistoric WSL does not like TIOCSCTTY. + # On BSD an explicit ioctl is required. For some inexplicable reason, + # Python 2.6 on Travis also requires it. 
+ fcntl.ioctl(2, termios.TIOCSCTTY) + + +def _linux_broken_devpts_openpty(): + """ + #462: On broken Linux hosts with mismatched configuration (e.g. old + /etc/fstab template installed), /dev/pts may be mounted without the gid= + mount option, causing new slave devices to be created with the group ID of + the calling process. This upsets glibc, whose openpty() is required by + specification to produce a slave owned by a special group ID (which is + always the 'tty' group). + + Glibc attempts to use "pt_chown" to fix ownership. If that fails, it + chown()s the PTY directly, which fails due to non-root, causing openpty() + to fail with EPERM ("Operation not permitted"). Since we don't need the + magical TTY group to run sudo and su, open the PTY ourselves in this case. + """ + master_fd = None + try: + # Opening /dev/ptmx causes a PTY pair to be allocated, and the + # corresponding slave /dev/pts/* device to be created, owned by UID/GID + # matching this process. + master_fd = os.open('/dev/ptmx', os.O_RDWR) + # Clear the lock bit from the PTY. This a prehistoric feature from a + # time when slave device files were persistent. + fcntl.ioctl(master_fd, LINUX_TIOCSPTLCK, struct.pack('i', 0)) + # Since v4.13 TIOCGPTPEER exists to open the slave in one step, but we + # must support older kernels. Ask for the PTY number. + pty_num_s = fcntl.ioctl(master_fd, LINUX_TIOCGPTN, + struct.pack('i', 0)) + pty_num, = struct.unpack('i', pty_num_s) + pty_name = '/dev/pts/%d' % (pty_num,) + # Now open it with O_NOCTTY to ensure it doesn't change our controlling + # TTY. Otherwise when we close the FD we get killed by the kernel, and + # the child we spawn that should really attach to it will get EPERM + # during _acquire_controlling_tty(). 
+ slave_fd = os.open(pty_name, os.O_RDWR|os.O_NOCTTY) + return master_fd, slave_fd + except OSError: + if master_fd is not None: + os.close(master_fd) + e = sys.exc_info()[1] + raise mitogen.core.StreamError(OPENPTY_MSG, e) + + +def openpty(): + """ + Call :func:`os.openpty`, raising a descriptive error if the call fails. + + :raises mitogen.core.StreamError: + Creating a PTY failed. + :returns: + `(master_fp, slave_fp)` file-like objects. + """ + try: + master_fd, slave_fd = os.openpty() + except OSError: + e = sys.exc_info()[1] + if not (IS_LINUX and e.args[0] == errno.EPERM): + raise mitogen.core.StreamError(OPENPTY_MSG, e) + master_fd, slave_fd = _linux_broken_devpts_openpty() + + master_fp = os.fdopen(master_fd, 'r+b', 0) + slave_fp = os.fdopen(slave_fd, 'r+b', 0) + disable_echo(master_fd) + disable_echo(slave_fd) + mitogen.core.set_block(slave_fd) + return master_fp, slave_fp + + +def tty_create_child(args): + """ + Return a file descriptor connected to the master end of a pseudo-terminal, + whose slave end is connected to stdin/stdout/stderr of a new child process. + The child is created such that the pseudo-terminal becomes its controlling + TTY, ensuring access to /dev/tty returns a new file descriptor open on the + slave end. + + :param list args: + Program argument vector. + :returns: + :class:`Process` instance. + """ + master_fp, slave_fp = openpty() + try: + proc = popen( + args=args, + stdin=slave_fp, + stdout=slave_fp, + stderr=slave_fp, + preexec_fn=_acquire_controlling_tty, + close_fds=True, + ) + except: + master_fp.close() + slave_fp.close() + raise + + slave_fp.close() + return PopenProcess( + proc=proc, + stdin=master_fp, + stdout=master_fp, + ) + + +def hybrid_tty_create_child(args, escalates_privilege=False): + """ + Like :func:`tty_create_child`, except attach stdin/stdout to a socketpair + like :func:`create_child`, but leave stderr and the controlling TTY + attached to a TTY. 
+ + This permits high throughput communication with programs that are reached + via some program that requires a TTY for password input, like many + configurations of sudo. The UNIX TTY layer tends to have tiny (no more than + 14KiB) buffers, forcing many IO loop iterations when transferring bulk + data, causing significant performance loss. + + :param bool escalates_privilege: + If :data:`True`, the target program may escalate privileges, causing + SELinux to disconnect AF_UNIX sockets, so avoid those. + :param list args: + Program argument vector. + :returns: + :class:`Process` instance. + """ + master_fp, slave_fp = openpty() + try: + parent_rfp, child_wfp, child_rfp, parent_wfp = create_best_pipe( + escalates_privilege=escalates_privilege, + ) + try: + mitogen.core.set_block(child_rfp) + mitogen.core.set_block(child_wfp) + proc = popen( + args=args, + stdin=child_rfp, + stdout=child_wfp, + stderr=slave_fp, + preexec_fn=_acquire_controlling_tty, + close_fds=True, + ) + except: + parent_rfp.close() + child_wfp.close() + parent_wfp.close() + child_rfp.close() + raise + except: + master_fp.close() + slave_fp.close() + raise + + slave_fp.close() + child_rfp.close() + child_wfp.close() + return PopenProcess( + proc=proc, + stdin=parent_wfp, + stdout=parent_rfp, + stderr=master_fp, + ) + + +class Timer(object): + """ + Represents a future event. + """ + #: Set to :data:`False` if :meth:`cancel` has been called, or immediately + #: prior to being executed by :meth:`TimerList.expire`. + active = True + + def __init__(self, when, func): + self.when = when + self.func = func + + def __repr__(self): + return 'Timer(%r, %r)' % (self.when, self.func) + + def __eq__(self, other): + return self.when == other.when + + def __lt__(self, other): + return self.when < other.when + + def __le__(self, other): + return self.when <= other.when + + def cancel(self): + """ + Cancel this event. 
If it has not yet executed, it will not execute + during any subsequent :meth:`TimerList.expire` call. + """ + self.active = False + + +class TimerList(object): + """ + Efficiently manage a list of cancellable future events relative to wall + clock time. An instance of this class is installed as + :attr:`mitogen.master.Broker.timers` by default, and as + :attr:`mitogen.core.Broker.timers` in children after a call to + :func:`mitogen.parent.upgrade_router`. + + You can use :class:`TimerList` to cause the broker to wake at arbitrary + future moments, useful for implementing timeouts and polling in an + asynchronous context. + + :class:`TimerList` methods can only be called from asynchronous context, + for example via :meth:`mitogen.core.Broker.defer`. + + The broker automatically adjusts its sleep delay according to the installed + timer list, and arranges for timers to expire via automatic calls to + :meth:`expire`. The main user interface to :class:`TimerList` is + :meth:`schedule`. + """ + _now = mitogen.core.now + + def __init__(self): + self._lst = [] + + def get_timeout(self): + """ + Return the floating point seconds until the next event is due. + + :returns: + Floating point delay, or 0.0, or :data:`None` if no events are + scheduled. + """ + while self._lst and not self._lst[0].active: + heapq.heappop(self._lst) + if self._lst: + return max(0, self._lst[0].when - self._now()) + + def schedule(self, when, func): + """ + Schedule a future event. + + :param float when: + UNIX time in seconds when event should occur. + :param callable func: + Callable to invoke on expiry. + :returns: + A :class:`Timer` instance, exposing :meth:`Timer.cancel`, which may + be used to cancel the future invocation. + """ + timer = Timer(when, func) + heapq.heappush(self._lst, timer) + return timer + + def expire(self): + """ + Invoke callbacks for any events in the past. 
+ """ + now = self._now() + while self._lst and self._lst[0].when <= now: + timer = heapq.heappop(self._lst) + if timer.active: + timer.active = False + timer.func() + + +class PartialZlib(object): + """ + Because the mitogen.core source has a line appended to it during bootstrap, + it must be recompressed for each connection. This is not a problem for a + small number of connections, but it amounts to 30 seconds CPU time by the + time 500 targets are in use. + + For that reason, build a compressor containing mitogen.core and flush as + much of it as possible into an initial buffer. Then to append the custom + line, clone the compressor and compress just that line. + + A full compression costs ~6ms on a modern machine, this method costs ~35 + usec. + """ + def __init__(self, s): + self.s = s + if sys.version_info > (2, 5): + self._compressor = zlib.compressobj(9) + self._out = self._compressor.compress(s) + self._out += self._compressor.flush(zlib.Z_SYNC_FLUSH) + else: + self._compressor = None + + def append(self, s): + """ + Append the bytestring `s` to the compressor state and return the + final compressed output. + """ + if self._compressor is None: + return zlib.compress(self.s + s, 9) + else: + compressor = self._compressor.copy() + out = self._out + out += compressor.compress(s) + return out + compressor.flush() + + +def _upgrade_broker(broker): + """ + Extract the poller state from Broker and replace it with the industrial + strength poller for this OS. Must run on the Broker thread. + """ + # This function is deadly! The act of calling start_receive() generates log + # messages which must be silenced as the upgrade progresses, otherwise the + # poller state will change as it is copied, resulting in write fds that are + # lost. (Due to LogHandler->Router->Stream->Protocol->Broker->Poller, where + # Stream only calls start_transmit() when transitioning from empty to + # non-empty buffer. 
If the start_transmit() is lost, writes from the child + # hang permanently). + root = logging.getLogger() + old_level = root.level + root.setLevel(logging.CRITICAL) + try: + old = broker.poller + new = PREFERRED_POLLER() + for fd, data in old.readers: + new.start_receive(fd, data) + for fd, data in old.writers: + new.start_transmit(fd, data) + + old.close() + broker.poller = new + finally: + root.setLevel(old_level) + + broker.timers = TimerList() + LOG.debug('upgraded %r with %r (new: %d readers, %d writers; ' + 'old: %d readers, %d writers)', old, new, + len(new._rfds), len(new._wfds), len(old._rfds), len(old._wfds)) + + +@mitogen.core.takes_econtext +def upgrade_router(econtext): + if not isinstance(econtext.router, Router): # TODO + econtext.broker.defer(_upgrade_broker, econtext.broker) + econtext.router.__class__ = Router # TODO + econtext.router.upgrade( + importer=econtext.importer, + parent=econtext.parent, + ) + + +def get_connection_class(name): + """ + Given the name of a Mitogen connection method, import its implementation + module and return its Stream subclass. + """ + if name == u'local': + name = u'parent' + module = mitogen.core.import_module(u'mitogen.' + name) + return module.Connection + + +@mitogen.core.takes_econtext +def _proxy_connect(name, method_name, kwargs, econtext): + """ + Implements the target portion of Router._proxy_connect() by upgrading the + local process to a parent if it was not already, then calling back into + Router._connect() using the arguments passed to the parent's + Router.connect(). + + :returns: + Dict containing: + * ``id``: :data:`None`, or integer new context ID. + * ``name``: :data:`None`, or string name attribute of new Context. + * ``msg``: :data:`None`, or StreamError exception text. 
+ """ + upgrade_router(econtext) + + try: + context = econtext.router._connect( + klass=get_connection_class(method_name), + name=name, + **kwargs + ) + except mitogen.core.StreamError: + return { + u'id': None, + u'name': None, + u'msg': 'error occurred on host %s: %s' % ( + socket.gethostname(), + sys.exc_info()[1], + ), + } + + return { + u'id': context.context_id, + u'name': context.name, + u'msg': None, + } + + +def returncode_to_str(n): + """ + Parse and format a :func:`os.waitpid` exit status. + """ + if n < 0: + return 'exited due to signal %d (%s)' % (-n, SIGNAL_BY_NUM.get(-n)) + return 'exited with return code %d' % (n,) + + +class EofError(mitogen.core.StreamError): + """ + Raised by :class:`Connection` when an empty read is detected from the + remote process before bootstrap completes. + """ + # inherits from StreamError to maintain compatibility. + pass + + +class CancelledError(mitogen.core.StreamError): + """ + Raised by :class:`Connection` when :meth:`mitogen.core.Broker.shutdown` is + called before bootstrap completes. + """ + pass + + +class Argv(object): + """ + Wrapper to defer argv formatting when debug logging is disabled. + """ + def __init__(self, argv): + self.argv = argv + + must_escape = frozenset('\\$"`!') + must_escape_or_space = must_escape | frozenset(' ') + + def escape(self, x): + if not self.must_escape_or_space.intersection(x): + return x + + s = '"' + for c in x: + if c in self.must_escape: + s += '\\' + s += c + s += '"' + return s + + def __str__(self): + return ' '.join(map(self.escape, self.argv)) + + +class CallSpec(object): + """ + Wrapper to defer call argument formatting when debug logging is disabled. 
+ """ + def __init__(self, func, args, kwargs): + self.func = func + self.args = args + self.kwargs = kwargs + + def _get_name(self): + bits = [self.func.__module__] + if inspect.ismethod(self.func): + im_self = getattr(self.func, IM_SELF_ATTR) + bits.append(getattr(im_self, '__name__', None) or + getattr(type(im_self), '__name__', None)) + bits.append(self.func.__name__) + return u'.'.join(bits) + + def _get_args(self): + return u', '.join(repr(a) for a in self.args) + + def _get_kwargs(self): + s = u'' + if self.kwargs: + s = u', '.join('%s=%r' % (k, v) for k, v in self.kwargs.items()) + if self.args: + s = u', ' + s + return s + + def __repr__(self): + return '%s(%s%s)' % ( + self._get_name(), + self._get_args(), + self._get_kwargs(), + ) + + +class PollPoller(mitogen.core.Poller): + """ + Poller based on the POSIX :linux:man2:`poll` interface. Not available on + some Python/OS X combinations. Otherwise the preferred poller for small + FD counts; or if many pollers are created, used once, then closed. + There there is no setup/teardown/configuration system call overhead. 
+ """ + SUPPORTED = hasattr(select, 'poll') + _readmask = SUPPORTED and select.POLLIN | select.POLLHUP + + def __init__(self): + super(PollPoller, self).__init__() + self._pollobj = select.poll() + + # TODO: no proof we dont need writemask too + def _update(self, fd): + mask = (((fd in self._rfds) and self._readmask) | + ((fd in self._wfds) and select.POLLOUT)) + if mask: + self._pollobj.register(fd, mask) + else: + try: + self._pollobj.unregister(fd) + except KeyError: + pass + + def _poll(self, timeout): + if timeout: + timeout *= 1000 + + events, _ = mitogen.core.io_op(self._pollobj.poll, timeout) + for fd, event in events: + if event & self._readmask: + IOLOG.debug('%r: POLLIN|POLLHUP for %r', self, fd) + data, gen = self._rfds.get(fd, (None, None)) + if gen and gen < self._generation: + yield data + if event & select.POLLOUT: + IOLOG.debug('%r: POLLOUT for %r', self, fd) + data, gen = self._wfds.get(fd, (None, None)) + if gen and gen < self._generation: + yield data + + +class KqueuePoller(mitogen.core.Poller): + """ + Poller based on the FreeBSD/Darwin :freebsd:man2:`kqueue` interface. + """ + SUPPORTED = hasattr(select, 'kqueue') + + def __init__(self): + super(KqueuePoller, self).__init__() + self._kqueue = select.kqueue() + self._changelist = [] + + def close(self): + super(KqueuePoller, self).close() + self._kqueue.close() + + def _control(self, fd, filters, flags): + mitogen.core._vv and IOLOG.debug( + '%r._control(%r, %r, %r)', self, fd, filters, flags) + # TODO: at shutdown it is currently possible for KQ_EV_ADD/KQ_EV_DEL + # pairs to be pending after the associated file descriptor has already + # been closed. Fixing this requires maintaining extra state, or perhaps + # making fd closure the poller's responsibility. In the meantime, + # simply apply changes immediately. 
+ # self._changelist.append(select.kevent(fd, filters, flags)) + changelist = [select.kevent(fd, filters, flags)] + events, _ = mitogen.core.io_op(self._kqueue.control, changelist, 0, 0) + assert not events + + def start_receive(self, fd, data=None): + mitogen.core._vv and IOLOG.debug('%r.start_receive(%r, %r)', + self, fd, data) + if fd not in self._rfds: + self._control(fd, select.KQ_FILTER_READ, select.KQ_EV_ADD) + self._rfds[fd] = (data or fd, self._generation) + + def stop_receive(self, fd): + mitogen.core._vv and IOLOG.debug('%r.stop_receive(%r)', self, fd) + if fd in self._rfds: + self._control(fd, select.KQ_FILTER_READ, select.KQ_EV_DELETE) + del self._rfds[fd] + + def start_transmit(self, fd, data=None): + mitogen.core._vv and IOLOG.debug('%r.start_transmit(%r, %r)', + self, fd, data) + if fd not in self._wfds: + self._control(fd, select.KQ_FILTER_WRITE, select.KQ_EV_ADD) + self._wfds[fd] = (data or fd, self._generation) + + def stop_transmit(self, fd): + mitogen.core._vv and IOLOG.debug('%r.stop_transmit(%r)', self, fd) + if fd in self._wfds: + self._control(fd, select.KQ_FILTER_WRITE, select.KQ_EV_DELETE) + del self._wfds[fd] + + def _poll(self, timeout): + changelist = self._changelist + self._changelist = [] + events, _ = mitogen.core.io_op(self._kqueue.control, + changelist, 32, timeout) + for event in events: + fd = event.ident + if event.flags & select.KQ_EV_ERROR: + LOG.debug('ignoring stale event for fd %r: errno=%d: %s', + fd, event.data, errno.errorcode.get(event.data)) + elif event.filter == select.KQ_FILTER_READ: + data, gen = self._rfds.get(fd, (None, None)) + # Events can still be read for an already-discarded fd. 
+ if gen and gen < self._generation: + mitogen.core._vv and IOLOG.debug('%r: POLLIN: %r', self, fd) + yield data + elif event.filter == select.KQ_FILTER_WRITE and fd in self._wfds: + data, gen = self._wfds.get(fd, (None, None)) + if gen and gen < self._generation: + mitogen.core._vv and IOLOG.debug('%r: POLLOUT: %r', self, fd) + yield data + + +class EpollPoller(mitogen.core.Poller): + """ + Poller based on the Linux :linux:man7:`epoll` interface. + """ + SUPPORTED = hasattr(select, 'epoll') + _inmask = SUPPORTED and select.EPOLLIN | select.EPOLLHUP + + def __init__(self): + super(EpollPoller, self).__init__() + self._epoll = select.epoll(32) + self._registered_fds = set() + + def close(self): + super(EpollPoller, self).close() + self._epoll.close() + + def _control(self, fd): + mitogen.core._vv and IOLOG.debug('%r._control(%r)', self, fd) + mask = (((fd in self._rfds) and select.EPOLLIN) | + ((fd in self._wfds) and select.EPOLLOUT)) + if mask: + if fd in self._registered_fds: + self._epoll.modify(fd, mask) + else: + self._epoll.register(fd, mask) + self._registered_fds.add(fd) + elif fd in self._registered_fds: + self._epoll.unregister(fd) + self._registered_fds.remove(fd) + + def start_receive(self, fd, data=None): + mitogen.core._vv and IOLOG.debug('%r.start_receive(%r, %r)', + self, fd, data) + self._rfds[fd] = (data or fd, self._generation) + self._control(fd) + + def stop_receive(self, fd): + mitogen.core._vv and IOLOG.debug('%r.stop_receive(%r)', self, fd) + self._rfds.pop(fd, None) + self._control(fd) + + def start_transmit(self, fd, data=None): + mitogen.core._vv and IOLOG.debug('%r.start_transmit(%r, %r)', + self, fd, data) + self._wfds[fd] = (data or fd, self._generation) + self._control(fd) + + def stop_transmit(self, fd): + mitogen.core._vv and IOLOG.debug('%r.stop_transmit(%r)', self, fd) + self._wfds.pop(fd, None) + self._control(fd) + + def _poll(self, timeout): + the_timeout = -1 + if timeout is not None: + the_timeout = timeout + + events, _ = 
mitogen.core.io_op(self._epoll.poll, the_timeout, 32) + for fd, event in events: + if event & self._inmask: + data, gen = self._rfds.get(fd, (None, None)) + if gen and gen < self._generation: + # Events can still be read for an already-discarded fd. + mitogen.core._vv and IOLOG.debug('%r: POLLIN: %r', self, fd) + yield data + if event & select.EPOLLOUT: + data, gen = self._wfds.get(fd, (None, None)) + if gen and gen < self._generation: + mitogen.core._vv and IOLOG.debug('%r: POLLOUT: %r', self, fd) + yield data + + +POLLERS = (EpollPoller, KqueuePoller, PollPoller, mitogen.core.Poller) +PREFERRED_POLLER = next(cls for cls in POLLERS if cls.SUPPORTED) + + +# For processes that start many threads or connections, it's possible Latch +# will also get high-numbered FDs, and so select() becomes useless there too. +POLLER_LIGHTWEIGHT = PollPoller.SUPPORTED and PollPoller or PREFERRED_POLLER +mitogen.core.Latch.poller_class = POLLER_LIGHTWEIGHT + + +class LineLoggingProtocolMixin(object): + def __init__(self, **kwargs): + super(LineLoggingProtocolMixin, self).__init__(**kwargs) + self.logged_lines = [] + self.logged_partial = None + + def on_line_received(self, line): + self.logged_partial = None + self.logged_lines.append((mitogen.core.now(), line)) + self.logged_lines[:] = self.logged_lines[-100:] + return super(LineLoggingProtocolMixin, self).on_line_received(line) + + def on_partial_line_received(self, line): + self.logged_partial = line + return super(LineLoggingProtocolMixin, self).on_partial_line_received(line) + + def on_disconnect(self, broker): + if self.logged_partial: + self.logged_lines.append((mitogen.core.now(), self.logged_partial)) + self.logged_partial = None + super(LineLoggingProtocolMixin, self).on_disconnect(broker) + + +def get_history(streams): + history = [] + for stream in streams: + if stream: + history.extend(getattr(stream.protocol, 'logged_lines', [])) + history.sort() + + s = b('\n').join(h[1] for h in history) + return 
mitogen.core.to_text(s) + + +class RegexProtocol(LineLoggingProtocolMixin, mitogen.core.DelimitedProtocol): + """ + Implement a delimited protocol where messages matching a set of regular + expressions are dispatched to individual handler methods. Input is + dispatches using :attr:`PATTERNS` and :attr:`PARTIAL_PATTERNS`, before + falling back to :meth:`on_unrecognized_line_received` and + :meth:`on_unrecognized_partial_line_received`. + """ + #: A sequence of 2-tuples of the form `(compiled pattern, method)` for + #: patterns that should be matched against complete (delimited) messages, + #: i.e. full lines. + PATTERNS = [] + + #: Like :attr:`PATTERNS`, but patterns that are matched against incomplete + #: lines. + PARTIAL_PATTERNS = [] + + def on_line_received(self, line): + super(RegexProtocol, self).on_line_received(line) + for pattern, func in self.PATTERNS: + match = pattern.search(line) + if match is not None: + return func(self, line, match) + + return self.on_unrecognized_line_received(line) + + def on_unrecognized_line_received(self, line): + LOG.debug('%s: (unrecognized): %s', + self.stream.name, line.decode('utf-8', 'replace')) + + def on_partial_line_received(self, line): + super(RegexProtocol, self).on_partial_line_received(line) + LOG.debug('%s: (partial): %s', + self.stream.name, line.decode('utf-8', 'replace')) + for pattern, func in self.PARTIAL_PATTERNS: + match = pattern.search(line) + if match is not None: + return func(self, line, match) + + return self.on_unrecognized_partial_line_received(line) + + def on_unrecognized_partial_line_received(self, line): + LOG.debug('%s: (unrecognized partial): %s', + self.stream.name, line.decode('utf-8', 'replace')) + + +class BootstrapProtocol(RegexProtocol): + """ + Respond to stdout of a child during bootstrap. 
Wait for :attr:`EC0_MARKER` + to be written by the first stage to indicate it can receive the bootstrap, + then await :attr:`EC1_MARKER` to indicate success, and + :class:`MitogenProtocol` can be enabled. + """ + #: Sentinel value emitted by the first stage to indicate it is ready to + #: receive the compressed bootstrap. For :mod:`mitogen.ssh` this must have + #: length of at least `max(len('password'), len('debug1:'))` + EC0_MARKER = b('MITO000') + EC1_MARKER = b('MITO001') + EC2_MARKER = b('MITO002') + + def __init__(self, broker): + super(BootstrapProtocol, self).__init__() + self._writer = mitogen.core.BufferedWriter(broker, self) + + def on_transmit(self, broker): + self._writer.on_transmit(broker) + + def _on_ec0_received(self, line, match): + LOG.debug('%r: first stage started succcessfully', self) + self._writer.write(self.stream.conn.get_preamble()) + + def _on_ec1_received(self, line, match): + LOG.debug('%r: first stage received mitogen.core source', self) + + def _on_ec2_received(self, line, match): + LOG.debug('%r: new child booted successfully', self) + self.stream.conn._complete_connection() + return False + + def on_unrecognized_line_received(self, line): + LOG.debug('%s: stdout: %s', self.stream.name, + line.decode('utf-8', 'replace')) + + PATTERNS = [ + (re.compile(EC0_MARKER), _on_ec0_received), + (re.compile(EC1_MARKER), _on_ec1_received), + (re.compile(EC2_MARKER), _on_ec2_received), + ] + + +class LogProtocol(LineLoggingProtocolMixin, mitogen.core.DelimitedProtocol): + """ + For "hybrid TTY/socketpair" mode, after connection setup a spare TTY master + FD exists that cannot be closed, and to which SSH or sudo may continue + writing log messages. + + The descriptor cannot be closed since the UNIX TTY layer sends SIGHUP to + processes whose controlling TTY is the slave whose master side was closed. + LogProtocol takes over this FD and creates log messages for anything + written to it. 
+ """ + def on_line_received(self, line): + """ + Read a line, decode it as UTF-8, and log it. + """ + super(LogProtocol, self).on_line_received(line) + LOG.info(u'%s: %s', self.stream.name, line.decode('utf-8', 'replace')) + + +class MitogenProtocol(mitogen.core.MitogenProtocol): + """ + Extend core.MitogenProtocol to cause SHUTDOWN to be sent to the child + during graceful shutdown. + """ + def on_shutdown(self, broker): + """ + Respond to the broker's request for the stream to shut down by sending + SHUTDOWN to the child. + """ + LOG.debug('%r: requesting child shutdown', self) + self._send( + mitogen.core.Message( + src_id=mitogen.context_id, + dst_id=self.remote_id, + handle=mitogen.core.SHUTDOWN, + ) + ) + + +class Options(object): + name = None + + #: The path to the remote Python interpreter. + python_path = get_sys_executable() + + #: Maximum time to wait for a connection attempt. + connect_timeout = 30.0 + + #: True to cause context to write verbose /tmp/mitogen..log. + debug = False + + #: True to cause context to write /tmp/mitogen.stats...log. + profiling = False + + #: True if unidirectional routing is enabled in the new child. + unidirectional = False + + #: Passed via Router wrapper methods, must eventually be passed to + #: ExternalContext.main(). + max_message_size = None + + #: Remote name. + remote_name = None + + #: Derived from :py:attr:`connect_timeout`; absolute floating point + #: UNIX timestamp after which the connection attempt should be abandoned. 
+ connect_deadline = None + + def __init__(self, max_message_size, name=None, remote_name=None, + python_path=None, debug=False, connect_timeout=None, + profiling=False, unidirectional=False, old_router=None): + self.name = name + self.max_message_size = max_message_size + if python_path: + self.python_path = python_path + if connect_timeout: + self.connect_timeout = connect_timeout + if remote_name is None: + remote_name = get_default_remote_name() + if '/' in remote_name or '\\' in remote_name: + raise ValueError('remote_name= cannot contain slashes') + if remote_name: + self.remote_name = mitogen.core.to_text(remote_name) + self.debug = debug + self.profiling = profiling + self.unidirectional = unidirectional + self.max_message_size = max_message_size + self.connect_deadline = mitogen.core.now() + self.connect_timeout + + +class Connection(object): + """ + Manage the lifetime of a set of :class:`Streams ` connecting to a + remote Python interpreter, including bootstrap, disconnection, and external + tool integration. + + Base for streams capable of starting children. + """ + options_class = Options + + #: The protocol attached to stdio of the child. + stream_protocol_class = BootstrapProtocol + + #: The protocol attached to stderr of the child. + diag_protocol_class = LogProtocol + + #: :class:`Process` + proc = None + + #: :class:`mitogen.core.Stream` with sides connected to stdin/stdout. + stdio_stream = None + + #: If `proc.stderr` is set, referencing either a plain pipe or the + #: controlling TTY, this references the corresponding + #: :class:`LogProtocol`'s stream, allowing it to be disconnected when this + #: stream is disconnected. + stderr_stream = None + + #: Function with the semantics of :func:`create_child` used to create the + #: child process. + create_child = staticmethod(create_child) + + #: Dictionary of extra kwargs passed to :attr:`create_child`. 
+ create_child_args = {} + + #: :data:`True` if the remote has indicated that it intends to detach, and + #: should not be killed on disconnect. + detached = False + + #: If :data:`True`, indicates the child should not be killed during + #: graceful detachment, as it the actual process implementing the child + #: context. In all other cases, the subprocess is SSH, sudo, or a similar + #: tool that should be reminded to quit during disconnection. + child_is_immediate_subprocess = True + + #: Prefix given to default names generated by :meth:`connect`. + name_prefix = u'local' + + #: :class:`Timer` that runs :meth:`_on_timer_expired` when connection + #: timeout occurs. + _timer = None + + #: When disconnection completes, instance of :class:`Reaper` used to wait + #: on the exit status of the subprocess. + _reaper = None + + #: On failure, the exception object that should be propagated back to the + #: user. + exception = None + + #: Extra text appended to :class:`EofError` if that exception is raised on + #: a failed connection attempt. May be used in subclasses to hint at common + #: problems with a particular connection method. + eof_error_hint = None + + def __init__(self, options, router): + #: :class:`Options` + self.options = options + self._router = router + + def __repr__(self): + return 'Connection(%r)' % (self.stdio_stream,) + + # Minimised, gzipped, base64'd and passed to 'python -c'. It forks, dups + # file descriptor 0 as 100, creates a pipe, then execs a new interpreter + # with a custom argv. + # * Optimized for minimum byte count after minification & compression. + # The script preamble_size.py measures this. + # * 'CONTEXT_NAME' and 'PREAMBLE_COMPRESSED_LEN' are substituted with + # their respective values. + # * CONTEXT_NAME must be prefixed with the name of the Python binary in + # order to allow virtualenvs to detect their install prefix. 
+ # + # macOS tweaks for Python 2.7 must be kept in sync with the Ansible + # module test_echo_module, used by the integration tests. + # * macOS <= 10.14 (Darwin <= 18) install an unreliable Python version + # switcher as /usr/bin/python, which introspects argv0. To workaround + # it we redirect attempts to call /usr/bin/python with an explicit + # call to /usr/bin/python2.7. macOS 10.15 (Darwin 19) removed it. + # * macOS 11.x (Darwin 20, Big Sur) and macOS 12.x (Darwin 21, Monterey) + # do something slightly different. The Python executable is patched to + # perform an extra execvp(). I don't fully understand the details, but + # setting PYTHON_LAUNCHED_FROM_WRAPPER=1 avoids it. + # * macOS 12.3+ (Darwin 21.4+, Monterey) doesn't ship Python. + # https://developer.apple.com/documentation/macos-release-notes/macos-12_3-release-notes#Python + # + # Locals: + # R: read side of interpreter stdin. + # W: write side of interpreter stdin. + # r: read side of core_src FD. + # w: write side of core_src FD. + # C: the decompressed core source. + + # Final os.close(2) to avoid --py-debug build from corrupting stream with + # "[1234 refs]" during exit. 
+ @staticmethod + def _first_stage(): + R,W=os.pipe() + r,w=os.pipe() + if os.fork(): + os.dup2(0,100) + os.dup2(R,0) + os.dup2(r,101) + os.close(R) + os.close(r) + os.close(W) + os.close(w) + if os.uname()[0]=='Darwin'and os.uname()[2][:2]<'19'and sys.executable=='/usr/bin/python':sys.executable='/usr/bin/python2.7' + if os.uname()[0]=='Darwin'and os.uname()[2][:2]in'2021'and sys.version[:3]=='2.7':os.environ['PYTHON_LAUNCHED_FROM_WRAPPER']='1' + os.environ['ARGV0']=sys.executable + os.execl(sys.executable,sys.executable+'(mitogen:CONTEXT_NAME)') + os.write(1,'MITO000\n'.encode()) + C=zlib.decompress(os.fdopen(0,'rb').read(PREAMBLE_COMPRESSED_LEN)) + fp=os.fdopen(W,'wb',0) + fp.write(C) + fp.close() + fp=os.fdopen(w,'wb',0) + fp.write(C) + fp.close() + os.write(1,'MITO001\n'.encode()) + os.close(2) + + def get_python_argv(self): + """ + Return the initial argument vector elements necessary to invoke Python, + by returning a 1-element list containing :attr:`python_path` if it is a + string, or simply returning it if it is already a list. + + This allows emulation of existing tools where the Python invocation may + be set to e.g. `['/usr/bin/env', 'python']`. + """ + if isinstance(self.options.python_path, list): + return self.options.python_path + return [self.options.python_path] + + def get_boot_command(self): + source = inspect.getsource(self._first_stage) + source = textwrap.dedent('\n'.join(source.strip().split('\n')[2:])) + source = source.replace(' ', ' ') + source = source.replace('CONTEXT_NAME', self.options.remote_name) + preamble_compressed = self.get_preamble() + source = source.replace('PREAMBLE_COMPRESSED_LEN', + str(len(preamble_compressed))) + compressed = zlib.compress(source.encode(), 9) + encoded = binascii.b2a_base64(compressed).replace(b('\n'), b('')) + + # Just enough to decode, decompress, and exec the first stage. + # Priorities: wider compatibility, faster startup, shorter length. 
+ # `import os` here, instead of stage 1, to save a few bytes. + # `sys.path=...` for https://github.com/python/cpython/issues/115911. + return self.get_python_argv() + [ + '-c', + 'import sys;sys.path=[p for p in sys.path if p];import binascii,os,zlib;' + 'exec(zlib.decompress(binascii.a2b_base64("%s")))' % (encoded.decode(),), + ] + + def get_econtext_config(self): + assert self.options.max_message_size is not None + parent_ids = mitogen.parent_ids[:] + parent_ids.insert(0, mitogen.context_id) + return { + 'parent_ids': parent_ids, + 'context_id': self.context.context_id, + 'debug': self.options.debug, + 'profiling': self.options.profiling, + 'unidirectional': self.options.unidirectional, + 'log_level': get_log_level(), + 'whitelist': self._router.get_module_whitelist(), + 'blacklist': self._router.get_module_blacklist(), + 'max_message_size': self.options.max_message_size, + 'version': mitogen.__version__, + } + + def get_preamble(self): + suffix = ( + '\nExternalContext(%r).main()\n' % + (self.get_econtext_config(),) + ) + partial = get_core_source_partial() + return partial.append(suffix.encode('utf-8')) + + def _get_name(self): + """ + Called by :meth:`connect` after :attr:`pid` is known. Subclasses can + override it to specify a default stream name, or set + :attr:`name_prefix` to generate a default format. + """ + return u'%s.%s' % (self.name_prefix, self.proc.pid) + + def start_child(self): + args = self.get_boot_command() + LOG.debug('command line for %r: %s', self, Argv(args)) + try: + return self.create_child(args=args, **self.create_child_args) + except OSError: + e = sys.exc_info()[1] + msg = 'Child start failed: %s. Command was: %s' % (e, Argv(args)) + raise mitogen.core.StreamError(msg) + + def _adorn_eof_error(self, e): + """ + Subclasses may provide additional information in the case of a failed + connection. 
+ """ + if self.eof_error_hint: + e.args = ('%s\n\n%s' % (e.args[0], self.eof_error_hint),) + + def _complete_connection(self): + self._timer.cancel() + if not self.exception: + mitogen.core.unlisten(self._router.broker, 'shutdown', + self._on_broker_shutdown) + self._router.register(self.context, self.stdio_stream) + self.stdio_stream.set_protocol( + MitogenProtocol( + router=self._router, + remote_id=self.context.context_id, + ) + ) + self._router.route_monitor.notice_stream(self.stdio_stream) + self.latch.put() + + def _fail_connection(self, exc): + """ + Fail the connection attempt. + """ + LOG.debug('failing connection %s due to %r', + self.stdio_stream and self.stdio_stream.name, exc) + if self.exception is None: + self._adorn_eof_error(exc) + self.exception = exc + mitogen.core.unlisten(self._router.broker, 'shutdown', + self._on_broker_shutdown) + for stream in self.stdio_stream, self.stderr_stream: + if stream and not stream.receive_side.closed: + stream.on_disconnect(self._router.broker) + self._complete_connection() + + eof_error_msg = 'EOF on stream; last 100 lines received:\n' + + def on_stdio_disconnect(self): + """ + Handle stdio stream disconnection by failing the Connection if the + stderr stream has already been closed. Otherwise, wait for it to close + (or timeout), to allow buffered diagnostic logs to be consumed. + + It is normal that when a subprocess aborts, stdio has nothing buffered + when it is closed, thus signalling readability, causing an empty read + (interpreted as indicating disconnection) on the next loop iteration, + even if its stderr pipe has lots of diagnostic logs still buffered in + the kernel. Therefore we must wait for both pipes to indicate they are + empty before triggering connection failure. + """ + stderr = self.stderr_stream + if stderr is None or stderr.receive_side.closed: + self._on_streams_disconnected() + + def on_stderr_disconnect(self): + """ + Inverse of :func:`on_stdio_disconnect`. 
+ """ + if self.stdio_stream.receive_side.closed: + self._on_streams_disconnected() + + def _on_streams_disconnected(self): + """ + When disconnection has been detected for both streams, cancel the + connection timer, mark the connection failed, and reap the child + process. Do nothing if the timer has already been cancelled, indicating + some existing failure has already been noticed. + """ + if self._timer.active: + self._timer.cancel() + self._fail_connection(EofError( + self.eof_error_msg + get_history( + [self.stdio_stream, self.stderr_stream] + ) + )) + + if self._reaper: + return + + self._reaper = Reaper( + broker=self._router.broker, + proc=self.proc, + kill=not ( + (self.detached and self.child_is_immediate_subprocess) or + # Avoid killing so child has chance to write cProfile data + self._router.profiling + ), + # Don't delay shutdown waiting for a detached child, since the + # detached child may expect to live indefinitely after its parent + # exited. + wait_on_shutdown=(not self.detached), + ) + self._reaper.reap() + + def _on_broker_shutdown(self): + """ + Respond to broker.shutdown() being called by failing the connection + attempt. 
+ """ + self._fail_connection(CancelledError(BROKER_SHUTDOWN_MSG)) + + def stream_factory(self): + return self.stream_protocol_class.build_stream( + broker=self._router.broker, + ) + + def stderr_stream_factory(self): + return self.diag_protocol_class.build_stream() + + def _setup_stdio_stream(self): + stream = self.stream_factory() + stream.conn = self + stream.name = self.options.name or self._get_name() + stream.accept(self.proc.stdout, self.proc.stdin) + + mitogen.core.listen(stream, 'disconnect', self.on_stdio_disconnect) + self._router.broker.start_receive(stream) + return stream + + def _setup_stderr_stream(self): + stream = self.stderr_stream_factory() + stream.conn = self + stream.name = self.options.name or self._get_name() + stream.accept(self.proc.stderr, self.proc.stderr) + + mitogen.core.listen(stream, 'disconnect', self.on_stderr_disconnect) + self._router.broker.start_receive(stream) + return stream + + def _on_timer_expired(self): + self._fail_connection( + mitogen.core.TimeoutError( + 'Failed to setup connection after %.2f seconds', + self.options.connect_timeout, + ) + ) + + def _async_connect(self): + LOG.debug('creating connection to context %d using %s', + self.context.context_id, self.__class__.__module__) + mitogen.core.listen(self._router.broker, 'shutdown', + self._on_broker_shutdown) + self._timer = self._router.broker.timers.schedule( + when=self.options.connect_deadline, + func=self._on_timer_expired, + ) + + try: + self.proc = self.start_child() + except Exception: + LOG.debug('failed to start child', exc_info=True) + self._fail_connection(sys.exc_info()[1]) + return + + LOG.debug('child for %r started: pid:%r stdin:%r stdout:%r stderr:%r', + self, self.proc.pid, + self.proc.stdin.fileno(), + self.proc.stdout.fileno(), + self.proc.stderr and self.proc.stderr.fileno()) + + self.stdio_stream = self._setup_stdio_stream() + if self.context.name is None: + self.context.name = self.stdio_stream.name + self.proc.name = self.stdio_stream.name 
+ if self.proc.stderr: + self.stderr_stream = self._setup_stderr_stream() + + def connect(self, context): + self.context = context + self.latch = mitogen.core.Latch() + self._router.broker.defer(self._async_connect) + self.latch.get() + if self.exception: + raise self.exception + + +class ChildIdAllocator(object): + """ + Allocate new context IDs from a block of unique context IDs allocated by + the master process. + """ + def __init__(self, router): + self.router = router + self.lock = threading.Lock() + self.it = iter(xrange(0)) + + def allocate(self): + """ + Allocate an ID, requesting a fresh block from the master if the + existing block is exhausted. + + :returns: + The new context ID. + + .. warning:: + + This method is not safe to call from the :class:`Broker` thread, as + it may block on IO of its own. + """ + self.lock.acquire() + try: + for id_ in self.it: + return id_ + + master = self.router.context_by_id(0) + start, end = master.send_await( + mitogen.core.Message(dst_id=0, handle=mitogen.core.ALLOCATE_ID) + ) + self.it = iter(xrange(start, end)) + finally: + self.lock.release() + + return self.allocate() + + +class CallChain(object): + """ + Deliver :data:`mitogen.core.CALL_FUNCTION` messages to a target context, + optionally threading related calls so an exception in an earlier call + cancels subsequent calls. + + :param mitogen.core.Context context: + Target context. + :param bool pipelined: + Enable pipelining. + + :meth:`call`, :meth:`call_no_reply` and :meth:`call_async` + normally issue calls and produce responses with no memory of prior + exceptions. If a call made with :meth:`call_no_reply` fails, the exception + is logged to the target context's logging framework. 
+ + **Pipelining** + + When pipelining is enabled, if an exception occurs during a call, + subsequent calls made by the same :class:`CallChain` fail with the same + exception, including those already in-flight on the network, and no further + calls execute until :meth:`reset` is invoked. + + No exception is logged for calls made with :meth:`call_no_reply`, instead + the exception is saved and reported as the result of subsequent + :meth:`call` or :meth:`call_async` calls. + + Sequences of asynchronous calls can be made without wasting network + round-trips to discover if prior calls succeed, and chains originating from + multiple unrelated source contexts may overlap concurrently at a target + context without interference. + + In this example, 4 calls complete in one round-trip:: + + chain = mitogen.parent.CallChain(context, pipelined=True) + chain.call_no_reply(os.mkdir, '/tmp/foo') + + # If previous mkdir() failed, this never runs: + chain.call_no_reply(os.mkdir, '/tmp/foo/bar') + + # If either mkdir() failed, this never runs, and the exception is + # asynchronously delivered to the receiver. + recv = chain.call_async(subprocess.check_output, '/tmp/foo') + + # If anything so far failed, this never runs, and raises the exception. + chain.call(do_something) + + # If this code was executed, the exception would also be raised. + if recv.get().unpickle() == 'baz': + pass + + When pipelining is enabled, :meth:`reset` must be invoked to ensure any + exception is discarded, otherwise unbounded memory usage is possible in + long-running programs. The context manager protocol is supported to ensure + :meth:`reset` is always invoked:: + + with mitogen.parent.CallChain(context, pipelined=True) as chain: + chain.call_no_reply(...) + chain.call_no_reply(...) + chain.call_no_reply(...) + chain.call(...) + + # chain.reset() automatically invoked. 
+ """ + def __init__(self, context, pipelined=False): + self.context = context + if pipelined: + self.chain_id = self.make_chain_id() + else: + self.chain_id = None + + @classmethod + def make_chain_id(cls): + return '%s-%s-%x-%x' % ( + socket.gethostname(), + os.getpid(), + thread.get_ident(), + int(1e6 * mitogen.core.now()), + ) + + def __repr__(self): + return '%s(%s)' % (self.__class__.__name__, self.context) + + def __enter__(self): + return self + + def __exit__(self, _1, _2, _3): + self.reset() + + def reset(self): + """ + Instruct the target to forget any related exception. + """ + if not self.chain_id: + return + + saved, self.chain_id = self.chain_id, None + try: + self.call_no_reply(mitogen.core.Dispatcher.forget_chain, saved) + finally: + self.chain_id = saved + + closures_msg = ( + 'Mitogen cannot invoke closures, as doing so would require ' + 'serializing arbitrary program state, and no universal ' + 'method exists to recover a reference to them.' + ) + + lambda_msg = ( + 'Mitogen cannot invoke anonymous functions, as no universal method ' + 'exists to recover a reference to an anonymous function.' + ) + + method_msg = ( + 'Mitogen cannot invoke instance methods, as doing so would require ' + 'serializing arbitrary program state.' 
+ ) + + def make_msg(self, fn, *args, **kwargs): + if getattr(fn, closure_attr, None) is not None: + raise TypeError(self.closures_msg) + if fn.__name__ == '<lambda>': + raise TypeError(self.lambda_msg) + + if inspect.ismethod(fn): + im_self = getattr(fn, IM_SELF_ATTR) + if not inspect.isclass(im_self): + raise TypeError(self.method_msg) + klass = mitogen.core.to_text(im_self.__name__) + else: + klass = None + + tup = ( + self.chain_id, + mitogen.core.to_text(fn.__module__), + klass, + mitogen.core.to_text(fn.__name__), + args, + mitogen.core.Kwargs(kwargs) + ) + return mitogen.core.Message.pickled(tup, + handle=mitogen.core.CALL_FUNCTION) + + def call_no_reply(self, fn, *args, **kwargs): + """ + Like :meth:`call_async`, but do not wait for a return value, and inform + the target context no reply is expected. If the call fails and + pipelining is disabled, the exception will be logged to the target + context's logging framework. + """ + LOG.debug('starting no-reply function call to %r: %r', + self.context.name or self.context.context_id, + CallSpec(fn, args, kwargs)) + self.context.send(self.make_msg(fn, *args, **kwargs)) + + def call_async(self, fn, *args, **kwargs): + """ + Arrange for `fn(*args, **kwargs)` to be invoked on the context's main + thread. + + :param fn: + A free function in module scope or a class method of a class + directly reachable from module scope: + + .. code-block:: python + + # mymodule.py + + def my_func(): + '''A free function reachable as mymodule.my_func''' + + class MyClass: + @classmethod + def my_classmethod(cls): + '''Reachable as mymodule.MyClass.my_classmethod''' + + def my_instancemethod(self): + '''Unreachable: requires a class instance!''' + + class MyEmbeddedClass: + @classmethod + def my_classmethod(cls): + '''Not directly reachable from module scope!''' + + :param tuple args: + Function arguments, if any. See :ref:`serialization-rules` for + permitted types. + :param dict kwargs: + Function keyword arguments, if any. 
See :ref:`serialization-rules` + for permitted types. + :returns: + :class:`mitogen.core.Receiver` configured to receive the result of + the invocation: + + .. code-block:: python + + recv = context.call_async(os.check_output, 'ls /tmp/') + try: + # Prints output once it is received. + msg = recv.get() + print(msg.unpickle()) + except mitogen.core.CallError, e: + print('Call failed:', str(e)) + + Asynchronous calls may be dispatched in parallel to multiple + contexts and consumed as they complete using + :class:`mitogen.select.Select`. + """ + LOG.debug('starting function call to %s: %r', + self.context.name or self.context.context_id, + CallSpec(fn, args, kwargs)) + return self.context.send_async(self.make_msg(fn, *args, **kwargs)) + + def call(self, fn, *args, **kwargs): + """ + Like :meth:`call_async`, but block until the return value is available. + Equivalent to:: + + call_async(fn, *args, **kwargs).get().unpickle() + + :returns: + The function's return value. + :raises mitogen.core.CallError: + An exception was raised in the remote context during execution. + """ + receiver = self.call_async(fn, *args, **kwargs) + return receiver.get().unpickle(throw_dead=False) + + +class Context(mitogen.core.Context): + """ + Extend :class:`mitogen.core.Context` with functionality useful to masters, + and child contexts who later become parents. Currently when this class is + required, the target context's router is upgraded at runtime. + """ + #: A :class:`CallChain` instance constructed by default, with pipelining + #: disabled. :meth:`call`, :meth:`call_async` and :meth:`call_no_reply` use + #: this instance. 
+ call_chain_class = CallChain + + via = None + + def __init__(self, *args, **kwargs): + super(Context, self).__init__(*args, **kwargs) + self.default_call_chain = self.call_chain_class(self) + + def __ne__(self, other): + return not (self == other) + + def __eq__(self, other): + return ( + isinstance(other, mitogen.core.Context) and + (other.context_id == self.context_id) and + (other.router == self.router) + ) + + def __hash__(self): + return hash((self.router, self.context_id)) + + def call_async(self, fn, *args, **kwargs): + """ + See :meth:`CallChain.call_async`. + """ + return self.default_call_chain.call_async(fn, *args, **kwargs) + + def call(self, fn, *args, **kwargs): + """ + See :meth:`CallChain.call`. + """ + return self.default_call_chain.call(fn, *args, **kwargs) + + def call_no_reply(self, fn, *args, **kwargs): + """ + See :meth:`CallChain.call_no_reply`. + """ + self.default_call_chain.call_no_reply(fn, *args, **kwargs) + + def shutdown(self, wait=False): + """ + Arrange for the context to receive a ``SHUTDOWN`` message, triggering + graceful shutdown. + + Due to a lack of support for timers, no attempt is made yet to force + terminate a hung context using this method. This will be fixed shortly. + + :param bool wait: + If :data:`True`, block the calling thread until the context has + completely terminated. + + :returns: + If `wait` is :data:`False`, returns a :class:`mitogen.core.Latch` + whose :meth:`get() ` method returns + :data:`None` when shutdown completes. The `timeout` parameter may + be used to implement graceful timeouts. 
+ """ + LOG.debug('%r.shutdown() sending SHUTDOWN', self) + latch = mitogen.core.Latch() + mitogen.core.listen(self, 'disconnect', lambda: latch.put(None)) + self.send( + mitogen.core.Message( + handle=mitogen.core.SHUTDOWN, + ) + ) + + if wait: + latch.get() + else: + return latch + + +class RouteMonitor(object): + """ + Generate and respond to :data:`mitogen.core.ADD_ROUTE` and + :data:`mitogen.core.DEL_ROUTE` messages sent to the local context by + maintaining a table of available routes, and propagating messages towards + parents and siblings as appropriate. + + :class:`RouteMonitor` is responsible for generating routing messages for + directly attached children. It learns of new children via + :meth:`notice_stream` called by :class:`Router`, and subscribes to their + ``disconnect`` event to learn when they disappear. + + In children, constructing this class overwrites the stub + :data:`mitogen.core.DEL_ROUTE` handler installed by + :class:`mitogen.core.ExternalContext`, which is expected behaviour when a + child is beging upgraded in preparation to become a parent of children of + its own. + + By virtue of only being active while responding to messages from a handler, + RouteMonitor lives entirely on the broker thread, so its data requires no + locking. + + :param mitogen.master.Router router: + Router to install handlers on. + :param mitogen.core.Context parent: + :data:`None` in the master process, or reference to the parent context + we should propagate route updates towards. + """ + def __init__(self, router, parent=None): + self.router = router + self.parent = parent + self._log = logging.getLogger('mitogen.route_monitor') + #: Mapping of Stream instance to integer context IDs reachable via the + #: stream; used to cleanup routes during disconnection. 
+ self._routes_by_stream = {} + self.router.add_handler( + fn=self._on_add_route, + handle=mitogen.core.ADD_ROUTE, + persist=True, + policy=is_immediate_child, + overwrite=True, + ) + self.router.add_handler( + fn=self._on_del_route, + handle=mitogen.core.DEL_ROUTE, + persist=True, + policy=is_immediate_child, + overwrite=True, + ) + + def __repr__(self): + return 'RouteMonitor()' + + def _send_one(self, stream, handle, target_id, name): + """ + Compose and send an update message on a stream. + + :param mitogen.core.Stream stream: + Stream to send it on. + :param int handle: + :data:`mitogen.core.ADD_ROUTE` or :data:`mitogen.core.DEL_ROUTE` + :param int target_id: + ID of the connecting or disconnecting context. + :param str name: + Context name or :data:`None`. + """ + if not stream: + # We may not have a stream during shutdown. + return + + data = str(target_id) + if name: + data = '%s:%s' % (target_id, name) + stream.protocol.send( + mitogen.core.Message( + handle=handle, + data=data.encode('utf-8'), + dst_id=stream.protocol.remote_id, + ) + ) + + def _propagate_up(self, handle, target_id, name=None): + """ + In a non-master context, propagate an update towards the master. + + :param int handle: + :data:`mitogen.core.ADD_ROUTE` or :data:`mitogen.core.DEL_ROUTE` + :param int target_id: + ID of the connecting or disconnecting context. + :param str name: + For :data:`mitogen.core.ADD_ROUTE`, the name of the new context + assigned by its parent. This is used by parents to assign the + :attr:`mitogen.core.Context.name` attribute. + """ + if self.parent: + stream = self.router.stream_by_id(self.parent.context_id) + self._send_one(stream, handle, target_id, name) + + def _propagate_down(self, handle, target_id): + """ + For DEL_ROUTE, we additionally want to broadcast the message to any + stream that has ever communicated with the disconnecting ID, so + core.py's :meth:`mitogen.core.Router._on_del_route` can turn the + message into a disconnect event. 
+ + :param int handle: + :data:`mitogen.core.ADD_ROUTE` or :data:`mitogen.core.DEL_ROUTE` + :param int target_id: + ID of the connecting or disconnecting context. + """ + for stream in self.router.get_streams(): + if target_id in stream.protocol.egress_ids and ( + (self.parent is None) or + (self.parent.context_id != stream.protocol.remote_id) + ): + self._send_one(stream, mitogen.core.DEL_ROUTE, target_id, None) + + def notice_stream(self, stream): + """ + When this parent is responsible for a new directly connected child + stream, we're also responsible for broadcasting + :data:`mitogen.core.DEL_ROUTE` upstream when that child disconnects. + """ + self._routes_by_stream[stream] = set([stream.protocol.remote_id]) + self._propagate_up(mitogen.core.ADD_ROUTE, stream.protocol.remote_id, + stream.name) + mitogen.core.listen( + obj=stream, + name='disconnect', + func=lambda: self._on_stream_disconnect(stream), + ) + + def get_routes(self, stream): + """ + Return the set of context IDs reachable on a stream. + + :param mitogen.core.Stream stream: + :returns: set([int]) + """ + return self._routes_by_stream.get(stream) or set() + + def _on_stream_disconnect(self, stream): + """ + Respond to disconnection of a local stream by propagating DEL_ROUTE for + any contexts we know were attached to it. + """ + # During a stream crash it is possible for disconnect signal to fire + # twice, in which case ignore the second instance. 
+ routes = self._routes_by_stream.pop(stream, None) + if routes is None: + return + + self._log.debug('stream %s is gone; propagating DEL_ROUTE for %r', + stream.name, routes) + for target_id in routes: + self.router.del_route(target_id) + self._propagate_up(mitogen.core.DEL_ROUTE, target_id) + self._propagate_down(mitogen.core.DEL_ROUTE, target_id) + + context = self.router.context_by_id(target_id, create=False) + if context: + mitogen.core.fire(context, 'disconnect') + + def _on_add_route(self, msg): + """ + Respond to :data:`mitogen.core.ADD_ROUTE` by validating the source of + the message, updating the local table, and propagating the message + upwards. + """ + if msg.is_dead: + return + + target_id_s, _, target_name = bytes_partition(msg.data, b(':')) + target_name = target_name.decode() + target_id = int(target_id_s) + self.router.context_by_id(target_id).name = target_name + stream = self.router.stream_by_id(msg.src_id) + current = self.router.stream_by_id(target_id) + if current and current.protocol.remote_id != mitogen.parent_id: + self._log.error('Cannot add duplicate route to %r via %r, ' + 'already have existing route via %r', + target_id, stream, current) + return + + self._log.debug('Adding route to %d via %r', target_id, stream) + self._routes_by_stream[stream].add(target_id) + self.router.add_route(target_id, stream) + self._propagate_up(mitogen.core.ADD_ROUTE, target_id, target_name) + + def _on_del_route(self, msg): + """ + Respond to :data:`mitogen.core.DEL_ROUTE` by validating the source of + the message, updating the local table, propagating the message + upwards, and downwards towards any stream that every had a message + forwarded from it towards the disconnecting context. 
+ """ + if msg.is_dead: + return + + target_id = int(msg.data) + registered_stream = self.router.stream_by_id(target_id) + if registered_stream is None: + return + + stream = self.router.stream_by_id(msg.src_id) + if registered_stream != stream: + self._log.error('received DEL_ROUTE for %d from %r, expected %r', + target_id, stream, registered_stream) + return + + context = self.router.context_by_id(target_id, create=False) + if context: + self._log.debug('firing local disconnect signal for %r', context) + mitogen.core.fire(context, 'disconnect') + + self._log.debug('deleting route to %d via %r', target_id, stream) + routes = self._routes_by_stream.get(stream) + if routes: + routes.discard(target_id) + + self.router.del_route(target_id) + if stream.protocol.remote_id != mitogen.parent_id: + self._propagate_up(mitogen.core.DEL_ROUTE, target_id) + self._propagate_down(mitogen.core.DEL_ROUTE, target_id) + + +class Router(mitogen.core.Router): + context_class = Context + debug = False + profiling = False + + id_allocator = None + responder = None + log_forwarder = None + route_monitor = None + + def upgrade(self, importer, parent): + LOG.debug('upgrading %r with capabilities to start new children', self) + self.id_allocator = ChildIdAllocator(router=self) + self.responder = ModuleForwarder( + router=self, + parent_context=parent, + importer=importer, + ) + self.route_monitor = RouteMonitor(self, parent) + self.add_handler( + fn=self._on_detaching, + handle=mitogen.core.DETACHING, + persist=True, + ) + + def _on_detaching(self, msg): + if msg.is_dead: + return + stream = self.stream_by_id(msg.src_id) + if stream.protocol.remote_id != msg.src_id or stream.conn.detached: + LOG.warning('bad DETACHING received on %r: %r', stream, msg) + return + LOG.debug('%r: marking as detached', stream) + stream.conn.detached = True + msg.reply(None) + + def get_streams(self): + """ + Return an atomic snapshot of all streams in existence at time of call. 
+ This is safe to call from any thread. + """ + self._write_lock.acquire() + try: + return itervalues(self._stream_by_id) + finally: + self._write_lock.release() + + def disconnect(self, context): + """ + Disconnect a context and forget its stream, assuming the context is + directly connected. + """ + stream = self.stream_by_id(context) + if stream is None or stream.protocol.remote_id != context.context_id: + return + + l = mitogen.core.Latch() + mitogen.core.listen(stream, 'disconnect', l.put) + def disconnect(): + LOG.debug('Starting disconnect of %r', stream) + stream.on_disconnect(self.broker) + self.broker.defer(disconnect) + l.get() + + def add_route(self, target_id, stream): + """ + Arrange for messages whose `dst_id` is `target_id` to be forwarded on a + directly connected :class:`Stream`. Safe to call from any thread. + + This is called automatically by :class:`RouteMonitor` in response to + :data:`mitogen.core.ADD_ROUTE` messages, but remains public while the + design has not yet settled, and situations may arise where routing is + not fully automatic. + + :param int target_id: + Target context ID to add a route for. + :param mitogen.core.Stream stream: + Stream over which messages to the target should be routed. + """ + LOG.debug('%r: adding route to context %r via %r', + self, target_id, stream) + assert isinstance(target_id, int) + assert isinstance(stream, mitogen.core.Stream) + + self._write_lock.acquire() + try: + self._stream_by_id[target_id] = stream + finally: + self._write_lock.release() + + def del_route(self, target_id): + """ + Delete any route that exists for `target_id`. It is not an error to + delete a route that does not currently exist. Safe to call from any + thread. + + This is called automatically by :class:`RouteMonitor` in response to + :data:`mitogen.core.DEL_ROUTE` messages, but remains public while the + design has not yet settled, and situations may arise where routing is + not fully automatic. 
+ + :param int target_id: + Target context ID to delete route for. + """ + LOG.debug('%r: deleting route to %r', self, target_id) + # DEL_ROUTE may be sent by a parent if it knows this context sent + # messages to a peer that has now disconnected, to let us raise + # 'disconnect' event on the appropriate Context instance. In that case, + # we won't a matching _stream_by_id entry for the disappearing route, + # so don't raise an error for a missing key here. + self._write_lock.acquire() + try: + self._stream_by_id.pop(target_id, None) + finally: + self._write_lock.release() + + def get_module_blacklist(self): + if mitogen.context_id == 0: + return self.responder.blacklist + return self.importer.master_blacklist + + def get_module_whitelist(self): + if mitogen.context_id == 0: + return self.responder.whitelist + return self.importer.master_whitelist + + def allocate_id(self): + return self.id_allocator.allocate() + + connection_timeout_msg = u"Connection timed out." + + def _connect(self, klass, **kwargs): + context_id = self.allocate_id() + context = self.context_class(self, context_id) + context.name = kwargs.get('name') + + kwargs['old_router'] = self + kwargs['max_message_size'] = self.max_message_size + conn = klass(klass.options_class(**kwargs), self) + try: + conn.connect(context=context) + except mitogen.core.TimeoutError: + raise mitogen.core.StreamError(self.connection_timeout_msg) + + return context + + def connect(self, method_name, name=None, **kwargs): + if name: + name = mitogen.core.to_text(name) + + klass = get_connection_class(method_name) + kwargs.setdefault(u'debug', self.debug) + kwargs.setdefault(u'profiling', self.profiling) + kwargs.setdefault(u'unidirectional', self.unidirectional) + kwargs.setdefault(u'name', name) + + via = kwargs.pop(u'via', None) + if via is not None: + return self.proxy_connect(via, method_name, + **mitogen.core.Kwargs(kwargs)) + return self._connect(klass, **mitogen.core.Kwargs(kwargs)) + + def proxy_connect(self, 
via_context, method_name, name=None, **kwargs): + resp = via_context.call(_proxy_connect, + name=name, + method_name=method_name, + kwargs=mitogen.core.Kwargs(kwargs), + ) + if resp['msg'] is not None: + raise mitogen.core.StreamError(resp['msg']) + + name = u'%s.%s' % (via_context.name, resp['name']) + context = self.context_class(self, resp['id'], name=name) + context.via = via_context + self._write_lock.acquire() + try: + self._context_by_id[context.context_id] = context + finally: + self._write_lock.release() + return context + + def buildah(self, **kwargs): + return self.connect(u'buildah', **kwargs) + + def doas(self, **kwargs): + return self.connect(u'doas', **kwargs) + + def docker(self, **kwargs): + return self.connect(u'docker', **kwargs) + + def kubectl(self, **kwargs): + return self.connect(u'kubectl', **kwargs) + + def fork(self, **kwargs): + return self.connect(u'fork', **kwargs) + + def jail(self, **kwargs): + return self.connect(u'jail', **kwargs) + + def local(self, **kwargs): + return self.connect(u'local', **kwargs) + + def lxc(self, **kwargs): + return self.connect(u'lxc', **kwargs) + + def lxd(self, **kwargs): + return self.connect(u'lxd', **kwargs) + + def setns(self, **kwargs): + return self.connect(u'setns', **kwargs) + + def su(self, **kwargs): + return self.connect(u'su', **kwargs) + + def sudo(self, **kwargs): + return self.connect(u'sudo', **kwargs) + + def ssh(self, **kwargs): + return self.connect(u'ssh', **kwargs) + + def podman(self, **kwargs): + return self.connect(u'podman', **kwargs) + + +class Reaper(object): + """ + Asynchronous logic for reaping :class:`Process` objects. This is necessary + to prevent uncontrolled buildup of zombie processes in long-lived parents + that will eventually reach an OS limit, preventing creation of new threads + and processes, and to log the exit status of the child in the case of an + error. 
+ + To avoid modifying process-global state such as with + :func:`signal.set_wakeup_fd` or installing a :data:`signal.SIGCHLD` handler + that might interfere with the user's ability to use those facilities, + Reaper polls for exit with backoff using timers installed on an associated + :class:`Broker`. + + :param mitogen.core.Broker broker: + The :class:`Broker` on which to install timers + :param mitogen.parent.Process proc: + The process to reap. + :param bool kill: + If :data:`True`, send ``SIGTERM`` and ``SIGKILL`` to the process. + :param bool wait_on_shutdown: + If :data:`True`, delay :class:`Broker` shutdown if child has not yet + exited. If :data:`False` simply forget the child. + """ + #: :class:`Timer` that invokes :meth:`reap` after some polling delay. + _timer = None + + def __init__(self, broker, proc, kill, wait_on_shutdown): + self.broker = broker + self.proc = proc + self.kill = kill + self.wait_on_shutdown = wait_on_shutdown + self._tries = 0 + + def _signal_child(self, signum): + # For processes like sudo we cannot actually send sudo a signal, + # because it is setuid, so this is best-effort only. + LOG.debug('%r: sending %s', self.proc, SIGNAL_BY_NUM[signum]) + try: + os.kill(self.proc.pid, signum) + except OSError: + e = sys.exc_info()[1] + if e.args[0] != errno.EPERM: + raise + + def _calc_delay(self, count): + """ + Calculate a poll delay given `count` attempts have already been made. + These constants have no principle, they just produce rapid but still + relatively conservative retries. + """ + delay = 0.05 + for _ in xrange(count): + delay *= 1.72 + return delay + + def _on_broker_shutdown(self): + """ + Respond to :class:`Broker` shutdown by cancelling the reap timer if + :attr:`Router.await_children_at_shutdown` is disabled. Otherwise + shutdown is delayed for up to :attr:`Broker.shutdown_timeout` for + subprocesses may have no intention of exiting any time soon. 
+ """ + if not self.wait_on_shutdown: + self._timer.cancel() + + def _install_timer(self, delay): + new = self._timer is None + self._timer = self.broker.timers.schedule( + when=mitogen.core.now() + delay, + func=self.reap, + ) + if new: + mitogen.core.listen(self.broker, 'shutdown', + self._on_broker_shutdown) + + def _remove_timer(self): + if self._timer and self._timer.active: + self._timer.cancel() + mitogen.core.unlisten(self.broker, 'shutdown', + self._on_broker_shutdown) + + def reap(self): + """ + Reap the child process during disconnection. + """ + status = self.proc.poll() + if status is not None: + LOG.debug('%r: %s', self.proc, returncode_to_str(status)) + mitogen.core.fire(self.proc, 'exit') + self._remove_timer() + return + + self._tries += 1 + if self._tries > 20: + LOG.warning('%r: child will not exit, giving up', self) + self._remove_timer() + return + + delay = self._calc_delay(self._tries - 1) + LOG.debug('%r still running after IO disconnect, recheck in %.03fs', + self.proc, delay) + self._install_timer(delay) + + if not self.kill: + pass + elif self._tries == 2: + self._signal_child(signal.SIGTERM) + elif self._tries == 6: # roughly 4 seconds + self._signal_child(signal.SIGKILL) + + +class Process(object): + """ + Process objects provide a uniform interface to the :mod:`subprocess` and + :mod:`mitogen.fork`. This class is extended by :class:`PopenProcess` and + :class:`mitogen.fork.Process`. + + :param int pid: + The process ID. + :param file stdin: + File object attached to standard input. + :param file stdout: + File object attached to standard output. + :param file stderr: + File object attached to standard error, or :data:`None`. + """ + #: Name of the process used in logs. Set to the stream/context name by + #: :class:`Connection`. + name = None + + def __init__(self, pid, stdin, stdout, stderr=None): + #: The process ID. + self.pid = pid + #: File object attached to standard input. 
+ self.stdin = stdin + #: File object attached to standard output. + self.stdout = stdout + #: File object attached to standard error. + self.stderr = stderr + + def __repr__(self): + return '%s %s pid %d' % ( + type(self).__name__, + self.name, + self.pid, + ) + + def poll(self): + """ + Fetch the child process exit status, or :data:`None` if it is still + running. This should be overridden by subclasses. + + :returns: + Exit status in the style of the :attr:`subprocess.Popen.returncode` + attribute, i.e. with signals represented by a negative integer. + """ + raise NotImplementedError() + + +class PopenProcess(Process): + """ + :class:`Process` subclass wrapping a :class:`subprocess.Popen` object. + + :param subprocess.Popen proc: + The subprocess. + """ + def __init__(self, proc, stdin, stdout, stderr=None): + super(PopenProcess, self).__init__(proc.pid, stdin, stdout, stderr) + #: The subprocess. + self.proc = proc + + def poll(self): + return self.proc.poll() + + +class ModuleForwarder(object): + """ + Respond to :data:`mitogen.core.GET_MODULE` requests in a child by + forwarding the request to our parent context, or satisfying the request + from our local Importer cache. 
+ """ + def __init__(self, router, parent_context, importer): + self.router = router + self.parent_context = parent_context + self.importer = importer + router.add_handler( + fn=self._on_forward_module, + handle=mitogen.core.FORWARD_MODULE, + persist=True, + policy=mitogen.core.has_parent_authority, + ) + router.add_handler( + fn=self._on_get_module, + handle=mitogen.core.GET_MODULE, + persist=True, + policy=is_immediate_child, + ) + + def __repr__(self): + return 'ModuleForwarder' + + def _on_forward_module(self, msg): + if msg.is_dead: + return + + context_id_s, _, fullname = bytes_partition(msg.data, b('\x00')) + fullname = mitogen.core.to_text(fullname) + context_id = int(context_id_s) + stream = self.router.stream_by_id(context_id) + if stream.protocol.remote_id == mitogen.parent_id: + LOG.error('%r: dropping FORWARD_MODULE(%d, %r): no route to child', + self, context_id, fullname) + return + + if fullname in stream.protocol.sent_modules: + return + + LOG.debug('%r._on_forward_module() sending %r to %r via %r', + self, fullname, context_id, stream.protocol.remote_id) + self._send_module_and_related(stream, fullname) + if stream.protocol.remote_id != context_id: + stream.protocol._send( + mitogen.core.Message( + data=msg.data, + handle=mitogen.core.FORWARD_MODULE, + dst_id=stream.protocol.remote_id, + ) + ) + + def _on_get_module(self, msg): + if msg.is_dead: + return + + fullname = msg.data.decode('utf-8') + LOG.debug('%r: %s requested by context %d', self, fullname, msg.src_id) + callback = lambda: self._on_cache_callback(msg, fullname) + self.importer._request_module(fullname, callback) + + def _on_cache_callback(self, msg, fullname): + stream = self.router.stream_by_id(msg.src_id) + LOG.debug('%r: sending %s to %r', self, fullname, stream) + self._send_module_and_related(stream, fullname) + + def _send_module_and_related(self, stream, fullname): + tup = self.importer._cache[fullname] + for related in tup[4]: + rtup = self.importer._cache.get(related) + if 
rtup: + self._send_one_module(stream, rtup) + else: + LOG.debug('%r: %s not in cache (for %s)', + self, related, fullname) + + self._send_one_module(stream, tup) + + def _send_one_module(self, stream, tup): + if tup[0] not in stream.protocol.sent_modules: + stream.protocol.sent_modules.add(tup[0]) + self.router._async_route( + mitogen.core.Message.pickled( + tup, + dst_id=stream.protocol.remote_id, + handle=mitogen.core.LOAD_MODULE, + ) + ) diff --git a/mitogen-0.3.9/mitogen/podman.py b/mitogen-0.3.9/mitogen/podman.py new file mode 100644 index 0000000..acc46a3 --- /dev/null +++ b/mitogen-0.3.9/mitogen/podman.py @@ -0,0 +1,73 @@ +# Copyright 2019, David Wilson +# Copyright 2021, Mitogen contributors +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +# !mitogen: minify_safe + +import logging + +import mitogen.parent + + +LOG = logging.getLogger(__name__) + + +class Options(mitogen.parent.Options): + container = None + username = None + podman_path = 'podman' + + def __init__(self, container=None, podman_path=None, username=None, + **kwargs): + super(Options, self).__init__(**kwargs) + assert container is not None + self.container = container + if podman_path: + self.podman_path = podman_path + if username: + self.username = username + + +class Connection(mitogen.parent.Connection): + options_class = Options + child_is_immediate_subprocess = False + + # TODO: better way of capturing errors such as "No such container." + create_child_args = { + 'merge_stdio': True + } + + def _get_name(self): + return u'podman.' + self.options.container + + def get_boot_command(self): + args = [self.options.podman_path, 'exec'] + if self.options.username: + args += ['--user=' + self.options.username] + args += ["--interactive", "--", self.options.container] + return args + super(Connection, self).get_boot_command() diff --git a/mitogen-0.3.9/mitogen/profiler.py b/mitogen-0.3.9/mitogen/profiler.py new file mode 100644 index 0000000..512a593 --- /dev/null +++ b/mitogen-0.3.9/mitogen/profiler.py @@ -0,0 +1,164 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. 
Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +# !mitogen: minify_safe + +""" +mitogen.profiler + Record and report cProfile statistics from a run. Creates one aggregated + output file, one aggregate containing only workers, and one for the + top-level process. + +Usage: + mitogen.profiler record [args ..] + mitogen.profiler report [sort_mode] + mitogen.profiler stat [args ..] + +Mode: + record: Record a trace. + report: Report on a previously recorded trace. + stat: Record and report in a single step. + +Where: + dest_path: Filesystem prefix to write .pstats files to. + sort_mode: Sorting mode; defaults to "cumulative". 
See: + https://docs.python.org/2/library/profile.html#pstats.Stats.sort_stats + +Example: + mitogen.profiler record /tmp/mypatch ansible-playbook foo.yml + mitogen.profiler dump /tmp/mypatch-worker.pstats +""" + +from __future__ import print_function +import os +import pstats +import shutil +import subprocess +import sys +import tempfile +import time + + +def try_merge(stats, path): + try: + stats.add(path) + return True + except Exception as e: + print('%s failed. Will retry. %s' % (path, e)) + return False + + +def merge_stats(outpath, inpaths): + first, rest = inpaths[0], inpaths[1:] + for x in range(1): + try: + stats = pstats.Stats(first) + except EOFError: + time.sleep(0.2) + continue + + print("Writing %r..." % (outpath,)) + for path in rest: + #print("Merging %r into %r.." % (os.path.basename(path), outpath)) + for x in range(5): + if try_merge(stats, path): + break + time.sleep(0.2) + + pstats.dump_stats(outpath) + + +def generate_stats(outpath, tmpdir): + print('Generating stats..') + all_paths = [] + paths_by_ident = {} + + for name in os.listdir(tmpdir): + if name.endswith('-dump.pstats'): + ident, _, pid = name.partition('-') + path = os.path.join(tmpdir, name) + all_paths.append(path) + paths_by_ident.setdefault(ident, []).append(path) + + merge_stats('%s-all.pstat' % (outpath,), all_paths) + for ident, paths in paths_by_ident.items(): + merge_stats('%s-%s.pstat' % (outpath, ident), paths) + + +def do_record(tmpdir, path, *args): + env = os.environ.copy() + fmt = '%(identity)s-%(pid)s.%(now)s-dump.%(ext)s' + env['MITOGEN_PROFILING'] = '1' + env['MITOGEN_PROFILE_FMT'] = os.path.join(tmpdir, fmt) + rc = subprocess.call(args, env=env) + generate_stats(path, tmpdir) + return rc + + +def do_report(tmpdir, path, sort='cumulative'): + stats = pstats.Stats(path).sort_stats(sort) + stats.print_stats(100) + + +def do_stat(tmpdir, sort, *args): + valid_sorts = pstats.Stats.sort_arg_dict_default + if sort not in valid_sorts: + sys.stderr.write('Invalid sort %r, 
must be one of %s\n' % + (sort, ', '.join(sorted(valid_sorts)))) + sys.exit(1) + + outfile = os.path.join(tmpdir, 'combined') + do_record(tmpdir, outfile, *args) + aggs = ('app.main', 'mitogen.broker', 'mitogen.child_main', + 'mitogen.service.pool', 'Strategy', 'WorkerProcess', + 'all') + for agg in aggs: + path = '%s-%s.pstat' % (outfile, agg) + if os.path.exists(path): + print() + print() + print('------ Aggregation %r ------' % (agg,)) + print() + do_report(tmpdir, path, sort) + print() + + +def main(): + if len(sys.argv) < 2 or sys.argv[1] not in ('record', 'report', 'stat'): + sys.stderr.write(__doc__.lstrip()) + sys.exit(1) + + func = globals()['do_' + sys.argv[1]] + tmpdir = tempfile.mkdtemp(prefix='mitogen.profiler') + try: + sys.exit(func(tmpdir, *sys.argv[2:]) or 0) + finally: + shutil.rmtree(tmpdir) + +if __name__ == '__main__': + main() diff --git a/mitogen-0.3.9/mitogen/select.py b/mitogen-0.3.9/mitogen/select.py new file mode 100644 index 0000000..2d87574 --- /dev/null +++ b/mitogen-0.3.9/mitogen/select.py @@ -0,0 +1,348 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +# !mitogen: minify_safe + +import mitogen.core + + +class Error(mitogen.core.Error): + pass + + +class Event(object): + """ + Represents one selected event. + """ + #: The first Receiver or Latch the event traversed. + source = None + + #: The :class:`mitogen.core.Message` delivered to a receiver, or the object + #: posted to a latch. + data = None + + +class Select(object): + """ + Support scatter/gather asynchronous calls and waiting on multiple + :class:`receivers `, + :class:`channels `, + :class:`latches `, and + :class:`sub-selects