Compare commits

..

No commits in common. "master" and "v0.0.2" have entirely different histories.

24 changed files with 621 additions and 1237 deletions

4
Jenkinsfile vendored
View File

@ -1,4 +0,0 @@
// https://github.com/Rudd-O/shared-jenkins-libraries
@Library('shared-jenkins-libraries@master') _
genericFedoraRPMPipeline()

View File

@ -89,21 +89,21 @@ Enabling bombshell-client access to dom0
----------------------------------------
`dom0` needs its `qubes.VMShell` service activated. As `root` in `dom0`,
create a file `/etc/qubes-rpc/qubes.VMShell` with mode `0755` and make
create a file `/etc/qubes-rpc/qubes.VMShell` with mode `0644` and make
sure its contents say `/bin/bash`.
You will then create a file `/etc/qubes/policy.d/80-ansible-qubes.policy`
with mode 0664, owned by `root` and group `qubes`. Add a policy
You will then create a file `/etc/qubes-rpc/policy/qubes.VMShell` with
mode 0664, owned by your login user, and group `qubes`. Add a policy
line towards the top of the file:
```
qubes.VMShell * controller * allow
yourvm dom0 ask
```
Where `controller` represents the name of the VM you will be executing
`bombshell-client` against `dom0` from.
Where `yourvm` represents the name of the VM you will be executing
`bombshell-client` against dom0 from.
That's it -- `bombshell-client` should work against `dom0` now. Of course,
That's it -- `bombshell-client` should work against dom0 now. Of course,
you can adjust the policy to have it not ask — do the security math
on what that implies.

View File

@ -1,71 +0,0 @@
import os
import sys
import tempfile
from ansible import errors
from ansible.plugins.action.template import ActionModule as template
sys.path.insert(0, os.path.dirname(__file__))
import commonlib
contents = """# Sample configuration file for Qubes GUI daemon
# For syntax go http://www.hyperrealm.com/libconfig/libconfig_manual.html
global: {
# default values
#allow_fullscreen = false;
#allow_utf8_titles = false;
#secure_copy_sequence = "Ctrl-Shift-c";
#secure_paste_sequence = "Ctrl-Shift-v";
#windows_count_limit = 500;
#audio_low_latency = false;
};
# most of setting can be set per-VM basis
VM: {
{% for vmname, vm in vms.items() %}
{% set audio_low_latency = vm.qubes.get('guid', {}).get('audio_low_latency') %}
{% set allow_fullscreen = vm.qubes.get('guid', {}).get('allow_fullscreen') %}
{% if audio_low_latency or allow_fullscreen %}
{{ vmname }}: {
{% if audio_low_latency %}audio_low_latency = true;{% endif %}
{% if allow_fullscreen %}allow_fullscreen = true;{% endif %}
};
{% endif %}
{% endfor %}
};
"""
class ActionModule(object):
    # Ansible 1.x-style action plugin: renders the module-level `contents`
    # template (a qubes-guid configuration) and deploys it to the target as
    # /etc/qubes/guid.conf by delegating to the stock `template` action.
    TRANSFERS_FILES = True

    def __init__(self, runner):
        # Wrap the stock template action; all real work is delegated to it.
        self.ActionModule = template.ActionModule(runner)

    def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
        ''' handler for launcher operations

        Rejects simple (string) module args, injects the computed `vms`
        structure into the template variables, writes the template to a
        temporary file, and runs the `template` action with src pointing
        at that file and dest fixed to /etc/qubes/guid.conf.
        '''
        if module_args:
            raise errors.AnsibleError("This module does not accept simple module args: %r" % module_args)
        new_inject = dict(inject)
        # `vms` drives the per-VM sections of the guid.conf template.
        new_inject["vms"] = commonlib.inject_qubes(inject)
        with tempfile.NamedTemporaryFile() as x:
            # NOTE(review): writes a str to a binary-mode NamedTemporaryFile;
            # this only works on Python 2 — confirm expected interpreter.
            x.write(contents)
            x.flush()
            new_complex_args = dict(complex_args)
            new_complex_args["src"] = x.name
            new_complex_args["dest"] = "/etc/qubes/guid.conf"
            # Temp file must stay alive until the template action has read
            # it, hence the delegation happens inside the `with` block.
            return self.ActionModule.run(
                conn,
                tmp,
                'template',
                module_args,
                inject=new_inject,
                complex_args=new_complex_args
            )

View File

@ -1,21 +0,0 @@
import pipes
from ansible import errors
from ansible.plugins.action.command import ActionModule as command
class ActionModule(command):
    """Action plugin that applies a Salt state on the target via qubesctl.

    Rewrites the current task into a ``command`` task that runs
    ``qubesctl state.sls <sls> [saltenv=<env>]`` wrapped in ``bash -c``
    with DISPLAY set, then delegates execution to the stock command
    action.
    """

    def run(self, tmp=None, task_vars=None):
        args = self._task.args
        # Assemble the qubesctl invocation from the task's arguments.
        pieces = ["qubesctl", 'state.sls', args['sls']]
        if 'env' in args:
            pieces.append("saltenv=%s" % (args['env'],))
        quoted = " ".join(pipes.quote(piece) for piece in pieces)
        # DISPLAY=:0 is prepended before wrapping in bash -c — presumably
        # needed by GUI-touching salt states; verify against qubesctl docs.
        raw = "bash -c %s" % pipes.quote("DISPLAY=:0 " + quoted)
        # Mutate the task in place so the command action executes it.
        self._task.action = "command"
        args['_raw_params'] = raw
        # Drop the arguments this plugin consumed; the command module
        # would reject unknown options.
        for consumed in 'env sls'.split():
            if consumed in args:
                del args[consumed]
        return command.run(self, tmp, task_vars)

View File

@ -3,7 +3,7 @@
%define mybuildnumber %{?build_number}%{?!build_number:1}
Name: ansible-qubes
Version: 0.0.21
Version: 0.0.2
Release: %{mybuildnumber}%{?dist}
Summary: Inter-VM program execution for Qubes OS AppVMs and StandaloneVMs
BuildArch: noarch
@ -15,7 +15,7 @@ Source0: https://github.com/Rudd-O/%{name}/archive/{%version}.tar.gz#/%{n
BuildRequires: make
BuildRequires: gawk
Requires: python3
Requires: python2
%description
This package lets you execute programs between VMs as if it was SSH.

View File

@ -30,8 +30,6 @@ def inject_qubes(inject):
if vmitem == "template_vm":
if "_template" in hostvars and not "template_vm" in qubes:
qubes["template_vm"] = hostvars["_template"]
elif invhostname + "-template" in inject["groups"]:
qubes["template_vm"] = inject["groups"][invhostname + "-template"][0]
if vmitem in qubes:
t = qubes[vmitem]
if t is None or t.lower() == "none":
@ -96,8 +94,6 @@ def inject_qubes(inject):
pass
elif vmtype == "ProxyVM":
add(flags, "proxy")
elif vmtype == "DispVM":
pass
elif vmtype == "TemplateVM":
try:
qubes["source"] = qubes["template"]

View File

@ -3,40 +3,27 @@ import os
import sys
import tempfile
from ansible import errors
from ansible.plugins.action.template import ActionModule as template
from ansible.runner.action_plugins import template
sys.path.insert(0, os.path.dirname(__file__))
import commonlib
contents = """{{ vms | to_nice_yaml }}"""
topcontents = "{{ saltenv }}:\n '*':\n - {{ recipename }}\n"
def generate_datastructure(vms, task_vars):
def generate_datastructure(vms):
dc = collections.OrderedDict
d = dc()
for n, data in vms.items():
# This block will skip any VMs that are not in the groups defined in the 'formation_vm_groups' variable
# This allows you to deploy in multiple stages which is useful in cases
# where you want to create a template after another template is already provisioned.
if 'formation_vm_groups' in task_vars:
continueLoop = True
for group in task_vars['formation_vm_groups']:
if n in task_vars['hostvars'][n]['groups'][group]:
continueLoop = False
if continueLoop:
continue
qubes = data['qubes']
d[task_vars['hostvars'][n]['inventory_hostname_short']] = dc(qvm=['vm'])
vm = d[task_vars['hostvars'][n]['inventory_hostname_short']]
d[n] = dc(qvm=['vm'])
vm = d[n]
qvm = vm['qvm']
actions = []
qvm.append(dc(actions=actions))
for k in 'template source netvm'.split():
if qubes.get(k) and qubes.get(k) is not None:
qubes[k] = task_vars['hostvars'][qubes[k]]['inventory_hostname_short']
# Setup creation / cloning / existence test.
if 'template' in qubes:
creationparms = [
@ -86,12 +73,11 @@ def generate_datastructure(vms, task_vars):
qvm.append({'start': []})
# Collate and setup dependencies.
template = qubes.get('template', None)
source = qubes.get('source', None)
template = qubes.get('template') or qubes.get('source')
netvm = qubes.get('netvm', None)
require = []
if template or source:
require.append({'qvm': template or source})
if template:
require.append({'qvm': template})
if netvm != None:
require.append({'qvm': netvm})
if require:
@ -99,49 +85,73 @@ def generate_datastructure(vms, task_vars):
return d
class ActionModule(template):
class ActionModule(object):
TRANSFERS_FILES = True
def run(self, tmp=None, task_vars=None):
qubesdata = commonlib.inject_qubes(task_vars)
task_vars["vms"] = generate_datastructure(qubesdata, task_vars)
def __init__(self, runner):
self.ActionModule = template.ActionModule(runner)
def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
''' handler for launcher operations '''
if module_args:
raise errors.AnsibleError("This module does not accept simple module args: %r" % module_args)
new_inject = dict(inject)
qubesdata = commonlib.inject_qubes(inject)
new_inject["vms"] = generate_datastructure(qubesdata)
with tempfile.NamedTemporaryFile() as x:
x.write(contents.encode())
x.write(contents)
x.flush()
self._task.args['src'] = x.name
retval = template.run(self, tmp, task_vars)
if retval.get("failed"):
new_complex_args = dict(complex_args)
new_complex_args["src"] = x.name
retval = self.ActionModule.run(
conn,
tmp,
'template',
module_args,
inject=new_inject,
complex_args=new_complex_args
)
if retval.result.get("failed"):
return retval
with tempfile.NamedTemporaryFile() as y:
y.write(topcontents.encode())
y.write(topcontents)
y.flush()
# Create new tmp path -- the other was blown away.
tmp = self._make_tmp_path()
tmp = self.ActionModule.runner._make_tmp_path(conn)
self._task.args["src"] = y.name
namenoext = os.path.splitext(self._task.args["dest"])[0]
new_complex_args = dict(complex_args)
new_complex_args["src"] = y.name
namenoext = os.path.splitext(complex_args["dest"])[0]
dest = namenoext + ".top"
self._task.args["dest"] = dest
task_vars["recipename"] = os.path.basename(namenoext)
task_vars["saltenv"] = "user" if "user_salt" in dest.split(os.sep) else "base"
retval2 = template.run(self, tmp, task_vars)
if retval2.get("failed"):
new_complex_args["dest"] = dest
new_inject["recipename"] = os.path.basename(namenoext)
new_inject["saltenv"] = "user" if "user_salt" in dest.split(os.sep) else "base"
retval2 = self.ActionModule.run(
conn,
tmp,
'template',
module_args,
inject=new_inject,
complex_args=new_complex_args
)
if retval2.result.get("failed"):
return retval2
if not retval['changed'] and not retval2['changed']:
if not retval.result['changed'] and not retval2.result['changed']:
for c in ('path', 'size'):
retval[c] = [x[c] for x in (retval, retval2) if c in x]
retval.result[c] = [x.result[c] for x in (retval, retval2) if c in x.result]
return retval
elif retval['changed'] and retval2['changed']:
elif retval.result['changed'] and retval2.result['changed']:
for c in ('src', 'checksum', 'size', 'state', 'changed', 'md5sum', 'dest'):
retval[c] = [x[c] for x in (retval, retval2) if c in x]
retval.result[c] = [x.result[c] for x in (retval, retval2) if c in x.result]
return retval
elif retval['changed']:
elif retval.result['changed']:
return retval
elif retval2['changed']:
elif retval2.result['changed']:
return retval2
else:
assert 0, "not reached"

View File

@ -0,0 +1,43 @@
import pipes
from ansible import errors
class ActionModule(object):
    # Ansible 1.x-style action plugin: runs `qubesctl state.sls <sls>` on
    # the target via the `command` module and derives `changed` from the
    # Salt summary output.
    TRANSFERS_FILES = True

    def __init__(self, runner):
        self.runner = runner

    def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
        ''' handler for launcher operations

        Builds a qubesctl command line from complex_args (`sls`, optional
        `env`), appends test=True in check mode, executes it through the
        `command` module, and post-processes stdout to set `changed`.
        '''
        if module_args:
            raise errors.AnsibleError("This module does not accept simple module args: %r" % module_args)
        cmd = ["qubesctl"]
        cmd.append('state.sls')
        cmd.append(complex_args['sls'])
        if 'env' in complex_args:
            cmd.append("saltenv=%s" % (complex_args['env'],))
        if self.runner.noop_on_check(inject):
            # Check mode: ask Salt for a dry run instead of skipping.
            cmd.append("test=True")
        module_args = " ".join(pipes.quote(s) for s in cmd)
        retval = self.runner._execute_module(
            conn,
            tmp,
            'command',
            module_args,
            inject=inject,
            complex_args=complex_args
        )
        # Assumes the 4th-from-last stdout line is Salt's summary line —
        # TODO confirm this holds across qubesctl/salt versions.
        changeline = retval.result['stdout'].splitlines()[-4]
        if self.runner.noop_on_check(inject):
            # Presumably parses a summary like "Succeeded: N (changed=M)":
            # second whitespace token is the task count, the value between
            # "=" and ")" the unchanged count — verify against real output.
            numtasks = changeline.split()[1]
            numunchanged = changeline.split("=")[1].split(')')[0]
            retval.result['changed'] = numtasks != numunchanged
        else:
            retval.result['changed'] = 'changed=' in changeline
        return retval

View File

@ -0,0 +1,196 @@
# Based on local.py (c) 2012, Anon <anon@anon.anon>
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
__metaclass__ = type
DOCUMENTATION = """
author:
- Manuel Amador (Rudd-O)
connection: qubes
short_description: Execute tasks in Qubes VMs.
description:
- Use the qssh command to run commands in Qubes OS VMs.
version_added: "2.0"
requirements:
- qssh (Python script from ansible-qubes)
options:
management_proxy:
description:
- Management proxy. A machine accessible via SSH that can run qrexec.
default: ''
vars:
- name: management_proxy
env:
- name: MANAGEMENT_PROXY
"""
import distutils.spawn
import traceback
import os
import shlex
import subprocess
import pipes
from ansible import errors
from ansible import utils
from ansible.utils.display import Display
display = Display()
from ansible.plugins.connection import ConnectionBase
from ansible.utils.vars import combine_vars
from ansible.module_utils._text import to_bytes
from ansible import constants as C
BUFSIZE = 1024*1024
CONNECTION_TRANSPORT = "qubes"
CONNECTION_OPTIONS = {
'management_proxy': '--management-proxy',
}
class QubesRPCError(subprocess.CalledProcessError):
    """CalledProcessError variant whose message includes the captured output."""

    def __init__(self, returncode, cmd, output=None):
        # Let the parent store returncode, cmd and output.
        subprocess.CalledProcessError.__init__(self, returncode, cmd, output)

    def __str__(self):
        base = subprocess.CalledProcessError.__str__(self)
        return base + " while producing output %r" % self.output
class Connection(ConnectionBase):
    ''' Qubes based connections

    Executes commands inside a Qubes OS VM by delegating to the external
    `qrun` helper (shipped with ansible-qubes).  File transfer is done by
    piping data through `dd` running on the remote VM.
    '''

    transport = CONNECTION_TRANSPORT
    connection_options = CONNECTION_OPTIONS
    documentation = DOCUMENTATION
    has_pipelining = False
    become_from_methods = frozenset(["sudo"])
    # Absolute path to the qrun helper; resolved once in __init__.
    transport_cmd = None

    def __init__(self, play_context, new_stdin, *args, **kwargs):
        super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
        # Prefer qrun from PATH, then fall back to the copy shipped two
        # directories above this plugin (<plugin dir>/../../bin/qrun).
        self.transport_cmd = distutils.spawn.find_executable('qrun')
        if not self.transport_cmd:
            candidate = os.path.join(
                os.path.dirname(__file__),
                os.path.pardir,
                os.path.pardir,
                "bin",
                "qrun",
            )
            self.transport_cmd = candidate if os.path.exists(candidate) else None
        if not self.transport_cmd:
            raise errors.AnsibleError("qrun command not found in PATH")

    def _connect(self):
        '''Mark the connection established; qrun needs no persistent link.'''
        super(Connection, self)._connect()
        if not self._connected:
            display.vvv("THIS IS A QUBES VM", host=self._play_context.remote_addr)
            self._connected = True

    def _produce_command(self, cmd):
        '''Build the local argv that runs `cmd` inside the remote VM.

        `cmd` may be a shell string (split with shlex) or an argv list.
        Returns: [qrun, --proxy=... (optional), <vm addr>, cmd...].
        '''
        addr = self._play_context.remote_addr
        proxy = self.get_option("management_proxy")
        if proxy:
            proxy = ["--proxy=%s" % proxy] if proxy else []
            # Behind a management proxy only the short VM name is used.
            addr = addr.split(".")[0]
        else:
            proxy = []
        # BUGFIX: was isinstance(cmd, basestring), which raises NameError
        # on Python 3.  Testing for list/tuple is equivalent here and works
        # on both Python 2 and Python 3.
        if not isinstance(cmd, (list, tuple)):
            cmd = shlex.split(cmd)
        cmd = [self.transport_cmd] + proxy + [addr] + cmd
        display.vvv("COMMAND %s" % (cmd,), host=self._play_context.remote_addr)
        return cmd

    def exec_command(self, cmd, in_data=None, sudoable=False):
        '''Run a command on the VM; returns (returncode, stdout, stderr).'''
        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
        cmd = self._produce_command(cmd)
        cmd = [to_bytes(i, errors='surrogate_or_strict') for i in cmd]
        p = subprocess.Popen(cmd, shell=False, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        stdout, stderr = p.communicate(in_data)
        return (p.returncode, stdout, stderr)

    def put_file(self, in_path, out_path):
        ''' transfer a file from local to VM '''
        super(Connection, self).put_file(in_path, out_path)
        display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
        out_path = self._prefix_login_path(out_path)
        try:
            with open(in_path, 'rb') as in_file:
                try:
                    # Stream the local file into dd on the remote side.
                    cmd = self._produce_command(['dd', 'of=%s' % out_path, 'bs=%s' % BUFSIZE])
                    p = subprocess.Popen(cmd, shell=False, stdin=in_file,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
                except OSError:
                    # BUGFIX: message said "chroot" (copy-paste from the
                    # chroot plugin); this is the Qubes connection.
                    raise errors.AnsibleError("Qubes connection requires dd command on the VM")
                try:
                    stdout, stderr = p.communicate()
                except Exception:
                    # Narrowed from a bare except so KeyboardInterrupt /
                    # SystemExit are not swallowed into AnsibleError.
                    traceback.print_exc()
                    raise errors.AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
                if p.returncode != 0:
                    raise errors.AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
        except IOError:
            raise errors.AnsibleError("file or module does not exist at: %s" % in_path)

    def _prefix_login_path(self, remote_path):
        ''' Make sure that we put files into a standard path

            If a path is relative, then we need to choose where to put it.
            ssh chooses $HOME but we aren't guaranteed that a home dir will
            exist in any given chroot.  So for now we're choosing "/" instead.
            This also happens to be the former default.

            Can revisit using $HOME instead if it's a problem
        '''
        if not remote_path.startswith(os.path.sep):
            remote_path = os.path.join(os.path.sep, remote_path)
        return os.path.normpath(remote_path)

    def fetch_file(self, in_path, out_path):
        ''' fetch a file from VM to local '''
        super(Connection, self).fetch_file(in_path, out_path)
        display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
        in_path = self._prefix_login_path(in_path)
        devnull = open(os.devnull)
        try:
            # Read the remote file by running dd on the VM.
            cmd = self._produce_command(['dd', 'if=%s' % in_path, 'bs=%s' % BUFSIZE])
            p = subprocess.Popen(cmd, shell=False, stdin=devnull,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        except OSError:
            raise errors.AnsibleError("Qubes connection requires dd command on the VM")
        finally:
            # BUGFIX: the original leaked this file object.  The child owns
            # its own duplicate after fork, so closing ours is safe.
            devnull.close()
        with open(out_path, 'wb+') as out_file:
            try:
                chunk = p.stdout.read(BUFSIZE)
                while chunk:
                    out_file.write(chunk)
                    chunk = p.stdout.read(BUFSIZE)
            except Exception:
                # Narrowed from a bare except (see put_file).
                traceback.print_exc()
                raise errors.AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
        stdout, stderr = p.communicate()
        if p.returncode != 0:
            raise errors.AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))

    def close(self):
        ''' terminate the connection; nothing to do here '''
        super(Connection, self).close()
        self._connected = False

View File

@ -2,19 +2,17 @@
import base64
import pickle
import contextlib
import ctypes
import ctypes.util
import errno
import fcntl
import os
import pipes
try:
from shlex import quote
import queue
except ImportError:
from pipes import quote # noqa
try:
from queue import Queue
except ImportError:
from Queue import Queue # noqa
import Queue as queue
import select
import signal
import struct
@ -26,66 +24,61 @@ import time
import traceback
MAX_MUX_READ = 128 * 1024 # 64*1024*1024
MAX_MUX_READ = 128*1024 # 64*1024*1024
PACKLEN = 8
PACKFORMAT = "!HbIx"
def set_proc_name(newname):
    """Set the kernel-visible name of this process via prctl(PR_SET_NAME)."""
    from ctypes import cdll, byref, create_string_buffer
    name_bytes = newname.encode("utf-8") if isinstance(newname, str) else newname
    libc = cdll.LoadLibrary("libc.so.6")
    # NUL-terminated C buffer holding the new name.
    namebuf = create_string_buffer(len(name_bytes) + 1)
    namebuf.value = name_bytes
    # 15 == PR_SET_NAME (Linux-specific).
    libc.prctl(15, byref(namebuf), 0, 0, 0)
@contextlib.contextmanager
def mutexfile(filepath):
    """Context manager holding an exclusive lockf() lock on *filepath*.

    The lock file is opened under a restrictive umask (0o077) so other
    users cannot tamper with it; the previous umask is always restored.

    BUGFIX: the original only closed the file after a bare ``yield``, so
    an exception in the managed block leaked the file object and kept the
    lock held for the life of the process.  The yield is now wrapped in
    try/finally so the lock is always released.
    """
    oldumask = os.umask(0o077)
    try:
        f = open(filepath, "a")
    finally:
        os.umask(oldumask)
    try:
        fcntl.lockf(f.fileno(), fcntl.LOCK_EX)
        yield
    finally:
        # Closing the file also drops the lockf() lock.
        f.close()
def unset_cloexec(fd):
    """Clear FD_CLOEXEC on *fd* so the descriptor survives exec().

    BUGFIX: the diff residue carried two copies of the F_SETFD call; a
    single call is sufficient (the duplicate was a no-op but misleading).
    """
    old = fcntl.fcntl(fd, fcntl.F_GETFD)
    fcntl.fcntl(fd, fcntl.F_SETFD, old & ~fcntl.FD_CLOEXEC)
def openfdforappend(fd):
    """Wrap descriptor *fd* in an unbuffered binary append-mode stream.

    Pipes cannot be opened for append (fdopen raises EIO/ESPIPE); for
    those we fall back to plain write mode.  The descriptor is marked to
    survive exec() via unset_cloexec().

    BUGFIX: the diff residue contained the whole function body twice;
    this is the single, de-duplicated implementation.
    """
    f = None
    try:
        f = os.fdopen(fd, "ab", 0)
    except IOError as e:
        # Only swallow the "illegal seek" error pipes produce.
        if e.errno != errno.ESPIPE:
            raise
        f = os.fdopen(fd, "wb", 0)
    unset_cloexec(f.fileno())
    return f
def openfdforread(fd):
    """Wrap descriptor *fd* in an unbuffered binary read stream that
    survives exec() (FD_CLOEXEC cleared via unset_cloexec).

    BUGFIX: the diff residue contained the function body twice; this is
    the single, de-duplicated implementation.
    """
    f = os.fdopen(fd, "rb", 0)
    unset_cloexec(f.fileno())
    return f
debug_lock = threading.Lock()
debug_enabled = False
_startt = time.time()
class LoggingEmu:
class LoggingEmu():
def __init__(self, prefix):
self.prefix = prefix
syslog.openlog("bombshell-client.%s" % self.prefix)
def debug(self, *a, **kw):
if not debug_enabled:
return
self._print(syslog.LOG_DEBUG, *a, **kw)
def info(self, *a, **kw):
self._print(syslog.LOG_INFO, *a, **kw)
def error(self, *a, **kw):
self._print(syslog.LOG_ERR, *a, **kw)
def _print(self, prio, *a, **kw):
debug_lock.acquire()
global _startt
@ -95,126 +88,105 @@ class LoggingEmu:
string = a[0]
else:
string = a[0] % a[1:]
n = threading.current_thread().name
syslog.syslog(
prio,
("%.3f " % deltat) + n + ": " + string,
)
syslog.syslog(prio, ("%.3f " % deltat) + string)
finally:
debug_lock.release()
logging = None
def send_confirmation(chan, retval, errmsg):
chan.write(struct.pack("!H", retval))
ln = len(errmsg)
assert ln < 1 << 32
chan.write(struct.pack("!I", ln))
chan.write(errmsg)
chan.flush()
logging.debug(
"Sent confirmation on channel %s: %s %s",
chan,
retval,
errmsg,
)
chan.write(struct.pack("!H", retval))
l = len(errmsg)
assert l < 1<<32
chan.write(struct.pack("!I", l))
chan.write(errmsg)
chan.flush()
logging.debug("Sent confirmation on channel %s: %s %s", chan, retval, errmsg)
def recv_confirmation(chan):
logging.debug("Waiting for confirmation on channel %s", chan)
r = chan.read(2)
if len(r) == 0:
# This happens when the remote domain does not exist.
r, errmsg = 125, "domain does not exist"
logging.debug("No confirmation: %s %s", r, errmsg)
return r, errmsg
assert len(r) == 2, r
r = struct.unpack("!H", r)[0]
lc = chan.read(4)
assert len(lc) == 4, lc
lu = struct.unpack("!I", lc)[0]
errmsg = chan.read(lu)
logging.debug("Received confirmation: %s %s", r, errmsg)
return r, errmsg
logging.debug("Waiting for confirmation on channel %s", chan)
r = chan.read(2)
if len(r) == 0:
# This happens when the remote domain does not exist.
r, errmsg = 125, "domain does not exist"
logging.debug("No confirmation: %s %s", r, errmsg)
return r, errmsg
assert len(r) == 2, r
r = struct.unpack("!H", r)[0]
l = chan.read(4)
assert len(l) == 4, l
l = struct.unpack("!I", l)[0]
errmsg = chan.read(l)
logging.debug("Received confirmation: %s %s", r, errmsg)
return r, errmsg
class MyThread(threading.Thread):
def run(self):
try:
self._run()
except Exception:
n = threading.current_thread().name
logging.error("%s: unexpected exception", n)
tb = traceback.format_exc()
logging.error("%s: traceback: %s", n, tb)
logging.error("%s: exiting program", n)
os._exit(124)
def run(self):
try:
self._run()
except Exception as e:
logging.error("%s: unexpected exception", threading.currentThread())
tb = traceback.format_exc()
logging.error("%s: traceback: %s", threading.currentThread(), tb)
logging.error("%s: exiting program", threading.currentThread())
os._exit(124)
class SignalSender(MyThread):
def __init__(self, signals, sigqueue):
"""Handles signals by pushing them into a file-like object."""
threading.Thread.__init__(self)
self.daemon = True
self.queue = Queue()
self.sigqueue = sigqueue
for sig in signals:
signal.signal(sig, self.copy)
def copy(self, signum, frame):
self.queue.put(signum)
logging.debug("Signal %s pushed to queue", signum)
def __init__(self, signals, sigqueue):
"""Handles signals by pushing them into a file-like object."""
threading.Thread.__init__(self)
self.setDaemon(True)
self.queue = queue.Queue()
self.sigqueue = sigqueue
for sig in signals:
signal.signal(sig, self.copy)
def _run(self):
while True:
signum = self.queue.get()
logging.debug("Dequeued signal %s", signum)
if signum is None:
break
assert signum > 0
self.sigqueue.write(struct.pack("!H", signum))
self.sigqueue.flush()
logging.debug("Wrote signal %s to remote end", signum)
def copy(self, signum, frame):
self.queue.put(signum)
logging.debug("Signal %s pushed to queue", signum)
def _run(self):
while True:
signum = self.queue.get()
logging.debug("Dequeued signal %s", signum)
if signum is None:
break
assert signum > 0
self.sigqueue.write(struct.pack("!H", signum))
self.sigqueue.flush()
logging.debug("Wrote signal %s to remote end", signum)
class Signaler(MyThread):
def __init__(self, process, sigqueue):
"""Reads integers from a file-like object and relays that as kill()."""
threading.Thread.__init__(self)
self.daemon = True
self.process = process
self.sigqueue = sigqueue
def _run(self):
while True:
data = self.sigqueue.read(2)
if len(data) == 0:
logging.debug("Received no signal data")
break
assert len(data) == 2
signum = struct.unpack("!H", data)[0]
logging.debug(
"Received relayed signal %s, sending to process %s",
signum,
self.process.pid,
)
try:
self.process.send_signal(signum)
except BaseException as e:
logging.error(
"Failed to relay signal %s to process %s: %s",
signum,
self.process.pid,
e,
)
logging.debug("End of signaler")
def __init__(self, process, sigqueue):
"""Reads integers from a file-like object and relays that as kill()."""
threading.Thread.__init__(self)
self.setDaemon(True)
self.process = process
self.sigqueue = sigqueue
def _run(self):
while True:
data = self.sigqueue.read(2)
if len(data) == 0:
logging.debug("Received no signal data")
break
assert len(data) == 2
signum = struct.unpack("!H", data)[0]
logging.debug("Received relayed signal %s, sending to process %s", signum, self.process.pid)
self.process.send_signal(signum)
logging.debug("End of signaler")
def write(dst, buffer, ln):
def write(dst, buffer, l):
alreadywritten = 0
mv = memoryview(buffer)[:ln]
mv = memoryview(buffer)[:l]
while len(mv):
dst.write(mv)
writtenthisloop = len(mv)
@ -224,10 +196,10 @@ def write(dst, buffer, ln):
alreadywritten = alreadywritten + writtenthisloop
def copy(src, dst, buffer, ln):
def copy(src, dst, buffer, l):
alreadyread = 0
mv = memoryview(buffer)[:ln]
assert len(mv) == ln, "Buffer object is too small: %s %s" % (len(mv), ln)
mv = memoryview(buffer)[:l]
assert len(mv) == l, "Buffer object is too small: %s %s" % (len(mv), l)
while len(mv):
_, _, _ = select.select([src], (), ())
readthisloop = src.readinto(mv)
@ -235,253 +207,220 @@ def copy(src, dst, buffer, ln):
raise Exception("copy: Failed to read any bytes")
mv = mv[readthisloop:]
alreadyread = alreadyread + readthisloop
return write(dst, buffer, ln)
return write(dst, buffer, l)
class DataMultiplexer(MyThread):
def __init__(self, sources, sink):
threading.Thread.__init__(self)
self.daemon = True
self.sources = dict((s, num) for num, s in enumerate(sources))
self.sink = sink
def _run(self):
logging.debug(
"mux: Started with sources %s and sink %s", self.sources, self.sink
)
buffer = bytearray(MAX_MUX_READ)
while self.sources:
sources, _, x = select.select(
(s for s in self.sources), (), (s for s in self.sources)
)
assert not x, x
for s in sources:
n = self.sources[s]
logging.debug("mux: Source %s (%s) is active", n, s)
readthisloop = s.readinto(buffer)
if readthisloop == 0:
logging.debug(
"mux: Received no bytes from source %s, signaling"
" peer to close corresponding source",
n,
)
del self.sources[s]
header = struct.pack(PACKFORMAT, n, False, 0)
self.sink.write(header)
continue
ln = readthisloop
header = struct.pack(PACKFORMAT, n, True, ln)
self.sink.write(header)
write(self.sink, buffer, ln)
logging.debug("mux: End of data multiplexer")
def __init__(self, sources, sink):
threading.Thread.__init__(self)
self.setDaemon(True)
self.sources = dict((s,num) for num, s in enumerate(sources))
self.sink = sink
def _run(self):
logging.debug("mux: Started with sources %s and sink %s", self.sources, self.sink)
buffer = bytearray(MAX_MUX_READ)
while self.sources:
sources, _, x = select.select((s for s in self.sources), (), (s for s in self.sources))
assert not x, x
for s in sources:
n = self.sources[s]
logging.debug("mux: Source %s (%s) is active", n, s)
readthisloop = s.readinto(buffer)
if readthisloop == 0:
logging.debug("mux: Received no bytes from source %s, signaling peer to close corresponding source", n)
del self.sources[s]
header = struct.pack(PACKFORMAT, n, False, 0)
self.sink.write(header)
continue
l = readthisloop
header = struct.pack(PACKFORMAT, n, True, l)
self.sink.write(header)
write(self.sink, buffer, l)
logging.debug("mux: End of data multiplexer")
class DataDemultiplexer(MyThread):
def __init__(self, source, sinks):
threading.Thread.__init__(self)
self.daemon = True
self.sinks = dict(enumerate(sinks))
self.source = source
def _run(self):
logging.debug(
"demux: Started with source %s and sinks %s",
self.source,
self.sinks,
)
buffer = bytearray(MAX_MUX_READ)
while self.sinks:
r, _, x = select.select([self.source], (), [self.source])
assert not x, x
for s in r:
header = s.read(PACKLEN)
if header == b"":
logging.debug(
"demux: Received no bytes from source, closing sinks",
)
for sink in self.sinks.values():
sink.close()
self.sinks = []
break
n, active, ln = struct.unpack(PACKFORMAT, header)
if not active:
logging.debug(
"demux: Source %s inactive, closing matching sink %s",
s,
self.sinks[n],
)
self.sinks[n].close()
del self.sinks[n]
else:
copy(self.source, self.sinks[n], buffer, ln)
logging.debug("demux: End of data demultiplexer")
def __init__(self, source, sinks):
threading.Thread.__init__(self)
self.setDaemon(True)
self.sinks = dict(enumerate(sinks))
self.source = source
def _run(self):
logging.debug("demux: Started with source %s and sinks %s", self.source, self.sinks)
buffer = bytearray(MAX_MUX_READ)
while self.sinks:
r, _, x = select.select([self.source], (), [self.source])
assert not x, x
for s in r:
header = s.read(PACKLEN)
if header == "":
logging.debug("demux: Received no bytes from source, closing all sinks")
for sink in self.sinks.values():
sink.close()
self.sinks = []
break
n, active, l = struct.unpack(PACKFORMAT, header)
if not active:
logging.debug("demux: Source %s now inactive, closing corresponding sink %s", s, self.sinks[n])
self.sinks[n].close()
del self.sinks[n]
else:
copy(self.source, self.sinks[n], buffer, l)
logging.debug("demux: End of data demultiplexer")
def quotedargs():
return " ".join(quote(x) for x in sys.argv[1:])
def quotedargs_ellipsized(cmdlist):
text = " ".join(quote(x) for x in cmdlist)
if len(text) > 80:
text = text[:77] + "..."
return text
def main_master():
set_proc_name("bombshell-client (master) %s" % quotedargs())
global logging
logging = LoggingEmu("master")
global logging
logging = LoggingEmu("master")
logging.info("Started with arguments: %s", quotedargs_ellipsized(sys.argv[1:]))
logging.info("Started with arguments: %s", sys.argv[1:])
global debug_enabled
args = sys.argv[1:]
if args[0] == "-d":
args = args[1:]
debug_enabled = True
global debug_enabled
args = sys.argv[1:]
if args[0] == "-d":
args = args[1:]
debug_enabled = True
remote_vm = args[0]
remote_command = args[1:]
assert remote_command
remote_vm = args[0]
remote_command = args[1:]
assert remote_command
def anypython(exe):
return "` test -x %s && echo %s || echo python3`" % (
quote(exe),
quote(exe),
)
def anypython(exe):
return "` test -x %s && echo %s || echo python`" % (pipes.quote(exe),
pipes.quote(exe))
remote_helper_text = b"exec "
remote_helper_text += bytes(anypython(sys.executable), "utf-8")
remote_helper_text += bytes(" -u -c ", "utf-8")
remote_helper_text += bytes(
quote(open(__file__, "r").read()),
"ascii",
)
remote_helper_text += b" -d " if debug_enabled else b" "
remote_helper_text += base64.b64encode(pickle.dumps(remote_command, 2))
remote_helper_text += b"\n"
remote_helper_text = b"exec "
remote_helper_text += bytes(anypython(sys.executable), "utf-8")
remote_helper_text += bytes(" -u -c ", "utf-8")
remote_helper_text += bytes(pipes.quote(open(__file__, "r").read()), "ascii")
remote_helper_text += b" -d " if debug_enabled else b" "
remote_helper_text += base64.b64encode(pickle.dumps(remote_command, 2))
remote_helper_text += b"\n"
saved_stderr = openfdforappend(os.dup(sys.stderr.fileno()))
saved_stderr = openfdforappend(os.dup(sys.stderr.fileno()))
try:
with mutexfile(os.path.expanduser("~/.bombshell-lock")):
try:
p = subprocess.Popen(
["qrexec-client-vm", remote_vm, "qubes.VMShell"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
close_fds=True,
preexec_fn=os.setpgrp,
bufsize=0,
["qrexec-client-vm", remote_vm, "qubes.VMShell"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
close_fds=True,
preexec_fn=os.setpgrp,
bufsize=0,
)
except OSError as e:
except OSError as e:
logging.error("cannot launch qrexec-client-vm: %s", e)
return 127
logging.debug("Writing the helper text into the other side")
p.stdin.write(remote_helper_text)
p.stdin.flush()
logging.debug("Writing the helper text into the other side")
p.stdin.write(remote_helper_text)
p.stdin.flush()
confirmation, errmsg = recv_confirmation(p.stdout)
if confirmation != 0:
confirmation, errmsg = recv_confirmation(p.stdout)
if confirmation != 0:
logging.error("remote: %s", errmsg)
return confirmation
handled_signals = (
signal.SIGINT,
signal.SIGABRT,
signal.SIGALRM,
signal.SIGTERM,
signal.SIGUSR1,
signal.SIGUSR2,
signal.SIGTSTP,
signal.SIGCONT,
)
read_signals, write_signals = pairofpipes()
signaler = SignalSender(handled_signals, write_signals)
signaler.name = "master signaler"
signaler.start()
handled_signals = (
signal.SIGINT,
signal.SIGABRT,
signal.SIGALRM,
signal.SIGTERM,
signal.SIGUSR1,
signal.SIGUSR2,
signal.SIGTSTP,
signal.SIGCONT,
)
read_signals, write_signals = pairofpipes()
signaler = SignalSender(handled_signals, write_signals)
signaler.setName("master signaler")
signaler.start()
muxer = DataMultiplexer([sys.stdin, read_signals], p.stdin)
muxer.name = "master multiplexer"
muxer.start()
muxer = DataMultiplexer([sys.stdin, read_signals], p.stdin)
muxer.setName("master multiplexer")
muxer.start()
demuxer = DataDemultiplexer(p.stdout, [sys.stdout, saved_stderr])
demuxer.name = "master demultiplexer"
demuxer.start()
demuxer = DataDemultiplexer(p.stdout, [sys.stdout, saved_stderr])
demuxer.setName("master demultiplexer")
demuxer.start()
retval = p.wait()
logging.info("Return code %s for qubes.VMShell proxy", retval)
demuxer.join()
logging.info("Ending bombshell")
return retval
retval = p.wait()
logging.info("Return code %s for qubes.VMShell proxy", retval)
demuxer.join()
logging.info("Ending bombshell")
return retval
def pairofpipes():
    """Create a fresh OS pipe and return it as an unbuffered
    (reader, writer) pair of binary file objects."""
    rfd, wfd = os.pipe()
    reader = os.fdopen(rfd, "rb", 0)
    writer = os.fdopen(wfd, "wb", 0)
    return reader, writer
read, write = os.pipe()
return os.fdopen(read, "rb", 0), os.fdopen(write, "wb", 0)
def main_remote():
set_proc_name("bombshell-client (remote) %s" % quotedargs())
global logging
logging = LoggingEmu("remote")
global logging
logging = LoggingEmu("remote")
logging.info("Started with arguments: %s", quotedargs_ellipsized(sys.argv[1:]))
logging.info("Started with arguments: %s", sys.argv[1:])
global debug_enabled
if "-d" in sys.argv[1:]:
debug_enabled = True
cmd = sys.argv[2]
else:
cmd = sys.argv[1]
global debug_enabled
if "-d" in sys.argv[1:]:
debug_enabled = True
cmd = sys.argv[2]
else:
cmd = sys.argv[1]
cmd = pickle.loads(base64.b64decode(cmd))
logging.debug("Received command: %s", cmd)
cmd = pickle.loads(base64.b64decode(cmd))
logging.debug("Received command: %s", cmd)
nicecmd = " ".join(quote(a) for a in cmd)
try:
p = subprocess.Popen(
cmd,
# ["strace", "-s4096", "-ff"] + cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
bufsize=0,
)
send_confirmation(sys.stdout, 0, b"")
except OSError as e:
msg = "cannot execute %s: %s" % (nicecmd, e)
logging.error(msg)
send_confirmation(sys.stdout, 127, bytes(msg, "utf-8"))
sys.exit(0)
except BaseException as e:
msg = "cannot execute %s: %s" % (nicecmd, e)
logging.error(msg)
send_confirmation(sys.stdout, 126, bytes(msg, "utf-8"))
sys.exit(0)
nicecmd = " ".join(pipes.quote(a) for a in cmd)
try:
p = subprocess.Popen(
cmd,
# ["strace", "-s4096", "-ff"] + cmd,
stdin = subprocess.PIPE,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
close_fds=True,
bufsize=0,
)
send_confirmation(sys.stdout, 0, b"")
except OSError as e:
msg = "cannot execute %s: %s" % (nicecmd, e)
logging.error(msg)
send_confirmation(sys.stdout, 127, bytes(msg, "utf-8"))
sys.exit(0)
except BaseException as e:
msg = "cannot execute %s: %s" % (nicecmd, e)
logging.error(msg)
send_confirmation(sys.stdout, 126, bytes(msg, "utf-8"))
sys.exit(0)
signals_read, signals_written = pairofpipes()
signals_read, signals_written = pairofpipes()
signaler = Signaler(p, signals_read)
signaler.name = "remote signaler"
signaler.start()
signaler = Signaler(p, signals_read)
signaler.setName("remote signaler")
signaler.start()
demuxer = DataDemultiplexer(sys.stdin, [p.stdin, signals_written])
demuxer.name = "remote demultiplexer"
demuxer.start()
demuxer = DataDemultiplexer(sys.stdin, [p.stdin, signals_written])
demuxer.setName("remote demultiplexer")
demuxer.start()
muxer = DataMultiplexer([p.stdout, p.stderr], sys.stdout)
muxer.name = "remote multiplexer"
muxer.start()
muxer = DataMultiplexer([p.stdout, p.stderr], sys.stdout)
muxer.setName("remote multiplexer")
muxer.start()
nicecmd_ellipsized = quotedargs_ellipsized(cmd)
logging.info("Started %s", nicecmd_ellipsized)
logging.info("Started %s", nicecmd)
retval = p.wait()
logging.info("Return code %s for %s", retval, nicecmd_ellipsized)
muxer.join()
logging.info("Ending bombshell")
return retval
retval = p.wait()
logging.info("Return code %s for %s", retval, nicecmd)
muxer.join()
logging.info("Ending bombshell")
return retval
sys.stdin = openfdforread(sys.stdin.fileno())

View File

@ -1,9 +1,6 @@
#!/usr/bin/python3 -u
#!/usr/bin/env python
try:
from pipes import quote
except ImportError:
from shlex import quote
import pipes
import os
import subprocess
import sys
@ -31,29 +28,16 @@ else:
] + parms
if remotehost:
args = " ".join(quote(x) for x in parms)
with open(path_to_bombshell, "r") as f:
poop = quote(f.read())
therest_template = ('''
set -e
which bombshell-client >/dev/null 2>&1 && {
exec bombshell-client %s %s %s
} || {
echo %s > .bombshell-client.tmp
chmod +x .bombshell-client.tmp
mv -fT .bombshell-client.tmp .bombshell-client
exec ./.bombshell-client %s %s %s
}
''')
therest = therest_template % (
"-d" if os.getenv("BOMBSHELL_DEBUG") else "",
quote(host),
args,
poop,
"-d" if os.getenv("BOMBSHELL_DEBUG") else "",
quote(host),
args,
)
args = " ".join(pipes.quote(x) for x in parms)
poop = file(path_to_bombshell, "rb").read().encode("hex_codec")
therest_template = ("test -x ./.bombshell-client || "
"python -c 'import os; file(\"./.bombshell-client\", \"wb\").write(\"%s\".decode(\"hex_codec\")); os.chmod(\"./.bombshell-client\", 0700)' || "
"exit 127 ;"
"./.bombshell-client %s %s %s")
therest = therest_template % (poop,
"-d" if os.getenv("BOMBSHELL_DEBUG") else "",
pipes.quote(host),
args)
cmd = [
'ssh',
'-o', 'BatchMode yes',

View File

@ -1,4 +1,4 @@
#!/usr/bin/env python3
#!/usr/bin/env python
import sys
import os

View File

@ -1 +0,0 @@
["RELEASE": "q4.2 38 39"]

View File

@ -1,506 +0,0 @@
# Based on local.py (c) 2012, Anon <anon@anon.anon>
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
__metaclass__ = type
DOCUMENTATION = """
author:
- Manuel Amador (Rudd-O)
connection: qubes
short_description: Execute tasks in Qubes VMs.
description:
- Use the qrun command to run commands in Qubes OS VMs.
version_added: "2.0"
requirements:
- qrun (Python script from ansible-qubes)
options:
management_proxy:
description:
- Management proxy. A machine accessible via SSH that can run qrexec.
default: ''
vars:
- name: management_proxy
env:
- name: MANAGEMENT_PROXY
"""
import distutils.spawn
import inspect
import traceback
import textwrap
import os
import shlex
import sys
import subprocess
import pipes
from ansible import errors
from ansible import utils
from ansible.plugins.loader import connection_loader
from ansible.plugins.connection import ConnectionBase
from ansible.utils.vars import combine_vars
from ansible.module_utils._text import to_bytes
from ansible.utils.path import unfrackpath
from ansible import constants as C
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class x(object):
    # File-based stand-in for Ansible's Display: appends every message
    # (plus the host it concerns) to ~/ansible-qubes.log, so connection
    # tracing survives even when ansible's own display is unavailable.
    def vvvv(self, text, host=None):
        with open(os.path.expanduser("~/ansible-qubes.log"), "a") as f:
            print(text, host, file=f)

    def vvv(self, text, host=None):
        with open(os.path.expanduser("~/ansible-qubes.log"), "a") as f:
            print(text, host, file=f)


# Shadow whichever display was imported above with the file logger.
display = x()
# Transfer chunk size for put_file/fetch_file; see the inline caveat.
BUFSIZE = 64*1024  # any bigger and it causes issues because we don't read multiple chunks until completion

# Name under which this transport registers with Ansible.
CONNECTION_TRANSPORT = "qubes"

# Connection option -> command-line flag mapping for the qrun helper.
CONNECTION_OPTIONS = {
    'management_proxy': '--management-proxy',
}
def debug(text):
    # Debug tracing is deliberately disabled: the early return makes the
    # print below unreachable. Delete the `return` to re-enable tracing
    # to stderr.
    return
    print(text, file=sys.stderr)
def encode_exception(exc, stream):
    """Serialize *exc* onto binary *stream* in a length-prefixed format.

    Writes the class name, then the values of the attributes errno,
    filename, message and strerror, each as ``<len>\\n<payload>`` in
    ASCII — exactly what decode_exception() expects on the other side.
    Missing attributes are encoded as ``None`` (Python 3 exceptions have
    no ``.message``, which made the original ``getattr`` raise
    AttributeError).
    """
    stream.write('{}\n'.format(len(exc.__class__.__name__)).encode('ascii'))
    stream.write('{}'.format(exc.__class__.__name__).encode('ascii'))
    for attr in "errno", "filename", "message", "strerror":
        # Default to None instead of raising; the decoder maps the
        # literal string "None" back to None.
        value = '{}'.format(getattr(exc, attr, None))
        stream.write('{}\n'.format(len(value)).encode('ascii'))
        stream.write(value.encode('ascii'))
def decode_exception(stream):
    """Inverse of encode_exception(): rebuild an exception from *stream*.

    Reads the length-prefixed class name and the errno/filename/message/
    strerror values, returning a reconstructed exception instance.
    Accepts IOError/OSError and (a backward-compatible generalization)
    any builtin OSError subclass such as FileNotFoundError; anything
    else raises TypeError.

    Fixes vs. the original: the stream is binary under Python 3, so the
    payloads are decoded before the string comparisons — previously
    ``v == 'None'`` and ``name == "IOError"`` could never match bytes.
    """
    import builtins
    name_len = int(stream.readline(16))
    name = stream.read(name_len).decode('ascii')
    keys = ["errno", "filename", "message", "strerror"]
    vals = dict((a, None) for a in keys)
    for k in keys:
        v_len = int(stream.readline(16))
        v = stream.read(v_len).decode('ascii')
        if v == 'None':
            vals[k] = None
        else:
            try:
                vals[k] = int(v)
            except ValueError:
                vals[k] = v
    cls = getattr(builtins, name, None)
    if isinstance(cls, type) and issubclass(cls, OSError):
        # Covers IOError (an alias of OSError in Python 3) and subclasses
        # like FileNotFoundError that Python 3 raises instead of IOError.
        e = cls()
    else:
        raise TypeError("Exception %s cannot be decoded" % name)
    for k, v in vals.items():
        setattr(e, k, v)
    return e
def popen(cmd, in_data, outf=sys.stdout):
    """Run *cmd*, feed it *in_data* on stdin, and stream the result to *outf*.

    Wire format (ASCII, length/line prefixed):
        ``Y\\n<retcode>\\n<len(stdout)>\\n<stdout><len(stderr)>\\n<stderr>``
    or, when the process could not be spawned:
        ``N\\n`` followed by the encoded exception (encode_exception()).

    Fixes vs. the original: the error branch wrote the exception to the
    undefined name ``out`` (NameError) and then fell through to use the
    equally undefined ``ret``/``out``/``err``; it now reports the
    failure on *outf* and returns.
    """
    try:
        p = subprocess.Popen(
            cmd, shell=False, stdin=subprocess.PIPE,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        )
        out, err = p.communicate(in_data)
        ret = p.wait()
        outf.write('{}\n'.format('Y').encode('ascii'))
    except (IOError, OSError) as e:
        outf.write('{}\n'.format('N').encode('ascii'))
        encode_exception(e, outf)
        outf.flush()
        return
    outf.write('{}\n'.format(ret).encode('ascii'))
    outf.write('{}\n'.format(len(out)).encode('ascii'))
    outf.write(out)
    outf.write('{}\n'.format(len(err)).encode('ascii'))
    outf.write(err)
    outf.flush()
def put(out_path):
    # Receive a file over the stdin/stdout wire protocol and write it to
    # out_path.  Handshake: reply 'Y\n' once the file is open (or 'N\n'
    # plus an encoded exception), then read length-prefixed chunk groups
    # from stdin, acking each group with 'Y\n'; a zero-length prefix
    # marks end of file.  The per-group ack matches the per-chunk read
    # in Connection.put_file on the controller side.
    # NOTE(review): Python-2-era stream handling — this writes bytes to
    # sys.stdout and writes sys.stdin text into a binary file; under
    # Python 3 both must be rebound to binary buffers (the shipped
    # `preamble` does exactly that on the remote side) — confirm.
    debug("dest writing %s" % out_path)
    try:
        f = open(out_path, "wb")
        sys.stdout.write(b'Y\n')
    except (IOError, OSError) as e:
        sys.stdout.write(b'N\n')
        encode_exception(e, sys.stdout)
        return
    while True:
        chunksize = int(sys.stdin.readline(16))
        if not chunksize:
            debug("looks like we have no more to read")
            break
        while chunksize:
            debug(type(chunksize))
            # A short read is possible; loop until the whole group arrived.
            chunk = sys.stdin.read(chunksize)
            assert chunk
            debug("dest writing %s" % len(chunk))
            try:
                f.write(chunk)
            except (IOError, OSError) as e:
                sys.stdout.write(b'N\n')
                encode_exception(e, sys.stdout)
                f.close()
                return
            chunksize = chunksize - len(chunk)
            debug("remaining %s" % chunksize)
        sys.stdout.write(b'Y\n')
        sys.stdout.flush()
    try:
        f.flush()
    except (IOError, OSError) as e:
        sys.stdout.write(b'N\n')
        encode_exception(e, sys.stdout)
        return
    finally:
        debug("finished writing dest")
        f.close()
def fetch(in_path, bufsize):
    # Send the file at in_path to the controller over sys.stdout as
    # length-prefixed chunks of at most bufsize bytes; a zero length
    # marks end-of-file.  Errors are signalled as 'N\n' followed by the
    # encoded exception (see encode_exception()).
    # NOTE(review): assumes sys.stdout is a binary buffer under
    # Python 3 (the shipped `preamble` rebinds it) — confirm.
    debug("Fetching from remote %s" % in_path)
    try:
        f = open(in_path, "rb")
    except (IOError, OSError) as e:
        sys.stdout.write(b'N\n')
        encode_exception(e, sys.stdout)
        return
    try:
        while True:
            try:
                data = f.read(bufsize)
            except (IOError, OSError) as e:
                sys.stdout.write(b'N\n')
                encode_exception(e, sys.stdout)
                f.close()
                return
            sys.stdout.write('{}\n'.format(len(data)).encode('ascii'))
            if len(data) == 0:
                sys.stdout.flush()
                break
            sys.stdout.write(data)
            sys.stdout.flush()
    finally:
        f.close()
if __name__ == '__main__':
    # FIXME: WRITE TESTS!
    # Smoke test: round-trip an exception through encode/decode.
    # The original used Python 2's `import StringIO`; the wire format is
    # bytes, so io.BytesIO is the correct Python 3 buffer.
    import io
    s = io.BytesIO()
    try:
        open("/doesnotexist")
    except Exception as e:
        encode_exception(e, s)
    s.seek(0)
    dec = decode_exception(s)
# Bootstrap text sent to the remote `python3 -u -i` interpreter before
# `payload`: silences the interactive prompts and rebinds stdin/stdout
# to unbuffered binary streams so the wire protocol works bytewise.
preamble = b'''
from __future__ import print_function
import sys, os, subprocess
sys.ps1 = ''
sys.ps2 = ''
sys.stdin = os.fdopen(sys.stdin.fileno(), 'rb', 0) if hasattr(sys.stdin, 'buffer') else sys.stdin
sys.stdout = sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout
'''
# The remote agent: the source of the helpers above is shipped verbatim
# to the remote interpreter, which prints OK once the definitions ran.
payload = b'\n\n'.join(
    inspect.getsource(x).encode("utf-8")
    for x in (debug, encode_exception, popen, put, fetch)
) + \
b'''
_ = sys.stdout.write(b'OK\\n')
sys.stdout.flush()
'''
def _prefix_login_path(remote_path):
''' Make sure that we put files into a standard path
If a path is relative, then we need to choose where to put it.
ssh chooses $HOME but we aren't guaranteed that a home dir will
exist in any given chroot. So for now we're choosing "/" instead.
This also happens to be the former default.
Can revisit using $HOME instead if it's a problem
'''
if not remote_path.startswith(os.path.sep):
remote_path = os.path.join(os.path.sep, remote_path)
return os.path.normpath(remote_path)
class QubesRPCError(subprocess.CalledProcessError):
    """CalledProcessError variant whose message appends the captured output."""

    def __init__(self, returncode, cmd, output=None):
        subprocess.CalledProcessError.__init__(self, returncode, cmd, output)

    def __str__(self):
        base = subprocess.CalledProcessError.__str__(self)
        return "%s while producing output %r" % (base, self.output)
class Connection(ConnectionBase):
    ''' Qubes based connections '''

    transport = CONNECTION_TRANSPORT
    connection_options = CONNECTION_OPTIONS
    documentation = DOCUMENTATION
    become_from_methods = frozenset(["sudo"])
    has_pipelining = True
    # Command (as a list) used to reach the VM; resolved in __init__.
    transport_cmd = None
    # Persistent subprocess speaking the wire protocol; None when closed.
    _transport = None

    def set_options(self, task_keys=None, var_options=None, direct=None):
        super(Connection, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
        # FIXME HORRIBLE WORKAROUND FIXME
        # Drop the management proxy when the task is delegated elsewhere.
        if task_keys and task_keys['delegate_to'] and self._options and 'management_proxy' in self._options:
            self._options['management_proxy'] = ''

    def __init__(self, play_context, new_stdin, *args, **kwargs):
        # Locate the qrun helper: an explicit transport_cmd kwarg wins,
        # then PATH, then ../bin/qrun relative to this plugin.
        super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
        display.vvv("INSTANTIATING %s %s" % (os.getppid(), id(self)), host=play_context.remote_addr)
        if 'transport_cmd' in kwargs:
            self.transport_cmd = kwargs['transport_cmd']
            return
        self.transport_cmd = distutils.spawn.find_executable('qrun')
        if not self.transport_cmd:
            self.transport_cmd = os.path.join(
                os.path.dirname(__file__),
                os.path.pardir,
                "bin",
                "qrun",
            )
            if not os.path.exists(self.transport_cmd):
                self.transport_cmd = None
        if not self.transport_cmd:
            raise errors.AnsibleError("qrun command not found in PATH")
        self.transport_cmd = [self.transport_cmd]
        display.vvvv("INSTANTIATED %s" % (os.getppid(),), host=play_context.remote_addr)

    def _connect(self):
        '''Connect to the VM.

        Unlike in earlier editions of this program, in this edition the
        program attempts to create a persistent Python session with the
        machine it's trying to connect to, speeding up greatly the exec-
        ution of Ansible modules against VMs, whether local or remote
        via SSH.  In other words, we have pipelining now.
        '''
        display.vvv("CONNECTING %s %s %s" % (os.getppid(), id(self), self.get_option("management_proxy")), host=self._play_context.remote_addr)
        super(Connection, self)._connect()
        if not self._connected:
            # Launch a remote interactive python3 seeded with `preamble`,
            # then ship `payload` (the agent source) and wait for "OK".
            remote_cmd = [to_bytes(x, errors='surrogate_or_strict') for x in [
                # 'strace', '-s', '2048', '-o', '/tmp/log',
                'python3', '-u', '-i', '-c', preamble
            ]]
            addr = self._play_context.remote_addr
            proxy = to_bytes(self.get_option("management_proxy")) if self.get_option("management_proxy") else ""
            if proxy:
                proxy = [b"--proxy=%s" % proxy] if proxy else []
                # With a proxy, only the short VM name is passed along.
                addr = addr.split(".")[0]
            else:
                proxy = []
            addr = to_bytes(addr)
            cmd = [to_bytes(x) for x in self.transport_cmd] + proxy + [addr] + remote_cmd
            display.vvvv("CONNECT %s" % (cmd,), host=self._play_context.remote_addr)
            self._transport = subprocess.Popen(
                cmd, shell=False, stdin=subprocess.PIPE,
                stdout=subprocess.PIPE
            )
            try:
                self._transport.stdin.write(payload)
                self._transport.stdin.flush()
                ok = self._transport.stdout.readline(16)
                if not ok.startswith(b"OK\n"):
                    cmdquoted = " ".join(pipes.quote(x.decode("utf-8")) for x in cmd)
                    raise errors.AnsibleError("the remote end of the Qubes connection was not ready: %s yielded %r" % (cmdquoted, ok))
            except Exception:
                self._abort_transport()
                raise
            display.vvvv("CONNECTED %s" % (cmd,), host=self._play_context.remote_addr)
            self._connected = True

    def _abort_transport(self):
        # Kill the transport subprocess (best-effort) and then close.
        display.vvvv("ABORT", host=self._play_context.remote_addr)
        if self._transport:
            display.vvvv("ABORTING", host=self._play_context.remote_addr)
            try:
                self._transport.kill()
            except Exception:
                pass
            display.vvvv("ABORTED", host=self._play_context.remote_addr)
        self.close()

    def close(self):
        '''Terminate the connection.'''
        super(Connection, self).close()
        display.vvvv("CLOSE %s" % (os.getppid(),), host=self._play_context.remote_addr)
        if self._transport:
            display.vvvv("CLOSING %s" % (os.getppid(),), host=self._play_context.remote_addr)
            self._transport.stdin.close()
            self._transport.stdout.close()
            retcode = self._transport.wait()
            self._transport = None
            self._connected = False
        display.vvvv("CLOSED %s" % (os.getppid(),), host=self._play_context.remote_addr)

    def exec_command(self, cmd, in_data=None, sudoable=False):
        '''Run a command on the VM.'''
        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
        # Python 2/3 compatibility shim for the isinstance test below.
        try: basestring
        except NameError: basestring = str
        if isinstance(cmd, basestring):
            cmd = shlex.split(cmd)
        display.vvvv("EXEC %s" % cmd, host=self._play_context.remote_addr)
        # Ask the remote agent to popen() the command, then parse the
        # Y/N handshake followed by retcode, stdout and stderr, each
        # length-prefixed (see popen() for the wire format).
        try:
            payload = ('popen(%r, %r)\n\n' % (cmd, in_data)).encode("utf-8")
            self._transport.stdin.write(payload)
            self._transport.stdin.flush()
            yesno = self._transport.stdout.readline(2)
            debug("Reading yesno")
        except Exception:
            self._abort_transport()
            raise
        if yesno == "Y\n" or yesno == b"Y\n":
            try:
                retcode = self._transport.stdout.readline(16)
                debug("Reading retcode")
                try:
                    retcode = int(retcode)
                except Exception:
                    raise errors.AnsibleError("return code from remote end is unexpected: %r" % retcode)
                if retcode > 65536 or retcode < -65535:
                    raise errors.AnsibleError("return code from remote end is outside the range: %r" % retcode)
                stdout_len = self._transport.stdout.readline(16)
                try:
                    stdout_len = int(stdout_len)
                except Exception:
                    raise errors.AnsibleError("stdout size from remote end is unexpected: %r" % stdout_len)
                if stdout_len > 1024*1024*1024 or stdout_len < 0:
                    raise errors.AnsibleError("stdout size from remote end is invalid: %r" % stdout_len)
                stdout = self._transport.stdout.read(stdout_len) if stdout_len != 0 else ''
                if len(stdout) != stdout_len:
                    raise errors.AnsibleError("stdout size from remote end does not match actual stdout length: %s != %s" % (stdout_len, len(stdout)))
                stderr_len = self._transport.stdout.readline(16)
                try:
                    stderr_len = int(stderr_len)
                except Exception:
                    raise errors.AnsibleError("stderr size from remote end is unexpected: %r" % stderr_len)
                # NOTE(review): the next check tests stdout_len rather
                # than stderr_len — looks like a copy/paste slip; confirm.
                if stdout_len > 1024*1024*1024 or stdout_len < 0:
                    raise errors.AnsibleError("stderr size from remote end is invalid: %s" % stderr_len)
                stderr = self._transport.stdout.read(stderr_len) if stderr_len != 0 else ''
                if len(stderr) != stderr_len:
                    raise errors.AnsibleError("stderr size from remote end does not match actual stderr length: %s != %s" % (stderr_len, len(stderr)))
                return (retcode, stdout, stderr)
            except Exception:
                self._abort_transport()
                raise
        elif yesno == "N\n" or yesno == b"N\n":
            # NOTE(review): reads the exception from the transport's
            # stdin; presumably this should be stdout — confirm.
            exc = decode_exception(self._transport.stdin)
            raise exc
        else:
            self._abort_transport()
            raise errors.AnsibleError("pass/fail from remote end is unexpected: %r" % yesno)
        debug("finished popening on master")

    def put_file(self, in_path, out_path):
        '''Transfer a file from local to VM.'''
        super(Connection, self).put_file(in_path, out_path)
        display.vvvv("PUT %s to %s" % (in_path, out_path), host=self._play_context.remote_addr)
        out_path = _prefix_login_path(out_path)
        # Ask the remote agent to open the destination; then stream
        # length-prefixed chunks, reading a Y/N ack after each one.
        payload = 'put(%r)\n' % (out_path,)
        self._transport.stdin.write(payload.encode("utf-8"))
        self._transport.stdin.flush()
        yesno = self._transport.stdout.readline(2)
        if yesno == "Y\n" or yesno == b"Y\n":
            pass
        elif yesno == "N\n" or yesno == b"N\n":
            # NOTE(review): reads from stdin; presumably stdout — confirm.
            exc = decode_exception(self._transport.stdin)
            raise exc
        else:
            self._abort_transport()
            raise errors.AnsibleError("pass/fail from remote end is unexpected: %r" % yesno)
        with open(in_path, 'rb') as in_file:
            while True:
                chunk = in_file.read(BUFSIZE)
                debug("source writing %s bytes" % len(chunk))
                try:
                    self._transport.stdin.write(("%s\n" % len(chunk)).encode("utf-8"))
                    self._transport.stdin.flush()
                    if len(chunk) == 0:
                        break
                    self._transport.stdin.write(chunk)
                    self._transport.stdin.flush()
                except Exception:
                    self._abort_transport()
                    raise
                yesno = self._transport.stdout.readline(2)
                if yesno == "Y\n" or yesno == b"Y\n":
                    pass
                elif yesno == "N\n" or yesno == b"N\n":
                    exc = decode_exception(self._transport.stdin)
                    raise exc
                else:
                    self._abort_transport()
                    raise errors.AnsibleError("pass/fail from remote end is unexpected: %r" % yesno)
            debug("on this side it's all good")
            # Terminating zero-length prefix marks end of transfer.
            self._transport.stdin.write(("%s\n" % 0).encode("utf-8"))
            self._transport.stdin.flush()
        debug("finished writing source")

    def fetch_file(self, in_path, out_path):
        '''Fetch a file from VM to local.'''
        debug("fetching to local")
        super(Connection, self).fetch_file(in_path, out_path)
        display.vvvv("FETCH %s to %s" % (in_path, out_path), host=self._play_context.remote_addr)
        in_path = _prefix_login_path(in_path)
        # Ask the remote agent to fetch(); read length-prefixed chunks
        # until a zero-length prefix marks end-of-file.
        with open(out_path, "wb") as out_file:
            try:
                payload = 'fetch(%r, %r)\n' % (in_path, BUFSIZE)
                self._transport.stdin.write(payload.encode("utf-8"))
                self._transport.stdin.flush()
                while True:
                    chunk_len = self._transport.stdout.readline(16)
                    try:
                        chunk_len = int(chunk_len)
                    except Exception:
                        if chunk_len == "N\n":
                            exc = decode_exception(self._transport.stdin)
                            raise exc
                        else:
                            self._abort_transport()
                            raise errors.AnsibleError("chunk size from remote end is unexpected: %r" % chunk_len)
                    if chunk_len > BUFSIZE or chunk_len < 0:
                        raise errors.AnsibleError("chunk size from remote end is invalid: %r" % chunk_len)
                    if chunk_len == 0:
                        break
                    chunk = self._transport.stdout.read(chunk_len)
                    if len(chunk) != chunk_len:
                        raise errors.AnsibleError("stderr size from remote end does not match actual stderr length: %s != %s" % (chunk_len, len(chunk)))
                    out_file.write(chunk)
            except Exception:
                self._abort_transport()
                raise

View File

@ -1,103 +0,0 @@
import sys, os ; sys.path.append(os.path.dirname(__file__))
import contextlib
try:
from StringIO import StringIO
BytesIO = StringIO
except ImportError:
from io import StringIO, BytesIO
import unittest
import tempfile
import qubes
# Sample commands with their expected wire-format output from
# qubes.popen().  Populated only under Python 3; leaving the lists
# empty under Python 2 effectively skips the data-driven tests there.
if sys.version_info.major == 3:
    cases = [
        (['true'], '', b'Y\n0\n0\n0\n'),
        (['false'], '', b'Y\n1\n0\n0\n'),
        (['sh', '-c', 'echo yes'], '', b'Y\n0\n4\nyes\n0\n'),
        (['sh', '-c', 'echo yes >&2'], '', b'Y\n0\n0\n4\nyes\n'),
    ]
    # (cmd, stdin, expected retcode, expected stdout, expected stderr)
    cases_with_harness = [
        (['true'], '', 0, '', ''),
        (['false'], '', 1, '', ''),
        (['sh', '-c', 'echo yes'], '', 0, b'yes\n', ''),
        (['sh', '-c', 'echo yes >&2'], '', 0, '', b'yes\n'),
    ]
else:
    cases = []
    cases_with_harness = []
class MockPlayContext(object):
    # Minimal stand-in for Ansible's PlayContext: just the attributes
    # that qubes.Connection reads during these tests.
    shell = 'sh'
    executable = 'sh'
    become = False
    become_method = 'sudo'
    remote_addr = '127.0.0.7'
@contextlib.contextmanager
def local_connection():
    # Yield a qubes.Connection that runs everything locally: the
    # transport `sh -c "$@"` simply executes the agent command line on
    # this host, so no Qubes machinery is needed.  Always closed on exit.
    c = qubes.Connection(
        MockPlayContext(), None,
        transport_cmd=['sh', '-c', '"$@"']
    )
    c._options = {"management_proxy": None}
    try:
        yield c
    finally:
        c.close()
class TestBasicThings(unittest.TestCase):
    # End-to-end tests of the qubes connection plugin, run entirely
    # against a local sh-based transport (see local_connection()).

    def test_popen(self):
        # popen() must emit the exact wire format for each sample command.
        for cmd, in_, out in cases:
            outf = BytesIO()
            qubes.popen(cmd, in_, outf=outf)
            self.assertEqual(
                outf.getvalue(),
                out
            )

    def test_exec_command_with_harness(self):
        # Full exec round-trip: retcode/stdout/stderr, then verify the
        # transport was torn down by close().
        for cmd, in_, ret, out, err in cases_with_harness:
            with local_connection() as c:
                retcode, stdout, stderr = c.exec_command(cmd)
                self.assertEqual(ret, retcode)
                self.assertEqual(out, stdout)
                self.assertEqual(err, stderr)
            self.assertEqual(c._transport, None)

    def test_fetch_file_with_harness(self):
        # fetch_file() must round-trip file contents byte for byte.
        if sys.version_info.major == 2:
            in_text = "abcd"
        else:
            in_text = b"abcd"
        with tempfile.NamedTemporaryFile() as x:
            x.write(in_text)
            x.flush()
            with tempfile.NamedTemporaryFile() as y:
                with local_connection() as c:
                    c.fetch_file(in_path=x.name, out_path=y.name)
                y.seek(0)
                out_text = y.read()
        self.assertEqual(in_text, out_text)

    def test_put_file_with_harness(self):
        # put_file() must round-trip file contents byte for byte.
        if sys.version_info.major == 2:
            in_text = "abcd"
        else:
            in_text = b"abcd"
        with tempfile.NamedTemporaryFile() as x:
            x.write(in_text)
            x.flush()
            with tempfile.NamedTemporaryFile() as y:
                with local_connection() as c:
                    c.put_file(in_path=x.name, out_path=y.name)
                y.seek(0)
                out_text = y.read()
        self.assertEqual(in_text, out_text)

View File

@ -24,13 +24,13 @@ Integrate this software into your Ansible setup (within your `managevm` VM) by:
## Set up the policy file for `qubes.VMShell`
Edit (as `root`) the file `/etc/qubes/policy.d/80-ansible-qubes.policy`
Edit (as `root`) the file `/etc/qubes-rpc/policy/qubes.VMShell`
located on the file system of your `dom0`.
At the top of the file, add the following two lines:
```
qubes.VMShell * managevm * allow
managevm $anyvm allow
```
This first line lets `managevm` execute any commands on any VM on your
@ -41,21 +41,25 @@ security prompt to allow `qubes.VMShell` on the target VM you're managing.
Now save that file, and exit your editor.
If your dom0 has a file `/etc/qubes-rpc/policy/qubes.VMShell`,
you can delete it now. It is obsolete.
### Optional: allow `managevm` to manage `dom0`
The next step is to add the RPC service proper to dom0. Edit the file
Before the line you added in the previous step, add this line:
```
managevm dom0 allow
```
This line lets `managevm` execute any commands in `dom0`. Be sure you
understand the security implications of such a thing.
The next step is to add the RPC service proper. Edit the file
`/etc/qubes-rpc/qubes.VMShell` to have a single line that contains:
```
exec bash
```
Make the file executable.
That is it. `dom0` should work now. Note you do this at your own risk.
That is it. `dom0` should work now.
## Test `qrun` works

View File

@ -13,11 +13,11 @@ to set up a policy that allows us to remotely execute commands on any VM of the
network server, without having to be physically present to click any dialogs authorizing
the execution of those commands.
In `dom0` of your Qubes server, edit `/etc/qubes/policy.d/80-ansible-qubes.policy` to add,
In `dom0` of your Qubes server, edit `/etc/qubes-rpc/policy/qubes.VMShell` to add,
at the top of the file, a policy that looks like this:
```
qubes.VMShell * managevm * allow
exp-manager $anyvm allow
```
This tells Qubes OS that `exp-manager` is now authorized to run any command in any of the VMs.
@ -25,13 +25,13 @@ This tells Qubes OS that `exp-manager` is now authorized to run any command in a
**Security note**: this does mean that anyone with access to `exp-manager` can do
literally anything on any of your VMs in your Qubes OS server.
If that is not what you want, then replace `*` after `managevm` with the name of the VMs you
would like to manage. For example: if you would like `exp-manager` to be authorized to run
commands *only* on `exp-net`, then you can use the following policy:
If that is not what you want, then replace `$anyvm` with the name of the VMs you would like
to manage. For example: if you would like `exp-manager` to be authorized to run commands
*only* on `exp-net`, then you can use the following policy:
```
qubes.VMShell * exp-manager exp-net allow
qubes.VMShell * exp-manager @anyvm deny
exp-manager exp-net allow
exp-manager $anyvm deny
```
Try it out now. SSH from your manager machine into `exp-manager` and run:
@ -47,7 +47,7 @@ You should see `yes` followed by `exp-net` on the output side.
If you expect that you will need to run commands in `dom0` from your manager machine
(say, to create, stop, start and modify VMs in the Qubes OS server),
then you will have to create a file `/etc/qubes-rpc/qubes.VMShell` as `root` in `dom0`,
with the contents `/bin/bash` and permission mode `0755`. Doing this will enable you
with the contents `/bin/bash` and permission mode `0644`. Doing this will enable you
to run commands on `dom0` which you can subsequently test in `exp-manager` by running command:
```
@ -57,7 +57,7 @@ qvm-run dom0 'echo yes ; hostname'
like you did before.
**Security note**: this does mean that anyone with access to `exp-manager` can do
*literally anything* on your Qubes OS server. You have been warned.
literally anything on your Qubes OS server.
## Integrate your Ansible setup

View File

View File

@ -39,12 +39,3 @@ fail if the password does not exist:
```
thepassword: '{{ lookup("qubes-pass", "loginpwds/John Smith", create=False) }}'
```
If the password you expect to fetch is multiline/binary, you can retrieve
it correctly like this:
```
thepassword: '{{ lookup("qubes-pass", "loginpwds/John Smith", multiline=True) | b64encode }}'
```
then later base64 decode it on target.

View File

@ -1,30 +0,0 @@
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
import json
import sys
import subprocess
# Use Ansible's global display object when running under ansible;
# fall back to a private Display instance otherwise.
try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()

# Sentinel distinguishing "no default supplied" from default=None.
UNDEFINED = object()
class LookupModule(LookupBase):
    """Filter a data structure through the ``jq`` program.

    Usage: ``lookup('jq', data, '.expression')``.  *data* is serialized
    to JSON, piped through ``jq`` with the given expression, and the
    parsed JSON result is returned.

    Raises:
        subprocess.CalledProcessError: if jq exits non-zero or writes
            anything to stderr.
    """

    def run(self, args, variables):
        input_json = json.dumps(args[0])
        cmd = ["jq", args[1]]
        proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
        out, err = proc.communicate(input_json)
        status = proc.wait()
        if status != 0 or err:
            # The original had `assert 0, e` here, which masked the
            # intended CalledProcessError and disappears entirely under
            # `python -O`; removed so the real exception propagates.
            raise subprocess.CalledProcessError(status, cmd, out, err)
        return json.loads(out)

View File

@ -1,7 +1,6 @@
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
import sys
import subprocess
try:
@ -16,7 +15,7 @@ UNDEFINED = object()
class LookupModule(LookupBase):
def run(self, args, variables=None, vm=None, create=True, multiline=False, no_symbols=False, default=UNDEFINED):
def run(self, args, variables=None, vm=None, create=True, no_symbols=False, default=UNDEFINED):
ret = []
@ -27,14 +26,14 @@ class LookupModule(LookupBase):
cmd += ['get-or-generate']
if no_symbols:
cmd += ["-n"]
else:
cmd += ['get']
cmd += ['--', args[0]]
display.vvvv(u"Password lookup using command %s" % cmd)
try:
ret = subprocess.check_output(cmd)
if not multiline:
ret = ret[:-1].decode("utf-8")
ret = subprocess.check_output(cmd)[:-1]
except subprocess.CalledProcessError as e:
if e.returncode == 8:
if create or default is UNDEFINED:

View File

@ -1,42 +0,0 @@
# Integration-test playbook for the qubes-pass lookup plugin.
# Exercises create-on-first-lookup, idempotent re-lookup, and a
# multiline/binary round-trip via `qvm-pass insert -m` + b64encode.
- hosts: localhost
  gather_facts: no
  tasks:
  # Start from a clean slate; ignore "not found" on first run.
  - name: delete
    shell: |
      qvm-pass rm test-qubes-pass || true
  - name: test qubes-pass lookup
    shell: |
      password={{ lookup("qubes-pass", "test-qubes-pass") | quote }}
      echo "$password"
    register: firsttry
  - name: test qubes-pass lookup second time
    shell: |
      password={{ lookup("qubes-pass", "test-qubes-pass") | quote }}
      echo "$password"
    register: secondtry
  # The generated password must be stable across lookups.
  - name: evaluate if they match
    shell: |
      firsttry={{ firsttry.stdout | quote }}
      secondtry={{ secondtry.stdout | quote }}
      if [ "$firsttry" != "$secondtry" ] ; then echo no match ; exit 8 ; fi
  - name: delete
    shell: qvm-pass rm test-qubes-pass
  - name: generate randomness
    shell: |
      pwd
      dd if=/dev/urandom of=.randomdata bs=16384 count=1
  - name: test qubes-pass insert
    shell: |
      qvm-pass insert -m test-qubes-pass < .randomdata
  # Binary content must survive the multiline lookup + base64 round-trip.
  - name: evaluate if they match
    shell: |
      set -e
      echo {{ lookup("qubes-pass", "test-qubes-pass", create=False, multiline=True) | b64encode | quote }} | base64 -d > .randomdatafetched
      ls -la .randomdata .randomdatafetched
      diff .randomdata .randomdatafetched
  - name: delete
    shell: |
      qvm-pass rm test-qubes-pass
      rm -f .randomdata .randomdatafetched