Compare commits

..

No commits in common. "master" and "v0.0.8" have entirely different histories.

10 changed files with 332 additions and 454 deletions

View File

@ -89,21 +89,21 @@ Enabling bombshell-client access to dom0
----------------------------------------
`dom0` needs its `qubes.VMShell` service activated. As `root` in `dom0`,
create a file `/etc/qubes-rpc/qubes.VMshell` with mode `0755` and make
create a file `/etc/qubes-rpc/qubes.VMshell` with mode `0644` and make
sure its contents say `/bin/bash`.
You will then create a file `/etc/qubes/policy.d/80-ansible-qubes.policy`
with mode 0664, owned by `root` and group `qubes`. Add a policy
You will then create a file `/etc/qubes-rpc/policy/qubes.VMShell` with
mode 0664, owned by your login user, and group `qubes`. Add a policy
line towards the top of the file:
```
qubes.VMShell * controller * allow
yourvm dom0 ask
```
Where `controller` represents the name of the VM you will be executing
`bombshell-client` against `dom0` from.
Where `yourvm` represents the name of the VM you will be executing
`bombshell-client` against dom0 from.
That's it -- `bombshell-client` should work against `dom0` now. Of course,
That's it -- `bombshell-client` should work against dom0 now. Of course,
you can adjust the policy to have it not ask — do the security math
on what that implies.

View File

@ -96,8 +96,6 @@ def inject_qubes(inject):
pass
elif vmtype == "ProxyVM":
add(flags, "proxy")
elif vmtype == "DispVM":
pass
elif vmtype == "TemplateVM":
try:
qubes["source"] = qubes["template"]

View File

@ -8,24 +8,15 @@ from ansible.plugins.action.template import ActionModule as template
sys.path.insert(0, os.path.dirname(__file__))
import commonlib
contents = """{{ vms | to_nice_yaml }}"""
topcontents = "{{ saltenv }}:\n '*':\n - {{ recipename }}\n"
def generate_datastructure(vms, task_vars):
dc = collections.OrderedDict
d = dc()
for n, data in vms.items():
# This block will skip any VMs that are not in the groups defined in the 'formation_vm_groups' variable
# This allows you to deploy in multiple stages which is useful in cases
# where you want to create a template after another template is already provisioned.
if 'formation_vm_groups' in task_vars:
continueLoop = True
for group in task_vars['formation_vm_groups']:
if n in task_vars['hostvars'][n]['groups'][group]:
continueLoop = False
if continueLoop:
continue
qubes = data['qubes']
d[task_vars['hostvars'][n]['inventory_hostname_short']] = dc(qvm=['vm'])
vm = d[task_vars['hostvars'][n]['inventory_hostname_short']]
@ -99,6 +90,7 @@ def generate_datastructure(vms, task_vars):
return d
class ActionModule(template):
TRANSFERS_FILES = True
@ -107,7 +99,7 @@ class ActionModule(template):
qubesdata = commonlib.inject_qubes(task_vars)
task_vars["vms"] = generate_datastructure(qubesdata, task_vars)
with tempfile.NamedTemporaryFile() as x:
x.write(contents.encode())
x.write(contents)
x.flush()
self._task.args['src'] = x.name
retval = template.run(self, tmp, task_vars)
@ -115,7 +107,7 @@ class ActionModule(template):
return retval
with tempfile.NamedTemporaryFile() as y:
y.write(topcontents.encode())
y.write(topcontents)
y.flush()
# Create new tmp path -- the other was blown away.

View File

@ -3,7 +3,7 @@
%define mybuildnumber %{?build_number}%{?!build_number:1}
Name: ansible-qubes
Version: 0.0.21
Version: 0.0.8
Release: %{mybuildnumber}%{?dist}
Summary: Inter-VM program execution for Qubes OS AppVMs and StandaloneVMs
BuildArch: noarch

View File

@ -2,19 +2,17 @@
import base64
import pickle
import contextlib
import ctypes
import ctypes.util
import errno
import fcntl
import os
import pipes
try:
from shlex import quote
import queue
except ImportError:
from pipes import quote # noqa
try:
from queue import Queue
except ImportError:
from Queue import Queue # noqa
import Queue as queue
import select
import signal
import struct
@ -26,66 +24,61 @@ import time
import traceback
MAX_MUX_READ = 128 * 1024 # 64*1024*1024
MAX_MUX_READ = 128*1024 # 64*1024*1024
PACKLEN = 8
PACKFORMAT = "!HbIx"
def set_proc_name(newname):
    """Rename the current process (the name shown by ps/top) via prctl(2).

    Accepts either str or bytes; a str is encoded as UTF-8 first.
    Linux-only: loads libc directly and issues the raw prctl call.
    """
    from ctypes import cdll, byref, create_string_buffer

    name_bytes = newname.encode("utf-8") if isinstance(newname, str) else newname
    libc = cdll.LoadLibrary("libc.so.6")
    # 15 == PR_SET_NAME (see <linux/prctl.h>); prctl expects a
    # NUL-terminated buffer, hence the +1 on the allocation.
    namebuf = create_string_buffer(len(name_bytes) + 1)
    namebuf.value = name_bytes
    libc.prctl(15, byref(namebuf), 0, 0, 0)
@contextlib.contextmanager
def mutexfile(filepath):
    """Context manager serializing concurrent entrants via an exclusive
    advisory lock (lockf) on *filepath*.

    The lock file is created mode 0600 (umask 0o077 is applied only for
    the open and always restored). The lock is held for the duration of
    the with-body and released when the file is closed on exit.
    """
    oldumask = os.umask(0o077)
    try:
        f = open(filepath, "a")
    finally:
        os.umask(oldumask)
    fcntl.lockf(f.fileno(), fcntl.LOCK_EX)
    try:
        yield
    finally:
        # Bug fix: previously f.close() was unconditional code after the
        # yield, so an exception in the with-body leaked the fd and kept
        # the lock held until garbage collection. finally guarantees
        # release on both the normal and the exception path.
        f.close()
def unset_cloexec(fd):
old = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, old & ~fcntl.FD_CLOEXEC)
fcntl.fcntl(fd, fcntl.F_SETFD, old & ~ fcntl.FD_CLOEXEC)
def openfdforappend(fd):
f = None
try:
f = os.fdopen(fd, "ab", 0)
except IOError as e:
if e.errno != errno.ESPIPE:
raise
f = os.fdopen(fd, "wb", 0)
unset_cloexec(f.fileno())
return f
f = None
try:
f = os.fdopen(fd, "ab", 0)
except IOError as e:
if e.errno != errno.ESPIPE:
raise
f = os.fdopen(fd, "wb", 0)
unset_cloexec(f.fileno())
return f
def openfdforread(fd):
f = os.fdopen(fd, "rb", 0)
unset_cloexec(f.fileno())
return f
f = os.fdopen(fd, "rb", 0)
unset_cloexec(f.fileno())
return f
debug_lock = threading.Lock()
debug_enabled = False
_startt = time.time()
class LoggingEmu:
class LoggingEmu():
def __init__(self, prefix):
self.prefix = prefix
syslog.openlog("bombshell-client.%s" % self.prefix)
def debug(self, *a, **kw):
if not debug_enabled:
return
self._print(syslog.LOG_DEBUG, *a, **kw)
def info(self, *a, **kw):
self._print(syslog.LOG_INFO, *a, **kw)
def error(self, *a, **kw):
self._print(syslog.LOG_ERR, *a, **kw)
def _print(self, prio, *a, **kw):
debug_lock.acquire()
global _startt
@ -95,126 +88,108 @@ class LoggingEmu:
string = a[0]
else:
string = a[0] % a[1:]
n = threading.current_thread().name
syslog.syslog(
prio,
("%.3f " % deltat) + n + ": " + string,
)
syslog.syslog(prio, ("%.3f " % deltat) + threading.currentThread().getName() + ": " + string)
finally:
debug_lock.release()
logging = None
def send_confirmation(chan, retval, errmsg):
chan.write(struct.pack("!H", retval))
ln = len(errmsg)
assert ln < 1 << 32
chan.write(struct.pack("!I", ln))
chan.write(errmsg)
chan.flush()
logging.debug(
"Sent confirmation on channel %s: %s %s",
chan,
retval,
errmsg,
)
chan.write(struct.pack("!H", retval))
l = len(errmsg)
assert l < 1<<32
chan.write(struct.pack("!I", l))
chan.write(errmsg)
chan.flush()
logging.debug("Sent confirmation on channel %s: %s %s", chan, retval, errmsg)
def recv_confirmation(chan):
logging.debug("Waiting for confirmation on channel %s", chan)
r = chan.read(2)
if len(r) == 0:
# This happens when the remote domain does not exist.
r, errmsg = 125, "domain does not exist"
logging.debug("No confirmation: %s %s", r, errmsg)
return r, errmsg
assert len(r) == 2, r
r = struct.unpack("!H", r)[0]
lc = chan.read(4)
assert len(lc) == 4, lc
lu = struct.unpack("!I", lc)[0]
errmsg = chan.read(lu)
logging.debug("Received confirmation: %s %s", r, errmsg)
return r, errmsg
logging.debug("Waiting for confirmation on channel %s", chan)
r = chan.read(2)
if len(r) == 0:
# This happens when the remote domain does not exist.
r, errmsg = 125, "domain does not exist"
logging.debug("No confirmation: %s %s", r, errmsg)
return r, errmsg
assert len(r) == 2, r
r = struct.unpack("!H", r)[0]
l = chan.read(4)
assert len(l) == 4, l
l = struct.unpack("!I", l)[0]
errmsg = chan.read(l)
logging.debug("Received confirmation: %s %s", r, errmsg)
return r, errmsg
class MyThread(threading.Thread):
def run(self):
try:
self._run()
except Exception:
n = threading.current_thread().name
logging.error("%s: unexpected exception", n)
tb = traceback.format_exc()
logging.error("%s: traceback: %s", n, tb)
logging.error("%s: exiting program", n)
os._exit(124)
def run(self):
try:
self._run()
except Exception as e:
logging.error("%s: unexpected exception", threading.currentThread())
tb = traceback.format_exc()
logging.error("%s: traceback: %s", threading.currentThread(), tb)
logging.error("%s: exiting program", threading.currentThread())
os._exit(124)
class SignalSender(MyThread):
def __init__(self, signals, sigqueue):
"""Handles signals by pushing them into a file-like object."""
threading.Thread.__init__(self)
self.daemon = True
self.queue = Queue()
self.sigqueue = sigqueue
for sig in signals:
signal.signal(sig, self.copy)
def copy(self, signum, frame):
self.queue.put(signum)
logging.debug("Signal %s pushed to queue", signum)
def __init__(self, signals, sigqueue):
"""Handles signals by pushing them into a file-like object."""
threading.Thread.__init__(self)
self.setDaemon(True)
self.queue = queue.Queue()
self.sigqueue = sigqueue
for sig in signals:
signal.signal(sig, self.copy)
def _run(self):
while True:
signum = self.queue.get()
logging.debug("Dequeued signal %s", signum)
if signum is None:
break
assert signum > 0
self.sigqueue.write(struct.pack("!H", signum))
self.sigqueue.flush()
logging.debug("Wrote signal %s to remote end", signum)
def copy(self, signum, frame):
self.queue.put(signum)
logging.debug("Signal %s pushed to queue", signum)
def _run(self):
while True:
signum = self.queue.get()
logging.debug("Dequeued signal %s", signum)
if signum is None:
break
assert signum > 0
self.sigqueue.write(struct.pack("!H", signum))
self.sigqueue.flush()
logging.debug("Wrote signal %s to remote end", signum)
class Signaler(MyThread):
def __init__(self, process, sigqueue):
"""Reads integers from a file-like object and relays that as kill()."""
threading.Thread.__init__(self)
self.daemon = True
self.process = process
self.sigqueue = sigqueue
def _run(self):
while True:
data = self.sigqueue.read(2)
if len(data) == 0:
logging.debug("Received no signal data")
break
assert len(data) == 2
signum = struct.unpack("!H", data)[0]
logging.debug(
"Received relayed signal %s, sending to process %s",
signum,
self.process.pid,
)
try:
self.process.send_signal(signum)
except BaseException as e:
logging.error(
"Failed to relay signal %s to process %s: %s",
signum,
self.process.pid,
e,
)
logging.debug("End of signaler")
def __init__(self, process, sigqueue):
"""Reads integers from a file-like object and relays that as kill()."""
threading.Thread.__init__(self)
self.setDaemon(True)
self.process = process
self.sigqueue = sigqueue
def _run(self):
while True:
data = self.sigqueue.read(2)
if len(data) == 0:
logging.debug("Received no signal data")
break
assert len(data) == 2
signum = struct.unpack("!H", data)[0]
logging.debug("Received relayed signal %s, sending to process %s", signum, self.process.pid)
try:
self.process.send_signal(signum)
except BaseException as e:
logging.error("Failed to relay signal %s to process %s: %s", signum, self.process.pid, e)
logging.debug("End of signaler")
def write(dst, buffer, ln):
def write(dst, buffer, l):
alreadywritten = 0
mv = memoryview(buffer)[:ln]
mv = memoryview(buffer)[:l]
while len(mv):
dst.write(mv)
writtenthisloop = len(mv)
@ -224,10 +199,10 @@ def write(dst, buffer, ln):
alreadywritten = alreadywritten + writtenthisloop
def copy(src, dst, buffer, ln):
def copy(src, dst, buffer, l):
alreadyread = 0
mv = memoryview(buffer)[:ln]
assert len(mv) == ln, "Buffer object is too small: %s %s" % (len(mv), ln)
mv = memoryview(buffer)[:l]
assert len(mv) == l, "Buffer object is too small: %s %s" % (len(mv), l)
while len(mv):
_, _, _ = select.select([src], (), ())
readthisloop = src.readinto(mv)
@ -235,253 +210,220 @@ def copy(src, dst, buffer, ln):
raise Exception("copy: Failed to read any bytes")
mv = mv[readthisloop:]
alreadyread = alreadyread + readthisloop
return write(dst, buffer, ln)
return write(dst, buffer, l)
class DataMultiplexer(MyThread):
def __init__(self, sources, sink):
threading.Thread.__init__(self)
self.daemon = True
self.sources = dict((s, num) for num, s in enumerate(sources))
self.sink = sink
def _run(self):
logging.debug(
"mux: Started with sources %s and sink %s", self.sources, self.sink
)
buffer = bytearray(MAX_MUX_READ)
while self.sources:
sources, _, x = select.select(
(s for s in self.sources), (), (s for s in self.sources)
)
assert not x, x
for s in sources:
n = self.sources[s]
logging.debug("mux: Source %s (%s) is active", n, s)
readthisloop = s.readinto(buffer)
if readthisloop == 0:
logging.debug(
"mux: Received no bytes from source %s, signaling"
" peer to close corresponding source",
n,
)
del self.sources[s]
header = struct.pack(PACKFORMAT, n, False, 0)
self.sink.write(header)
continue
ln = readthisloop
header = struct.pack(PACKFORMAT, n, True, ln)
self.sink.write(header)
write(self.sink, buffer, ln)
logging.debug("mux: End of data multiplexer")
def __init__(self, sources, sink):
threading.Thread.__init__(self)
self.setDaemon(True)
self.sources = dict((s,num) for num, s in enumerate(sources))
self.sink = sink
def _run(self):
logging.debug("mux: Started with sources %s and sink %s", self.sources, self.sink)
buffer = bytearray(MAX_MUX_READ)
while self.sources:
sources, _, x = select.select((s for s in self.sources), (), (s for s in self.sources))
assert not x, x
for s in sources:
n = self.sources[s]
logging.debug("mux: Source %s (%s) is active", n, s)
readthisloop = s.readinto(buffer)
if readthisloop == 0:
logging.debug("mux: Received no bytes from source %s, signaling peer to close corresponding source", n)
del self.sources[s]
header = struct.pack(PACKFORMAT, n, False, 0)
self.sink.write(header)
continue
l = readthisloop
header = struct.pack(PACKFORMAT, n, True, l)
self.sink.write(header)
write(self.sink, buffer, l)
logging.debug("mux: End of data multiplexer")
class DataDemultiplexer(MyThread):
def __init__(self, source, sinks):
threading.Thread.__init__(self)
self.daemon = True
self.sinks = dict(enumerate(sinks))
self.source = source
def _run(self):
logging.debug(
"demux: Started with source %s and sinks %s",
self.source,
self.sinks,
)
buffer = bytearray(MAX_MUX_READ)
while self.sinks:
r, _, x = select.select([self.source], (), [self.source])
assert not x, x
for s in r:
header = s.read(PACKLEN)
if header == b"":
logging.debug(
"demux: Received no bytes from source, closing sinks",
)
for sink in self.sinks.values():
sink.close()
self.sinks = []
break
n, active, ln = struct.unpack(PACKFORMAT, header)
if not active:
logging.debug(
"demux: Source %s inactive, closing matching sink %s",
s,
self.sinks[n],
)
self.sinks[n].close()
del self.sinks[n]
else:
copy(self.source, self.sinks[n], buffer, ln)
logging.debug("demux: End of data demultiplexer")
def __init__(self, source, sinks):
threading.Thread.__init__(self)
self.setDaemon(True)
self.sinks = dict(enumerate(sinks))
self.source = source
def _run(self):
logging.debug("demux: Started with source %s and sinks %s", self.source, self.sinks)
buffer = bytearray(MAX_MUX_READ)
while self.sinks:
r, _, x = select.select([self.source], (), [self.source])
assert not x, x
for s in r:
header = s.read(PACKLEN)
if header == "":
logging.debug("demux: Received no bytes from source, closing all sinks")
for sink in self.sinks.values():
sink.close()
self.sinks = []
break
n, active, l = struct.unpack(PACKFORMAT, header)
if not active:
logging.debug("demux: Source %s now inactive, closing corresponding sink %s", s, self.sinks[n])
self.sinks[n].close()
del self.sinks[n]
else:
copy(self.source, self.sinks[n], buffer, l)
logging.debug("demux: End of data demultiplexer")
def quotedargs():
    """Return this process's own arguments (sys.argv[1:]) shell-quoted
    and joined into a single display string."""
    return " ".join(map(quote, sys.argv[1:]))
def quotedargs_ellipsized(cmdlist):
    """Shell-quote *cmdlist*, join with spaces, and truncate the result
    to at most 80 characters (ending in "...") for log-friendly output."""
    joined = " ".join(quote(word) for word in cmdlist)
    return joined if len(joined) <= 80 else joined[:77] + "..."
def main_master():
set_proc_name("bombshell-client (master) %s" % quotedargs())
global logging
logging = LoggingEmu("master")
global logging
logging = LoggingEmu("master")
logging.info("Started with arguments: %s", quotedargs_ellipsized(sys.argv[1:]))
logging.info("Started with arguments: %s", sys.argv[1:])
global debug_enabled
args = sys.argv[1:]
if args[0] == "-d":
args = args[1:]
debug_enabled = True
global debug_enabled
args = sys.argv[1:]
if args[0] == "-d":
args = args[1:]
debug_enabled = True
remote_vm = args[0]
remote_command = args[1:]
assert remote_command
remote_vm = args[0]
remote_command = args[1:]
assert remote_command
def anypython(exe):
return "` test -x %s && echo %s || echo python3`" % (
quote(exe),
quote(exe),
)
def anypython(exe):
return "` test -x %s && echo %s || echo python`" % (pipes.quote(exe),
pipes.quote(exe))
remote_helper_text = b"exec "
remote_helper_text += bytes(anypython(sys.executable), "utf-8")
remote_helper_text += bytes(" -u -c ", "utf-8")
remote_helper_text += bytes(
quote(open(__file__, "r").read()),
"ascii",
)
remote_helper_text += b" -d " if debug_enabled else b" "
remote_helper_text += base64.b64encode(pickle.dumps(remote_command, 2))
remote_helper_text += b"\n"
remote_helper_text = b"exec "
remote_helper_text += bytes(anypython(sys.executable), "utf-8")
remote_helper_text += bytes(" -u -c ", "utf-8")
remote_helper_text += bytes(pipes.quote(open(__file__, "r").read()), "ascii")
remote_helper_text += b" -d " if debug_enabled else b" "
remote_helper_text += base64.b64encode(pickle.dumps(remote_command, 2))
remote_helper_text += b"\n"
saved_stderr = openfdforappend(os.dup(sys.stderr.fileno()))
saved_stderr = openfdforappend(os.dup(sys.stderr.fileno()))
try:
with mutexfile(os.path.expanduser("~/.bombshell-lock")):
try:
p = subprocess.Popen(
["qrexec-client-vm", remote_vm, "qubes.VMShell"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
close_fds=True,
preexec_fn=os.setpgrp,
bufsize=0,
["qrexec-client-vm", remote_vm, "qubes.VMShell"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
close_fds=True,
preexec_fn=os.setpgrp,
bufsize=0,
)
except OSError as e:
except OSError as e:
logging.error("cannot launch qrexec-client-vm: %s", e)
return 127
logging.debug("Writing the helper text into the other side")
p.stdin.write(remote_helper_text)
p.stdin.flush()
logging.debug("Writing the helper text into the other side")
p.stdin.write(remote_helper_text)
p.stdin.flush()
confirmation, errmsg = recv_confirmation(p.stdout)
if confirmation != 0:
confirmation, errmsg = recv_confirmation(p.stdout)
if confirmation != 0:
logging.error("remote: %s", errmsg)
return confirmation
handled_signals = (
signal.SIGINT,
signal.SIGABRT,
signal.SIGALRM,
signal.SIGTERM,
signal.SIGUSR1,
signal.SIGUSR2,
signal.SIGTSTP,
signal.SIGCONT,
)
read_signals, write_signals = pairofpipes()
signaler = SignalSender(handled_signals, write_signals)
signaler.name = "master signaler"
signaler.start()
handled_signals = (
signal.SIGINT,
signal.SIGABRT,
signal.SIGALRM,
signal.SIGTERM,
signal.SIGUSR1,
signal.SIGUSR2,
signal.SIGTSTP,
signal.SIGCONT,
)
read_signals, write_signals = pairofpipes()
signaler = SignalSender(handled_signals, write_signals)
signaler.setName("master signaler")
signaler.start()
muxer = DataMultiplexer([sys.stdin, read_signals], p.stdin)
muxer.name = "master multiplexer"
muxer.start()
muxer = DataMultiplexer([sys.stdin, read_signals], p.stdin)
muxer.setName("master multiplexer")
muxer.start()
demuxer = DataDemultiplexer(p.stdout, [sys.stdout, saved_stderr])
demuxer.name = "master demultiplexer"
demuxer.start()
demuxer = DataDemultiplexer(p.stdout, [sys.stdout, saved_stderr])
demuxer.setName("master demultiplexer")
demuxer.start()
retval = p.wait()
logging.info("Return code %s for qubes.VMShell proxy", retval)
demuxer.join()
logging.info("Ending bombshell")
return retval
retval = p.wait()
logging.info("Return code %s for qubes.VMShell proxy", retval)
demuxer.join()
logging.info("Ending bombshell")
return retval
def pairofpipes():
read, write = os.pipe()
return os.fdopen(read, "rb", 0), os.fdopen(write, "wb", 0)
read, write = os.pipe()
return os.fdopen(read, "rb", 0), os.fdopen(write, "wb", 0)
def main_remote():
set_proc_name("bombshell-client (remote) %s" % quotedargs())
global logging
logging = LoggingEmu("remote")
global logging
logging = LoggingEmu("remote")
logging.info("Started with arguments: %s", quotedargs_ellipsized(sys.argv[1:]))
logging.info("Started with arguments: %s", sys.argv[1:])
global debug_enabled
if "-d" in sys.argv[1:]:
debug_enabled = True
cmd = sys.argv[2]
else:
cmd = sys.argv[1]
global debug_enabled
if "-d" in sys.argv[1:]:
debug_enabled = True
cmd = sys.argv[2]
else:
cmd = sys.argv[1]
cmd = pickle.loads(base64.b64decode(cmd))
logging.debug("Received command: %s", cmd)
cmd = pickle.loads(base64.b64decode(cmd))
logging.debug("Received command: %s", cmd)
nicecmd = " ".join(quote(a) for a in cmd)
try:
p = subprocess.Popen(
cmd,
# ["strace", "-s4096", "-ff"] + cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
bufsize=0,
)
send_confirmation(sys.stdout, 0, b"")
except OSError as e:
msg = "cannot execute %s: %s" % (nicecmd, e)
logging.error(msg)
send_confirmation(sys.stdout, 127, bytes(msg, "utf-8"))
sys.exit(0)
except BaseException as e:
msg = "cannot execute %s: %s" % (nicecmd, e)
logging.error(msg)
send_confirmation(sys.stdout, 126, bytes(msg, "utf-8"))
sys.exit(0)
nicecmd = " ".join(pipes.quote(a) for a in cmd)
try:
p = subprocess.Popen(
cmd,
# ["strace", "-s4096", "-ff"] + cmd,
stdin = subprocess.PIPE,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
close_fds=True,
bufsize=0,
)
send_confirmation(sys.stdout, 0, b"")
except OSError as e:
msg = "cannot execute %s: %s" % (nicecmd, e)
logging.error(msg)
send_confirmation(sys.stdout, 127, bytes(msg, "utf-8"))
sys.exit(0)
except BaseException as e:
msg = "cannot execute %s: %s" % (nicecmd, e)
logging.error(msg)
send_confirmation(sys.stdout, 126, bytes(msg, "utf-8"))
sys.exit(0)
signals_read, signals_written = pairofpipes()
signals_read, signals_written = pairofpipes()
signaler = Signaler(p, signals_read)
signaler.name = "remote signaler"
signaler.start()
signaler = Signaler(p, signals_read)
signaler.setName("remote signaler")
signaler.start()
demuxer = DataDemultiplexer(sys.stdin, [p.stdin, signals_written])
demuxer.name = "remote demultiplexer"
demuxer.start()
demuxer = DataDemultiplexer(sys.stdin, [p.stdin, signals_written])
demuxer.setName("remote demultiplexer")
demuxer.start()
muxer = DataMultiplexer([p.stdout, p.stderr], sys.stdout)
muxer.name = "remote multiplexer"
muxer.start()
muxer = DataMultiplexer([p.stdout, p.stderr], sys.stdout)
muxer.setName("remote multiplexer")
muxer.start()
nicecmd_ellipsized = quotedargs_ellipsized(cmd)
logging.info("Started %s", nicecmd_ellipsized)
logging.info("Started %s", nicecmd)
retval = p.wait()
logging.info("Return code %s for %s", retval, nicecmd_ellipsized)
muxer.join()
logging.info("Ending bombshell")
return retval
retval = p.wait()
logging.info("Return code %s for %s", retval, nicecmd)
muxer.join()
logging.info("Ending bombshell")
return retval
sys.stdin = openfdforread(sys.stdin.fileno())

View File

@ -1 +1 @@
["RELEASE": "q4.2 38 39"]
["RELEASE": "25 32 34 35"]

View File

@ -63,20 +63,14 @@ class x(object):
display = x()
BUFSIZE = 64*1024 # any bigger and it causes issues because we don't read multiple chunks until completion
BUFSIZE = 128*1024 # any bigger and it causes issues because we don't read multiple chunks until completion
CONNECTION_TRANSPORT = "qubes"
CONNECTION_OPTIONS = {
'management_proxy': '--management-proxy',
}
def debug(text):
return
print(text, file=sys.stderr)
def encode_exception(exc, stream):
debug("encoding exception")
stream.write('{}\n'.format(len(exc.__class__.__name__)).encode('ascii'))
stream.write('{}'.format(exc.__class__.__name__).encode('ascii'))
for attr in "errno", "filename", "message", "strerror":
@ -85,7 +79,6 @@ def encode_exception(exc, stream):
def decode_exception(stream):
debug("decoding exception")
name_len = stream.readline(16)
name_len = int(name_len)
name = stream.read(name_len)
@ -114,7 +107,6 @@ def decode_exception(stream):
def popen(cmd, in_data, outf=sys.stdout):
debug("popening on remote %s" % type(in_data))
try:
p = subprocess.Popen(
cmd, shell=False, stdin=subprocess.PIPE,
@ -132,11 +124,9 @@ def popen(cmd, in_data, outf=sys.stdout):
outf.write('{}\n'.format(len(err)).encode('ascii'))
outf.write(err)
outf.flush()
debug("finished popening")
def put(out_path):
debug("dest writing %s" % out_path)
try:
f = open(out_path, "wb")
sys.stdout.write(b'Y\n')
@ -146,25 +136,18 @@ def put(out_path):
return
while True:
chunksize = int(sys.stdin.readline(16))
if not chunksize:
debug("looks like we have no more to read")
if chunksize == 0:
break
while chunksize:
debug(type(chunksize))
chunk = sys.stdin.read(chunksize)
assert chunk
debug("dest writing %s" % len(chunk))
try:
f.write(chunk)
except (IOError, OSError) as e:
sys.stdout.write(b'N\n')
encode_exception(e, sys.stdout)
f.close()
return
chunksize = chunksize - len(chunk)
debug("remaining %s" % chunksize)
sys.stdout.write(b'Y\n')
sys.stdout.flush()
chunk = sys.stdin.read(chunksize)
assert len(chunk) == chunksize, ("Mismatch in chunk length", len(chunk), chunksize)
try:
f.write(chunk)
sys.stdout.write(b'Y\n')
except (IOError, OSError) as e:
sys.stdout.write(b'N\n')
encode_exception(e, sys.stdout)
f.close()
return
try:
f.flush()
except (IOError, OSError) as e:
@ -172,12 +155,10 @@ def put(out_path):
encode_exception(e, sys.stdout)
return
finally:
debug("finished writing dest")
f.close()
def fetch(in_path, bufsize):
debug("Fetching from remote %s" % in_path)
try:
f = open(in_path, "rb")
except (IOError, OSError) as e:
@ -225,7 +206,7 @@ sys.stdout = sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout
'''
payload = b'\n\n'.join(
inspect.getsource(x).encode("utf-8")
for x in (debug, encode_exception, popen, put, fetch)
for x in (encode_exception, popen, put, fetch)
) + \
b'''
@ -274,7 +255,7 @@ class Connection(ConnectionBase):
def set_options(self, task_keys=None, var_options=None, direct=None):
super(Connection, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
# FIXME HORRIBLE WORKAROUND FIXME
if task_keys and task_keys['delegate_to'] and self._options and 'management_proxy' in self._options:
if task_keys['delegate_to'] and 'management_proxy' in self._options:
self._options['management_proxy'] = ''
def __init__(self, play_context, new_stdin, *args, **kwargs):
@ -285,6 +266,7 @@ class Connection(ConnectionBase):
self.transport_cmd = kwargs['transport_cmd']
return
self.transport_cmd = distutils.spawn.find_executable('qrun')
self.transport_cmd = None
if not self.transport_cmd:
self.transport_cmd = os.path.join(
os.path.dirname(__file__),
@ -313,7 +295,7 @@ class Connection(ConnectionBase):
if not self._connected:
remote_cmd = [to_bytes(x, errors='surrogate_or_strict') for x in [
# 'strace', '-s', '2048', '-o', '/tmp/log',
'python3', '-u', '-i', '-c', preamble
'python', '-u', '-i', '-c', preamble
]]
addr = self._play_context.remote_addr
proxy = to_bytes(self.get_option("management_proxy")) if self.get_option("management_proxy") else ""
@ -375,18 +357,16 @@ class Connection(ConnectionBase):
cmd = shlex.split(cmd)
display.vvvv("EXEC %s" % cmd, host=self._play_context.remote_addr)
try:
payload = ('popen(%r, %r)\n\n' % (cmd, in_data)).encode("utf-8")
payload = ('popen(%r, %r)\n' % (cmd, in_data)).encode("utf-8")
self._transport.stdin.write(payload)
self._transport.stdin.flush()
yesno = self._transport.stdout.readline(2)
debug("Reading yesno")
except Exception:
self._abort_transport()
raise
if yesno == "Y\n" or yesno == b"Y\n":
try:
retcode = self._transport.stdout.readline(16)
debug("Reading retcode")
try:
retcode = int(retcode)
except Exception:
@ -423,7 +403,6 @@ class Connection(ConnectionBase):
else:
self._abort_transport()
raise errors.AnsibleError("pass/fail from remote end is unexpected: %r" % yesno)
debug("finished popening on master")
def put_file(self, in_path, out_path):
'''Transfer a file from local to VM.'''
@ -445,7 +424,6 @@ class Connection(ConnectionBase):
with open(in_path, 'rb') as in_file:
while True:
chunk = in_file.read(BUFSIZE)
debug("source writing %s bytes" % len(chunk))
try:
self._transport.stdin.write(("%s\n" % len(chunk)).encode("utf-8"))
self._transport.stdin.flush()
@ -465,15 +443,9 @@ class Connection(ConnectionBase):
else:
self._abort_transport()
raise errors.AnsibleError("pass/fail from remote end is unexpected: %r" % yesno)
debug("on this side it's all good")
self._transport.stdin.write(("%s\n" % 0).encode("utf-8"))
self._transport.stdin.flush()
debug("finished writing source")
def fetch_file(self, in_path, out_path):
'''Fetch a file from VM to local.'''
debug("fetching to local")
super(Connection, self).fetch_file(in_path, out_path)
display.vvvv("FETCH %s to %s" % (in_path, out_path), host=self._play_context.remote_addr)
in_path = _prefix_login_path(in_path)

View File

@ -24,13 +24,13 @@ Integrate this software into your Ansible setup (within your `managevm` VM) by:
## Set up the policy file for `qubes.VMShell`
Edit (as `root`) the file `/etc/qubes/policy.d/80-ansible-qubes.policy`
Edit (as `root`) the file `/etc/qubes-rpc/policy/qubes.VMShell`
located on the file system of your `dom0`.
At the top of the file, add the following two lines:
```
qubes.VMShell * managevm * allow
managevm $anyvm allow
```
This first line lets `managevm` execute any commands on any VM on your
@ -41,21 +41,25 @@ security prompt to allow `qubes.VMShell` on the target VM you're managing.
Now save that file, and exit your editor.
If your dom0 has a file `/etc/qubes-rpc/policy/qubes.VMShell`,
you can delete it now. It is obsolete.
### Optional: allow `managevm` to manage `dom0`
The next step is to add the RPC service proper to dom0. Edit the file
Before the line you added in the previous step, add this line:
```
managevm dom0 allow
```
This line lets `managevm` execute any commands in `dom0`. Be sure you
understand the security implications of such a thing.
The next step is to add the RPC service proper. Edit the file
`/etc/qubes-rpc/qubes.VMShell` to have a single line that contains:
```
exec bash
```
Make the file executable.
That is it. `dom0` should work now. Note you do this at your own risk.
That is it. `dom0` should work now.
## Test `qrun` works

View File

@ -13,11 +13,11 @@ to set up a policy that allows us to remotely execute commands on any VM of the
network server, without having to be physically present to click any dialogs authorizing
the execution of those commands.
In `dom0` of your Qubes server, edit `/etc/qubes/policy.d/80-ansible-qubes.policy` to add,
In `dom0` of your Qubes server, edit `/etc/qubes-rpc/policy/qubes.VMShell` to add,
at the top of the file, a policy that looks like this:
```
qubes.VMShell * managevm * allow
exp-manager $anyvm allow
```
This tells Qubes OS that `exp-manager` is now authorized to run any command in any of the VMs.
@ -25,13 +25,13 @@ This tells Qubes OS that `exp-manager` is now authorized to run any command in a
**Security note**: this does mean that anyone with access to `exp-manager` can do
literally anything on any of your VMs in your Qubes OS server.
If that is not what you want, then replace `*` after `managevm` with the name of the VMs you
would like to manage. For example: if you would like `exp-manager` to be authorized to run
commands *only* on `exp-net`, then you can use the following policy:
If that is not what you want, then replace `$anyvm` with the name of the VMs you would like
to manage. For example: if you would like `exp-manager` to be authorized to run commands
*only* on `exp-net`, then you can use the following policy:
```
qubes.VMShell * exp-manager exp-net allow
qubes.VMShell * exp-manager @anyvm deny
exp-manager exp-net allow
exp-manager $anyvm deny
```
Try it out now. SSH from your manager machine into `exp-manager` and run:
@ -47,7 +47,7 @@ You should see `yes` followed by `exp-net` on the output side.
If you expect that you will need to run commands in `dom0` from your manager machine
(say, to create, stop, start and modify VMs in the Qubes OS server),
then you will have to create a file `/etc/qubes-rpc/qubes.VMShell` as `root` in `dom0`,
with the contents `/bin/bash` and permission mode `0755`. Doing this will enable you
with the contents `/bin/bash` and permission mode `0644`. Doing this will enable you
to run commands on `dom0` which you can subsequently test in `exp-manager` by running command:
```
@ -57,7 +57,7 @@ qvm-run dom0 'echo yes ; hostname'
like you did before.
**Security note**: this does mean that anyone with access to `exp-manager` can do
*literally anything* on your Qubes OS server. You have been warned.
literally anything on your Qubes OS server.
## Integrate your Ansible setup

View File

@ -1,30 +0,0 @@
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
import json
import sys
import subprocess
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
UNDEFINED = object()
class LookupModule(LookupBase):
    """Ansible lookup that pipes a JSON-serializable value through the
    external ``jq`` program and returns the parsed result."""

    def run(self, args, variables):
        """Run jq over the lookup arguments.

        args[0] is the data to filter (any JSON-serializable value);
        args[1] is the jq filter expression.  Raises
        subprocess.CalledProcessError when jq exits nonzero or writes
        anything to stderr.
        """
        payload = json.dumps(args[0])
        cmd = ["jq", args[1]]
        p = subprocess.Popen(
            cmd,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
        )
        out, err = p.communicate(payload)
        ret = p.wait()
        if ret != 0 or err:
            # Bug fix: the original had `assert 0, e` here, which made the
            # raise below unreachable and vanishes under `python -O`;
            # raise the real exception directly instead.
            raise subprocess.CalledProcessError(ret, cmd, out, err)
        return json.loads(out)