Mirror of https://github.com/Rudd-O/ansible-qubes.git, synced 2025-03-01 14:22:33 +01:00

Compare commits

No commits in common. "master" and "v0.0.8" have entirely different histories.

Changed files include README.md (14 lines changed). In the hunks below, lines prefixed with "-" are from "master" and lines prefixed with "+" are from "v0.0.8"; unprefixed lines are shared context.
@@ -89,21 +89,21 @@ Enabling bombshell-client access to dom0
 ----------------------------------------
 
 `dom0` needs its `qubes.VMShell` service activated. As `root` in `dom0`,
-create a file `/etc/qubes-rpc/qubes.VMshell` with mode `0755` and make
+create a file `/etc/qubes-rpc/qubes.VMshell` with mode `0644` and make
 sure its contents say `/bin/bash`.
 
-You will then create a file `/etc/qubes/policy.d/80-ansible-qubes.policy`
-with mode 0664, owned by `root` and group `qubes`. Add a policy
+You will then create a file `/etc/qubes-rpc/policy/qubes.VMShell` with
+mode 0664, owned by your login user, and group `qubes`. Add a policy
 line towards the top of the file:
 
 ```
-qubes.VMShell * controller * allow
+yourvm dom0 ask
 ```
 
-Where `controller` represents the name of the VM you will be executing
-`bombshell-client` against `dom0` from.
+Where `yourvm` represents the name of the VM you will be executing
+`bombshell-client` against dom0 from.
 
-That's it -- `bombshell-client` should work against `dom0` now. Of course,
+That's it -- `bombshell-client` should work against dom0 now. Of course,
 you can adjust the policy to have it not ask — do the security math
 on what that implies.
 
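Both sides of this hunk describe the same manual dom0 setup: an RPC service file plus a policy entry that lets one VM call `qubes.VMShell`. As a point of reference, here is a minimal sketch (not repository code) of the master-side steps, assuming the canonical `qubes.VMShell` spelling and the placeholder VM name `controller` from the hunk above:

```python
#!/usr/bin/env python3
# Run as root in dom0. Creates the qubes.VMShell service file and prepends
# the allow rule shown in the hunk above. Paths and names are assumptions.
import os
import shutil

SERVICE = "/etc/qubes-rpc/qubes.VMShell"
POLICY = "/etc/qubes/policy.d/80-ansible-qubes.policy"
CONTROLLER = "controller"  # the VM that will run bombshell-client

# The RPC service simply hands the incoming call to bash.
with open(SERVICE, "w") as f:
    f.write("/bin/bash\n")
os.chmod(SERVICE, 0o755)

# Prepend the allow rule so it takes effect before any later rules.
entry = "qubes.VMShell * %s * allow\n" % CONTROLLER
existing = open(POLICY).read() if os.path.exists(POLICY) else ""
with open(POLICY, "w") as f:
    f.write(entry + existing)
os.chmod(POLICY, 0o664)
shutil.chown(POLICY, user="root", group="qubes")
```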
@@ -96,8 +96,6 @@ def inject_qubes(inject):
         pass
     elif vmtype == "ProxyVM":
         add(flags, "proxy")
-    elif vmtype == "DispVM":
-        pass
     elif vmtype == "TemplateVM":
         try:
             qubes["source"] = qubes["template"]
@@ -8,24 +8,15 @@ from ansible.plugins.action.template import ActionModule as template
 sys.path.insert(0, os.path.dirname(__file__))
 import commonlib
 
 
 contents = """{{ vms | to_nice_yaml }}"""
 topcontents = "{{ saltenv }}:\n '*':\n - {{ recipename }}\n"
 
 
 def generate_datastructure(vms, task_vars):
     dc = collections.OrderedDict
     d = dc()
     for n, data in vms.items():
-        # This block will skip any VMs that are not in the groups defined in the 'formation_vm_groups' variable
-        # This allows you to deploy in multiple stages which is useful in cases
-        # where you want to create a template after another template is already provisioned.
-        if 'formation_vm_groups' in task_vars:
-            continueLoop = True
-            for group in task_vars['formation_vm_groups']:
-                if n in task_vars['hostvars'][n]['groups'][group]:
-                    continueLoop = False
-            if continueLoop:
-                continue
 
         qubes = data['qubes']
         d[task_vars['hostvars'][n]['inventory_hostname_short']] = dc(qvm=['vm'])
         vm = d[task_vars['hostvars'][n]['inventory_hostname_short']]
@@ -99,6 +90,7 @@ def generate_datastructure(vms, task_vars):
 
     return d
 
+
 class ActionModule(template):
 
     TRANSFERS_FILES = True
@@ -107,7 +99,7 @@ class ActionModule(template):
         qubesdata = commonlib.inject_qubes(task_vars)
         task_vars["vms"] = generate_datastructure(qubesdata, task_vars)
         with tempfile.NamedTemporaryFile() as x:
-            x.write(contents.encode())
+            x.write(contents)
             x.flush()
             self._task.args['src'] = x.name
             retval = template.run(self, tmp, task_vars)
@@ -115,7 +107,7 @@ class ActionModule(template):
         return retval
 
         with tempfile.NamedTemporaryFile() as y:
-            y.write(topcontents.encode())
+            y.write(topcontents)
             y.flush()
 
         # Create new tmp path -- the other was blown away.
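The block removed on the v0.0.8 side filters VMs by the `formation_vm_groups` variable so a formation can be applied in stages. A self-contained sketch of that filtering logic, with simplified stand-ins for Ansible's `task_vars`/`hostvars` (illustrative only, not repository code):

```python
# Sketch of the group filter present on master: keep only VMs that belong to
# at least one of the groups listed in formation_vm_groups.
def vms_to_deploy(vm_names, task_vars):
    selected = []
    for n in vm_names:
        if 'formation_vm_groups' in task_vars:
            in_some_group = any(
                n in task_vars['hostvars'][n]['groups'][group]
                for group in task_vars['formation_vm_groups']
            )
            if not in_some_group:
                continue  # skip VMs outside the requested groups
        selected.append(n)
    return selected

# Example: only VMs in the "templates" group are kept in this staged run.
task_vars = {
    'formation_vm_groups': ['templates'],
    'hostvars': {
        'tpl-base': {'groups': {'templates': ['tpl-base']}},
        'work':     {'groups': {'templates': []}},
    },
}
print(vms_to_deploy(['tpl-base', 'work'], task_vars))  # ['tpl-base']
```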
@@ -3,7 +3,7 @@
 %define mybuildnumber %{?build_number}%{?!build_number:1}
 
 Name: ansible-qubes
-Version: 0.0.21
+Version: 0.0.8
 Release: %{mybuildnumber}%{?dist}
 Summary: Inter-VM program execution for Qubes OS AppVMs and StandaloneVMs
 BuildArch: noarch
@@ -2,19 +2,17 @@
 
 import base64
 import pickle
+import contextlib
+import ctypes
+import ctypes.util
 import errno
 import fcntl
 import os
+import pipes
 try:
-    from shlex import quote
+    import queue
 except ImportError:
-    from pipes import quote  # noqa
+    import Queue as queue
 
-try:
-    from queue import Queue
-except ImportError:
-    from Queue import Queue  # noqa
 import select
 import signal
 import struct
@@ -31,15 +29,16 @@ PACKLEN = 8
 PACKFORMAT = "!HbIx"
 
 
-def set_proc_name(newname):
-    from ctypes import cdll, byref, create_string_buffer
-    if isinstance(newname, str):
-        newname = newname.encode("utf-8")
-    libc = cdll.LoadLibrary("libc.so.6")
-    buff = create_string_buffer(len(newname) + 1)
-    buff.value = newname
-    libc.prctl(15, byref(buff), 0, 0, 0)
+@contextlib.contextmanager
+def mutexfile(filepath):
+    oldumask = os.umask(0o077)
+    try:
+        f = open(filepath, "a")
+    finally:
+        os.umask(oldumask)
+    fcntl.lockf(f.fileno(), fcntl.LOCK_EX)
+    yield
+    f.close()
 
 
 def unset_cloexec(fd):
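The v0.0.8 side replaces process renaming with a lock file that serializes qrexec sessions. A runnable sketch of the same idea, under the assumption that holding an `fcntl` lock on `~/.bombshell-lock` for the duration of the session is the goal (not repository code):

```python
#!/usr/bin/env python3
# Minimal sketch of the mutexfile idea: an advisory lock on a private file so
# only one bombshell-client instance talks to qrexec at a time.
import contextlib
import fcntl
import os


@contextlib.contextmanager
def lockfile(path):
    old = os.umask(0o077)          # keep the lock file private to the user
    try:
        f = open(path, "a")
    finally:
        os.umask(old)
    fcntl.lockf(f.fileno(), fcntl.LOCK_EX)   # blocks until the lock is free
    try:
        yield
    finally:
        f.close()                  # closing the file drops the lock


if __name__ == "__main__":
    with lockfile(os.path.expanduser("~/.bombshell-lock")):
        print("holding the lock")
```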
@@ -68,24 +67,18 @@ def openfdforread(fd):
 debug_lock = threading.Lock()
 debug_enabled = False
 _startt = time.time()
+class LoggingEmu():
 
-class LoggingEmu:
     def __init__(self, prefix):
         self.prefix = prefix
         syslog.openlog("bombshell-client.%s" % self.prefix)
 
     def debug(self, *a, **kw):
         if not debug_enabled:
             return
         self._print(syslog.LOG_DEBUG, *a, **kw)
 
     def info(self, *a, **kw):
         self._print(syslog.LOG_INFO, *a, **kw)
 
     def error(self, *a, **kw):
         self._print(syslog.LOG_ERR, *a, **kw)
 
     def _print(self, prio, *a, **kw):
         debug_lock.acquire()
         global _startt
@@ -95,31 +88,20 @@ class LoggingEmu:
                 string = a[0]
             else:
                 string = a[0] % a[1:]
-            n = threading.current_thread().name
-            syslog.syslog(
-                prio,
-                ("%.3f " % deltat) + n + ": " + string,
-            )
+            syslog.syslog(prio, ("%.3f " % deltat) + threading.currentThread().getName() + ": " + string)
         finally:
             debug_lock.release()
 
 
 logging = None
 
 
 def send_confirmation(chan, retval, errmsg):
     chan.write(struct.pack("!H", retval))
-    ln = len(errmsg)
-    assert ln < 1 << 32
-    chan.write(struct.pack("!I", ln))
+    l = len(errmsg)
+    assert l < 1<<32
+    chan.write(struct.pack("!I", l))
     chan.write(errmsg)
     chan.flush()
-    logging.debug(
-        "Sent confirmation on channel %s: %s %s",
-        chan,
-        retval,
-        errmsg,
-    )
+    logging.debug("Sent confirmation on channel %s: %s %s", chan, retval, errmsg)
 
 
 def recv_confirmation(chan):
@@ -132,33 +114,34 @@ def recv_confirmation(chan):
         return r, errmsg
     assert len(r) == 2, r
     r = struct.unpack("!H", r)[0]
-    lc = chan.read(4)
-    assert len(lc) == 4, lc
-    lu = struct.unpack("!I", lc)[0]
-    errmsg = chan.read(lu)
+    l = chan.read(4)
+    assert len(l) == 4, l
+    l = struct.unpack("!I", l)[0]
+    errmsg = chan.read(l)
     logging.debug("Received confirmation: %s %s", r, errmsg)
     return r, errmsg
 
 
 class MyThread(threading.Thread):
 
     def run(self):
         try:
             self._run()
-        except Exception:
-            n = threading.current_thread().name
-            logging.error("%s: unexpected exception", n)
+        except Exception as e:
+            logging.error("%s: unexpected exception", threading.currentThread())
             tb = traceback.format_exc()
-            logging.error("%s: traceback: %s", n, tb)
-            logging.error("%s: exiting program", n)
+            logging.error("%s: traceback: %s", threading.currentThread(), tb)
+            logging.error("%s: exiting program", threading.currentThread())
             os._exit(124)
 
 
 class SignalSender(MyThread):
 
     def __init__(self, signals, sigqueue):
         """Handles signals by pushing them into a file-like object."""
         threading.Thread.__init__(self)
-        self.daemon = True
-        self.queue = Queue()
+        self.setDaemon(True)
+        self.queue = queue.Queue()
         self.sigqueue = sigqueue
         for sig in signals:
             signal.signal(sig, self.copy)
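Both sides use the same confirmation framing, differing only in variable naming: a 2-byte big-endian return value, a 4-byte length, then the error message bytes. A small sketch of that framing (illustrative, not repository code):

```python
# Sketch of the send_confirmation()/recv_confirmation() wire format:
# !H return value, !I message length, then the message itself.
import io
import struct

def pack_confirmation(retval, errmsg):
    assert len(errmsg) < 1 << 32
    return struct.pack("!H", retval) + struct.pack("!I", len(errmsg)) + errmsg

def unpack_confirmation(chan):
    retval = struct.unpack("!H", chan.read(2))[0]
    length = struct.unpack("!I", chan.read(4))[0]
    return retval, chan.read(length)

frame = pack_confirmation(0, b"")
print(unpack_confirmation(io.BytesIO(frame)))    # (0, b'')
frame = pack_confirmation(127, b"exec failed")
print(unpack_confirmation(io.BytesIO(frame)))    # (127, b'exec failed')
```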
@@ -180,10 +163,11 @@ class SignalSender(MyThread):
 
 
 class Signaler(MyThread):
 
     def __init__(self, process, sigqueue):
         """Reads integers from a file-like object and relays that as kill()."""
         threading.Thread.__init__(self)
-        self.daemon = True
+        self.setDaemon(True)
         self.process = process
         self.sigqueue = sigqueue
 
@@ -195,26 +179,17 @@ class Signaler(MyThread):
                 break
             assert len(data) == 2
             signum = struct.unpack("!H", data)[0]
-            logging.debug(
-                "Received relayed signal %s, sending to process %s",
-                signum,
-                self.process.pid,
-            )
+            logging.debug("Received relayed signal %s, sending to process %s", signum, self.process.pid)
             try:
                 self.process.send_signal(signum)
             except BaseException as e:
-                logging.error(
-                    "Failed to relay signal %s to process %s: %s",
-                    signum,
-                    self.process.pid,
-                    e,
-                )
+                logging.error("Failed to relay signal %s to process %s: %s", signum, self.process.pid, e)
         logging.debug("End of signaler")
 
 
-def write(dst, buffer, ln):
+def write(dst, buffer, l):
     alreadywritten = 0
-    mv = memoryview(buffer)[:ln]
+    mv = memoryview(buffer)[:l]
     while len(mv):
         dst.write(mv)
         writtenthisloop = len(mv)
@@ -224,10 +199,10 @@ def write(dst, buffer, ln):
         alreadywritten = alreadywritten + writtenthisloop
 
 
-def copy(src, dst, buffer, ln):
+def copy(src, dst, buffer, l):
     alreadyread = 0
-    mv = memoryview(buffer)[:ln]
-    assert len(mv) == ln, "Buffer object is too small: %s %s" % (len(mv), ln)
+    mv = memoryview(buffer)[:l]
+    assert len(mv) == l, "Buffer object is too small: %s %s" % (len(mv), l)
     while len(mv):
         _, _, _ = select.select([src], (), ())
         readthisloop = src.readinto(mv)
@@ -235,104 +210,77 @@ def copy(src, dst, buffer, ln):
             raise Exception("copy: Failed to read any bytes")
         mv = mv[readthisloop:]
         alreadyread = alreadyread + readthisloop
-    return write(dst, buffer, ln)
+    return write(dst, buffer, l)
 
 
 class DataMultiplexer(MyThread):
 
     def __init__(self, sources, sink):
         threading.Thread.__init__(self)
-        self.daemon = True
+        self.setDaemon(True)
         self.sources = dict((s,num) for num, s in enumerate(sources))
         self.sink = sink
 
     def _run(self):
-        logging.debug(
-            "mux: Started with sources %s and sink %s", self.sources, self.sink
-        )
+        logging.debug("mux: Started with sources %s and sink %s", self.sources, self.sink)
         buffer = bytearray(MAX_MUX_READ)
         while self.sources:
-            sources, _, x = select.select(
-                (s for s in self.sources), (), (s for s in self.sources)
-            )
+            sources, _, x = select.select((s for s in self.sources), (), (s for s in self.sources))
             assert not x, x
             for s in sources:
                 n = self.sources[s]
                 logging.debug("mux: Source %s (%s) is active", n, s)
                 readthisloop = s.readinto(buffer)
                 if readthisloop == 0:
-                    logging.debug(
-                        "mux: Received no bytes from source %s, signaling"
-                        " peer to close corresponding source",
-                        n,
-                    )
+                    logging.debug("mux: Received no bytes from source %s, signaling peer to close corresponding source", n)
                     del self.sources[s]
                     header = struct.pack(PACKFORMAT, n, False, 0)
                     self.sink.write(header)
                     continue
-                ln = readthisloop
-                header = struct.pack(PACKFORMAT, n, True, ln)
+                l = readthisloop
+                header = struct.pack(PACKFORMAT, n, True, l)
                 self.sink.write(header)
-                write(self.sink, buffer, ln)
+                write(self.sink, buffer, l)
         logging.debug("mux: End of data multiplexer")
 
 
 class DataDemultiplexer(MyThread):
 
     def __init__(self, source, sinks):
         threading.Thread.__init__(self)
-        self.daemon = True
+        self.setDaemon(True)
         self.sinks = dict(enumerate(sinks))
         self.source = source
 
     def _run(self):
-        logging.debug(
-            "demux: Started with source %s and sinks %s",
-            self.source,
-            self.sinks,
-        )
+        logging.debug("demux: Started with source %s and sinks %s", self.source, self.sinks)
         buffer = bytearray(MAX_MUX_READ)
         while self.sinks:
             r, _, x = select.select([self.source], (), [self.source])
             assert not x, x
             for s in r:
                 header = s.read(PACKLEN)
-                if header == b"":
-                    logging.debug(
-                        "demux: Received no bytes from source, closing sinks",
-                    )
+                if header == "":
+                    logging.debug("demux: Received no bytes from source, closing all sinks")
                     for sink in self.sinks.values():
                         sink.close()
                     self.sinks = []
                     break
-                n, active, ln = struct.unpack(PACKFORMAT, header)
+                n, active, l = struct.unpack(PACKFORMAT, header)
                 if not active:
-                    logging.debug(
-                        "demux: Source %s inactive, closing matching sink %s",
-                        s,
-                        self.sinks[n],
-                    )
+                    logging.debug("demux: Source %s now inactive, closing corresponding sink %s", s, self.sinks[n])
                     self.sinks[n].close()
                     del self.sinks[n]
                 else:
-                    copy(self.source, self.sinks[n], buffer, ln)
+                    copy(self.source, self.sinks[n], buffer, l)
         logging.debug("demux: End of data demultiplexer")
 
 
-def quotedargs():
-    return " ".join(quote(x) for x in sys.argv[1:])
-
-
-def quotedargs_ellipsized(cmdlist):
-    text = " ".join(quote(x) for x in cmdlist)
-    if len(text) > 80:
-        text = text[:77] + "..."
-    return text
-
-
 def main_master():
-    set_proc_name("bombshell-client (master) %s" % quotedargs())
     global logging
     logging = LoggingEmu("master")
 
-    logging.info("Started with arguments: %s", quotedargs_ellipsized(sys.argv[1:]))
+    logging.info("Started with arguments: %s", sys.argv[1:])
 
     global debug_enabled
     args = sys.argv[1:]
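The multiplexer and demultiplexer above exchange frames whose header is packed with `PACKFORMAT = "!HbIx"`: stream number, active flag, payload length, and a pad byte. A short sketch of that header, with `frame` as a hypothetical helper name for illustration (not repository code):

```python
# Sketch of the mux/demux frame header used by DataMultiplexer/DataDemultiplexer.
import struct

PACKFORMAT = "!HbIx"
PACKLEN = struct.calcsize(PACKFORMAT)   # 8 bytes

def frame(stream, payload):
    """One multiplexed frame: header + payload. An empty payload with the
    active flag cleared tells the peer to close that stream."""
    active = bool(payload)
    return struct.pack(PACKFORMAT, stream, active, len(payload)) + payload

header = frame(1, b"hello")[:PACKLEN]
print(struct.unpack(PACKFORMAT, header))   # (1, 1, 5)
print(PACKLEN)                             # 8
```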
@@ -345,24 +293,20 @@ def main_master():
     assert remote_command
 
     def anypython(exe):
-        return "` test -x %s && echo %s || echo python3`" % (
-            quote(exe),
-            quote(exe),
-        )
+        return "` test -x %s && echo %s || echo python`" % (pipes.quote(exe),
+                                                            pipes.quote(exe))
 
     remote_helper_text = b"exec "
     remote_helper_text += bytes(anypython(sys.executable), "utf-8")
     remote_helper_text += bytes(" -u -c ", "utf-8")
-    remote_helper_text += bytes(
-        quote(open(__file__, "r").read()),
-        "ascii",
-    )
+    remote_helper_text += bytes(pipes.quote(open(__file__, "r").read()), "ascii")
     remote_helper_text += b" -d " if debug_enabled else b" "
     remote_helper_text += base64.b64encode(pickle.dumps(remote_command, 2))
     remote_helper_text += b"\n"
 
     saved_stderr = openfdforappend(os.dup(sys.stderr.fileno()))
 
+    with mutexfile(os.path.expanduser("~/.bombshell-lock")):
         try:
             p = subprocess.Popen(
                 ["qrexec-client-vm", remote_vm, "qubes.VMShell"],
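On both sides, `main_master()` assembles a one-line `exec ... python -u -c '<script>' <base64 pickle>` command and feeds it to `qubes.VMShell`. A hedged sketch of that construction with a toy command, using the master-side `shlex.quote`/`python3` variant; `script_source` is a stand-in for the real script text (not repository code):

```python
# Sketch of how the master side builds the helper line it sends over qrexec.
import base64
import pickle
from shlex import quote

def anypython(exe):
    # Falls back to a plain "python3" if the local interpreter path is absent remotely.
    return "` test -x %s && echo %s || echo python3`" % (quote(exe), quote(exe))

remote_command = ["logger", "hello from bombshell"]
script_source = "print('stand-in for the bombshell-client source')"

helper = b"exec "
helper += bytes(anypython("/usr/bin/python3"), "utf-8")
helper += b" -u -c "
helper += bytes(quote(script_source), "ascii")
helper += b" "
helper += base64.b64encode(pickle.dumps(remote_command, 2))
helper += b"\n"
print(helper.decode("ascii"))
```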
@@ -397,15 +341,15 @@
         )
         read_signals, write_signals = pairofpipes()
         signaler = SignalSender(handled_signals, write_signals)
-        signaler.name = "master signaler"
+        signaler.setName("master signaler")
         signaler.start()
 
         muxer = DataMultiplexer([sys.stdin, read_signals], p.stdin)
-        muxer.name = "master multiplexer"
+        muxer.setName("master multiplexer")
         muxer.start()
 
         demuxer = DataDemultiplexer(p.stdout, [sys.stdout, saved_stderr])
-        demuxer.name = "master demultiplexer"
+        demuxer.setName("master demultiplexer")
         demuxer.start()
 
         retval = p.wait()
@@ -421,11 +365,10 @@ def pairofpipes():
 
 
 def main_remote():
-    set_proc_name("bombshell-client (remote) %s" % quotedargs())
     global logging
     logging = LoggingEmu("remote")
 
-    logging.info("Started with arguments: %s", quotedargs_ellipsized(sys.argv[1:]))
+    logging.info("Started with arguments: %s", sys.argv[1:])
 
     global debug_enabled
     if "-d" in sys.argv[1:]:
@@ -437,7 +380,7 @@ def main_remote():
     cmd = pickle.loads(base64.b64decode(cmd))
     logging.debug("Received command: %s", cmd)
 
-    nicecmd = " ".join(quote(a) for a in cmd)
+    nicecmd = " ".join(pipes.quote(a) for a in cmd)
     try:
         p = subprocess.Popen(
             cmd,
@@ -463,22 +406,21 @@
     signals_read, signals_written = pairofpipes()
 
     signaler = Signaler(p, signals_read)
-    signaler.name = "remote signaler"
+    signaler.setName("remote signaler")
     signaler.start()
 
     demuxer = DataDemultiplexer(sys.stdin, [p.stdin, signals_written])
-    demuxer.name = "remote demultiplexer"
+    demuxer.setName("remote demultiplexer")
     demuxer.start()
 
     muxer = DataMultiplexer([p.stdout, p.stderr], sys.stdout)
-    muxer.name = "remote multiplexer"
+    muxer.setName("remote multiplexer")
     muxer.start()
 
-    nicecmd_ellipsized = quotedargs_ellipsized(cmd)
-    logging.info("Started %s", nicecmd_ellipsized)
+    logging.info("Started %s", nicecmd)
 
     retval = p.wait()
-    logging.info("Return code %s for %s", retval, nicecmd_ellipsized)
+    logging.info("Return code %s for %s", retval, nicecmd)
     muxer.join()
     logging.info("Ending bombshell")
     return retval
@@ -1 +1 @@
-["RELEASE": "q4.2 38 39"]
+["RELEASE": "25 32 34 35"]
@@ -63,20 +63,14 @@ class x(object):
 display = x()
 
 
-BUFSIZE = 64*1024 # any bigger and it causes issues because we don't read multiple chunks until completion
+BUFSIZE = 128*1024 # any bigger and it causes issues because we don't read multiple chunks until completion
 CONNECTION_TRANSPORT = "qubes"
 CONNECTION_OPTIONS = {
     'management_proxy': '--management-proxy',
 }
 
 
-def debug(text):
-    return
-    print(text, file=sys.stderr)
-
-
 def encode_exception(exc, stream):
-    debug("encoding exception")
     stream.write('{}\n'.format(len(exc.__class__.__name__)).encode('ascii'))
     stream.write('{}'.format(exc.__class__.__name__).encode('ascii'))
     for attr in "errno", "filename", "message", "strerror":
@@ -85,7 +79,6 @@ def encode_exception(exc, stream):
 
 
 def decode_exception(stream):
-    debug("decoding exception")
     name_len = stream.readline(16)
     name_len = int(name_len)
     name = stream.read(name_len)
@@ -114,7 +107,6 @@ def decode_exception(stream):
 
 
 def popen(cmd, in_data, outf=sys.stdout):
-    debug("popening on remote %s" % type(in_data))
     try:
         p = subprocess.Popen(
             cmd, shell=False, stdin=subprocess.PIPE,
@@ -132,11 +124,9 @@ def popen(cmd, in_data, outf=sys.stdout):
     outf.write('{}\n'.format(len(err)).encode('ascii'))
     outf.write(err)
     outf.flush()
-    debug("finished popening")
 
 
 def put(out_path):
-    debug("dest writing %s" % out_path)
     try:
         f = open(out_path, "wb")
         sys.stdout.write(b'Y\n')
@@ -146,25 +136,18 @@ def put(out_path):
         return
     while True:
         chunksize = int(sys.stdin.readline(16))
-        if not chunksize:
-            debug("looks like we have no more to read")
+        if chunksize == 0:
             break
-        while chunksize:
-            debug(type(chunksize))
         chunk = sys.stdin.read(chunksize)
-            assert chunk
-            debug("dest writing %s" % len(chunk))
+        assert len(chunk) == chunksize, ("Mismatch in chunk length", len(chunk), chunksize)
         try:
             f.write(chunk)
+            sys.stdout.write(b'Y\n')
         except (IOError, OSError) as e:
             sys.stdout.write(b'N\n')
             encode_exception(e, sys.stdout)
             f.close()
             return
-            chunksize = chunksize - len(chunk)
-            debug("remaining %s" % chunksize)
-        sys.stdout.write(b'Y\n')
-        sys.stdout.flush()
     try:
         f.flush()
     except (IOError, OSError) as e:
@@ -172,12 +155,10 @@ def put(out_path):
         encode_exception(e, sys.stdout)
         return
     finally:
-        debug("finished writing dest")
         f.close()
 
 
 def fetch(in_path, bufsize):
-    debug("Fetching from remote %s" % in_path)
     try:
         f = open(in_path, "rb")
     except (IOError, OSError) as e:
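`put()` above and `put_file()` further down speak a small line-oriented protocol: a decimal chunk length, the chunk bytes, and a `Y`/`N` acknowledgement, with a zero length ending the file. A sketch of the length-prefixed framing, with the acknowledgements omitted for brevity (illustrative only, not repository code):

```python
# Sketch of the chunked transfer framing between put_file() and put():
# "<len>\n" followed by that many bytes; a zero length terminates the file.
import io

CHUNK = 4  # tiny chunk size, just for the demo (the plugin uses BUFSIZE)

def send(data, wire):
    for i in range(0, len(data), CHUNK):
        chunk = data[i:i + CHUNK]
        wire.write(b"%d\n" % len(chunk))
        wire.write(chunk)
    wire.write(b"0\n")          # zero-length chunk ends the transfer

def receive(wire):
    out = bytearray()
    while True:
        size = int(wire.readline(16))
        if size == 0:
            return bytes(out)
        out += wire.read(size)

wire = io.BytesIO()
send(b"hello, qube!", wire)
wire.seek(0)
print(receive(wire))            # b'hello, qube!'
```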
@@ -225,7 +206,7 @@ sys.stdout = sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout
 '''
 payload = b'\n\n'.join(
     inspect.getsource(x).encode("utf-8")
-    for x in (debug, encode_exception, popen, put, fetch)
+    for x in (encode_exception, popen, put, fetch)
 ) + \
 b'''
 
@@ -274,7 +255,7 @@ class Connection(ConnectionBase):
     def set_options(self, task_keys=None, var_options=None, direct=None):
         super(Connection, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
         # FIXME HORRIBLE WORKAROUND FIXME
-        if task_keys and task_keys['delegate_to'] and self._options and 'management_proxy' in self._options:
+        if task_keys['delegate_to'] and 'management_proxy' in self._options:
             self._options['management_proxy'] = ''
 
     def __init__(self, play_context, new_stdin, *args, **kwargs):
@@ -285,6 +266,7 @@ class Connection(ConnectionBase):
             self.transport_cmd = kwargs['transport_cmd']
             return
         self.transport_cmd = distutils.spawn.find_executable('qrun')
+        self.transport_cmd = None
         if not self.transport_cmd:
             self.transport_cmd = os.path.join(
                 os.path.dirname(__file__),
@@ -313,7 +295,7 @@ class Connection(ConnectionBase):
         if not self._connected:
             remote_cmd = [to_bytes(x, errors='surrogate_or_strict') for x in [
                 # 'strace', '-s', '2048', '-o', '/tmp/log',
-                'python3', '-u', '-i', '-c', preamble
+                'python', '-u', '-i', '-c', preamble
             ]]
             addr = self._play_context.remote_addr
             proxy = to_bytes(self.get_option("management_proxy")) if self.get_option("management_proxy") else ""
@@ -375,18 +357,16 @@ class Connection(ConnectionBase):
         cmd = shlex.split(cmd)
         display.vvvv("EXEC %s" % cmd, host=self._play_context.remote_addr)
         try:
-            payload = ('popen(%r, %r)\n\n' % (cmd, in_data)).encode("utf-8")
+            payload = ('popen(%r, %r)\n' % (cmd, in_data)).encode("utf-8")
             self._transport.stdin.write(payload)
             self._transport.stdin.flush()
             yesno = self._transport.stdout.readline(2)
-            debug("Reading yesno")
         except Exception:
             self._abort_transport()
             raise
         if yesno == "Y\n" or yesno == b"Y\n":
             try:
                 retcode = self._transport.stdout.readline(16)
-                debug("Reading retcode")
                 try:
                     retcode = int(retcode)
                 except Exception:
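The request written here is a single `popen(...)` line; the reply the code then parses appears to be a `Y`/`N` status, a return code line, and two length-prefixed streams. A sketch of that exchange, with the reply format inferred from the hunks above rather than taken verbatim from the plugin:

```python
# Sketch (not repository code) of the exec_command() reply parsing. The exact
# ordering of the fields is an assumption based on popen()/exec_command() above.
import io

def fake_remote_reply(retcode, out, err):
    # What the remote popen() helper plausibly writes back on success.
    w = io.BytesIO()
    w.write(b"Y\n")
    w.write(b"%d\n" % retcode)
    w.write(b"%d\n" % len(out)); w.write(out)
    w.write(b"%d\n" % len(err)); w.write(err)
    w.seek(0)
    return w

r = fake_remote_reply(0, b"hello\n", b"")
assert r.readline(2) == b"Y\n"
retcode = int(r.readline(16))
stdout = r.read(int(r.readline(16)))
stderr = r.read(int(r.readline(16)))
print(retcode, stdout, stderr)   # 0 b'hello\n' b''
```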
@@ -423,7 +403,6 @@ class Connection(ConnectionBase):
         else:
             self._abort_transport()
             raise errors.AnsibleError("pass/fail from remote end is unexpected: %r" % yesno)
-        debug("finished popening on master")
 
     def put_file(self, in_path, out_path):
         '''Transfer a file from local to VM.'''
@@ -445,7 +424,6 @@ class Connection(ConnectionBase):
         with open(in_path, 'rb') as in_file:
             while True:
                 chunk = in_file.read(BUFSIZE)
-                debug("source writing %s bytes" % len(chunk))
                 try:
                     self._transport.stdin.write(("%s\n" % len(chunk)).encode("utf-8"))
                     self._transport.stdin.flush()
@@ -465,15 +443,9 @@ class Connection(ConnectionBase):
                 else:
                     self._abort_transport()
                     raise errors.AnsibleError("pass/fail from remote end is unexpected: %r" % yesno)
-                debug("on this side it's all good")
 
-            self._transport.stdin.write(("%s\n" % 0).encode("utf-8"))
-            self._transport.stdin.flush()
-        debug("finished writing source")
 
     def fetch_file(self, in_path, out_path):
         '''Fetch a file from VM to local.'''
-        debug("fetching to local")
         super(Connection, self).fetch_file(in_path, out_path)
         display.vvvv("FETCH %s to %s" % (in_path, out_path), host=self._play_context.remote_addr)
         in_path = _prefix_login_path(in_path)
@@ -24,13 +24,13 @@ Integrate this software into your Ansible setup (within your `managevm`) VM) by:
 
 ## Set up the policy file for `qubes.VMShell`
 
-Edit (as `root`) the file `/etc/qubes/policy.d/80-ansible-qubes.policy`
+Edit (as `root`) the file `/etc/qubes-rpc/policy/qubes.VMShell`
 located on the file system of your `dom0`.
 
 At the top of the file, add the following two lines:
 
 ```
-qubes.VMShell * managevm * allow
+managevm $anyvm allow
 ```
 
 This first line lets `managevm` execute any commands on any VM on your
|
|||||||
|
|
||||||
Now save that file, and exit your editor.
|
Now save that file, and exit your editor.
|
||||||
|
|
||||||
If your dom0 has a file `/etc/qubes-rpc/policy/qubes.VMShell`,
|
|
||||||
you can delete it now. It is obsolete.
|
|
||||||
|
|
||||||
### Optional: allow `managevm` to manage `dom0`
|
### Optional: allow `managevm` to manage `dom0`
|
||||||
|
|
||||||
The next step is to add the RPC service proper to dom0. Edit the file
|
Before the line you added in the previous step, add this line:
|
||||||
|
|
||||||
|
```
|
||||||
|
managevm dom0 allow
|
||||||
|
```
|
||||||
|
|
||||||
|
This line lets `managevm` execute any commands in `dom0`. Be sure you
|
||||||
|
understand the security implications of such a thing.
|
||||||
|
|
||||||
|
The next step is to add the RPC service proper. Edit the file
|
||||||
`/etc/qubes-rpc/qubes.VMShell` to have a single line that contains:
|
`/etc/qubes-rpc/qubes.VMShell` to have a single line that contains:
|
||||||
|
|
||||||
```
|
```
|
||||||
exec bash
|
exec bash
|
||||||
```
|
```
|
||||||
|
|
||||||
Make the file executable.
|
That is it. `dom0` should work now.
|
||||||
|
|
||||||
That is it. `dom0` should work now. Note you do this at your own risk.
|
|
||||||
|
|
||||||
|
|
||||||
## Test `qrun` works
|
## Test `qrun` works
|
||||||
|
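Either policy layout is ultimately exercised through `qrexec-client-vm`, the same call the connection plugin and bombshell-client make. A quick, hedged way to check the policy from inside `managevm` (the VM name `work` is a placeholder):

```python
# Sketch (not repository code): verify that the qubes.VMShell policy above
# actually lets this qube reach a target VM.
import subprocess

def vmshell(vm, command):
    # qrexec-client-vm invokes the qubes.VMShell RPC on the target VM and
    # feeds it our command on stdin, just as bombshell-client does.
    return subprocess.run(
        ["qrexec-client-vm", vm, "qubes.VMShell"],
        input=command.encode(), capture_output=True,
    )

result = vmshell("work", "echo yes; hostname\n")
print(result.returncode, result.stdout.decode())
```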
@@ -13,11 +13,11 @@ to set up a policy that allows us to remotely execute commands on any VM of the
 network server, without having to be physically present to click any dialogs authorizing
 the execution of those commands.
 
-In `dom0` of your Qubes server, edit `/etc/qubes/policy.d/80-ansible-qubes.policy` to add,
+In `dom0` of your Qubes server, edit `/etc/qubes-rpc/policy/qubes.VMShell` to add,
 at the top of the file, a policy that looks like this:
 
 ```
-qubes.VMShell * managevm * allow
+exp-manager $anyvm allow
 ```
 
 This tells Qubes OS that `exp-manager` is now authorized to run any command in any of the VMs.
|
|||||||
**Security note**: this does mean that anyone with access to `exp-manager` can do
|
**Security note**: this does mean that anyone with access to `exp-manager` can do
|
||||||
literally anything on any of your VMs in your Qubes OS server.
|
literally anything on any of your VMs in your Qubes OS server.
|
||||||
|
|
||||||
If that is not what you want, then replace `*` after `managevm` with the name of the VMs you
|
If that is not what you want, then replace `$anyvm` with the name of the VMs you would like
|
||||||
would like to manage. For example: if you would like `exp-manager` to be authorized to run
|
to manage. For example: if you would like `exp-manager` to be authorized to run commands
|
||||||
commands *only* on `exp-net`, then you can use the following policy:
|
*only* on `exp-net`, then you can use the following policy:
|
||||||
|
|
||||||
```
|
```
|
||||||
qubes.VMShell * exp-manager exp-net allow
|
exp-manager exp-net allow
|
||||||
qubes.VMShell * exp-manager @anyvm deny
|
exp-manager $anyvm deny
|
||||||
```
|
```
|
||||||
|
|
||||||
Try it out now. SSH from your manager machine into `exp-manager` and run:
|
Try it out now. SSH from your manager machine into `exp-manager` and run:
|
||||||
@ -47,7 +47,7 @@ You should see `yes` followed by `exp-net` on the output side.
|
|||||||
If you expect that you will need to run commands in `dom0` from your manager machine
|
If you expect that you will need to run commands in `dom0` from your manager machine
|
||||||
(say, to create, stop, start and modify VMs in the Qubes OS server),
|
(say, to create, stop, start and modify VMs in the Qubes OS server),
|
||||||
then you will have to create a file `/etc/qubes-rpc/qubes.VMShell` as `root` in `dom0`,
|
then you will have to create a file `/etc/qubes-rpc/qubes.VMShell` as `root` in `dom0`,
|
||||||
with the contents `/bin/bash` and permission mode `0755`. Doing this will enable you
|
with the contents `/bin/bash` and permission mode `0644`. Doing this will enable you
|
||||||
to run commands on `dom0` which you can subsequently test in `exp-manager` by running command:
|
to run commands on `dom0` which you can subsequently test in `exp-manager` by running command:
|
||||||
|
|
||||||
```
|
```
|
||||||
@ -57,7 +57,7 @@ qvm-run dom0 'echo yes ; hostname'
|
|||||||
like you did before.
|
like you did before.
|
||||||
|
|
||||||
**Security note**: this does mean that anyone with access to `exp-manager` can do
|
**Security note**: this does mean that anyone with access to `exp-manager` can do
|
||||||
*literally anything* on your Qubes OS server. You have been warned.
|
literally anything on your Qubes OS server.
|
||||||
|
|
||||||
## Integrate your Ansible setup
|
## Integrate your Ansible setup
|
||||||
|
|
||||||
|
@@ -1,30 +0,0 @@
-from ansible.errors import AnsibleError
-from ansible.plugins.lookup import LookupBase
-
-import json
-import sys
-import subprocess
-
-try:
-    from __main__ import display
-except ImportError:
-    from ansible.utils.display import Display
-    display = Display()
-
-
-UNDEFINED = object()
-
-
-class LookupModule(LookupBase):
-
-    def run(self, args, variables):
-        i = json.dumps(args[0])
-        c = ["jq", args[1]]
-        p = subprocess.Popen(c, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
-        o, e = p.communicate(i)
-        r = p.wait()
-        if r != 0 or e:
-            assert 0, e
-            raise subprocess.CalledProcessError(r, c, o, e)
-        r = json.loads(o)
-        return r