Rewrite testing system (#119)
The new test system is written with pytest. These tests are more comprehensive, run an order of magnitude faster, and are far more maintainable. The tests themselves conform to PEP 8.
parent 09a018ea5a
commit e7f9616b39
34 changed files with 3218 additions and 3 deletions
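
For orientation, here is a minimal sketch (not part of this commit) of what a test in the new system looks like, using the runner and yadm_y fixtures defined in test/conftest.py below; the test name and the use of the 'version' command are illustrative only.

def test_version_sketch(runner, yadm_y):
    """Hypothetical example of the new test style"""
    # yadm_y() builds the argument list for running yadm with -Y <yadm dir>,
    # and runner() executes it, capturing exit code, stdout and stderr.
    run = runner(command=yadm_y('version'))
    assert run.success
    assert run.err == ''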

.gitignore (vendored): 2 changes

@@ -1,4 +1,6 @@
 .DS_Store
+.env
 .jekyll-metadata
+.pytest_cache
 .sass-cache
 _site

Dockerfile: 17 changes

@@ -1,9 +1,20 @@
-FROM ubuntu:yakkety
+FROM ubuntu:18.04
 MAINTAINER Tim Byrne <sultan@locehilios.com>

 # No input during build
 ENV DEBIAN_FRONTEND noninteractive

+# UTF8 locale
+RUN apt-get update && apt-get install -y locales
+RUN locale-gen en_US.UTF-8
+ENV LANG='en_US.UTF-8' LANGUAGE='en_US:en' LC_ALL='en_US.UTF-8'
+
+# Convenience settings for the testbed's root account
+RUN echo 'set -o vi' >> /root/.bashrc
+
 # Install prerequisites
-RUN apt-get update && apt-get install -y git gnupg1 make shellcheck bats expect curl python-pip lsb-release
-RUN pip install envtpl
+RUN apt-get update && apt-get install -y git gnupg1 make shellcheck=0.4.6-1 bats expect curl python3-pip lsb-release
+RUN pip3 install envtpl pytest==3.6.4 pylint==1.9.2 flake8==3.5.0
+
+# Force GNUPG version 1 at path /usr/bin/gpg
+RUN ln -fs /usr/bin/gpg1 /usr/bin/gpg

Makefile: 15 changes

@@ -23,6 +23,11 @@ test: bats shellcheck
 parallel:
 	ls test/*bats | time parallel -q -P0 -- docker run --rm -v "$$PWD:/yadm:ro" yadm/testbed bash -c 'bats {}'

+.PHONY: pytest
+pytest:
+	@echo Running all pytest tests
+	@pytest -v
+
 .PHONY: bats
 bats:
 	@echo Running all bats tests

@@ -58,3 +63,13 @@ man:
 .PHONY: wide
 wide:
 	man ./yadm.1
+
+.PHONY: sync-clock
+sync-clock:
+	docker run --rm --privileged alpine hwclock -s
+
+.PHONY: .env
+.env:
+	virtualenv --python=python3 .env
+	.env/bin/pip3 install --upgrade pip setuptools
+	.env/bin/pip3 install --upgrade pytest pylint==1.9.2 flake8==3.5.0

docker-compose.yml (new file): 7 lines

@@ -0,0 +1,7 @@
---
version: '3'
services:
  testbed:
    volumes:
      - .:/yadm:ro
    image: yadm/testbed:latest

pylintrc (new file): 11 lines

@@ -0,0 +1,11 @@
[BASIC]
good-names=pytestmark

[DESIGN]
max-args=14
max-locals=26
max-attributes=8
max-statements=65

[MESSAGES CONTROL]
disable=redefined-outer-name

pytest.ini (new file): 3 lines

@@ -0,0 +1,3 @@
[pytest]
cache_dir = /tmp
addopts = -ra

test/conftest.py (new file): 559 lines

@@ -0,0 +1,559 @@
"""Global tests configuration and fixtures"""

import collections
import copy
import distutils.dir_util  # pylint: disable=no-name-in-module,import-error
import os
import platform
import pwd
from subprocess import Popen, PIPE
import pytest


@pytest.fixture(scope='session')
def shellcheck_version():
    """Version of shellcheck supported"""
    return '0.4.6'


@pytest.fixture(scope='session')
def pylint_version():
    """Version of pylint supported"""
    return '1.9.2'


@pytest.fixture(scope='session')
def flake8_version():
    """Version of flake8 supported"""
    return '3.5.0'


@pytest.fixture(scope='session')
def tst_user():
    """Test session's user id"""
    return pwd.getpwuid(os.getuid()).pw_name


@pytest.fixture(scope='session')
def tst_host():
    """Test session's short hostname value"""
    return platform.node().split('.')[0]


@pytest.fixture(scope='session')
def tst_distro(runner):
    """Test session's distro"""
    distro = ''
    try:
        run = runner(command=['lsb_release', '-si'], report=False)
        distro = run.out.strip()
    except BaseException:
        pass
    return distro


@pytest.fixture(scope='session')
def tst_sys():
    """Test session's uname value"""
    return platform.system()


@pytest.fixture(scope='session')
def cygwin_sys():
    """CYGWIN uname id"""
    return 'CYGWIN_NT-6.1-WOW64'


@pytest.fixture(scope='session')
def supported_commands():
    """List of supported commands

    This list should be updated every time yadm learns a new command.
    """
    return [
        'alt',
        'bootstrap',
        'clean',
        'clone',
        'config',
        'decrypt',
        'encrypt',
        'enter',
        'gitconfig',
        'help',
        'init',
        'introspect',
        'list',
        'perms',
        'version',
    ]


@pytest.fixture(scope='session')
def supported_configs():
    """List of supported config options

    This list should be updated every time yadm learns a new config.
    """
    return [
        'local.class',
        'local.hostname',
        'local.os',
        'local.user',
        'yadm.auto-alt',
        'yadm.auto-perms',
        'yadm.auto-private-dirs',
        'yadm.cygwin-copy',
        'yadm.git-program',
        'yadm.gpg-perms',
        'yadm.gpg-program',
        'yadm.gpg-recipient',
        'yadm.ssh-perms',
    ]


@pytest.fixture(scope='session')
def supported_switches():
    """List of supported switches

    This list should be updated every time yadm learns a new switch.
    """
    return [
        '--yadm-archive',
        '--yadm-bootstrap',
        '--yadm-config',
        '--yadm-dir',
        '--yadm-encrypt',
        '--yadm-repo',
        '-Y',
    ]


@pytest.fixture(scope='session')
def supported_local_configs(supported_configs):
    """List of supported local config options"""
    return [c for c in supported_configs if c.startswith('local.')]


class Runner(object):
    """Class for running commands

    Within yadm tests, this object should be used when running commands that
    require:

      * Acting on the status code
      * Parsing the output of the command
      * Passing input to the command

    Other instances of simply running commands should use os.system().
    """

    def __init__(
            self,
            command,
            inp=None,
            shell=False,
            cwd=None,
            env=None,
            expect=None,
            report=True):
        if shell:
            self.command = ' '.join([str(cmd) for cmd in command])
        else:
            self.command = command
        self.inp = inp
        self.wrap(expect)
        process = Popen(
            self.command,
            stdin=PIPE,
            stdout=PIPE,
            stderr=PIPE,
            shell=shell,
            cwd=cwd,
            env=env,
        )
        input_bytes = self.inp
        if self.inp:
            input_bytes = self.inp.encode()
        (out_bstream, err_bstream) = process.communicate(input=input_bytes)
        self.out = out_bstream.decode()
        self.err = err_bstream.decode()
        self.code = process.wait()
        self.success = self.code == 0
        self.failure = self.code != 0
        if report:
            self.report()

    def __repr__(self):
        return f'Runner({self.command})'

    def report(self):
        """Print code/stdout/stderr"""
        print(f'{self}')
        print(f' RUN: code:{self.code}')
        if self.inp:
            print(f' RUN: input:\n{self.inp}')
        print(f' RUN: stdout:\n{self.out}')
        print(f' RUN: stderr:\n{self.err}')

    def wrap(self, expect):
        """Wrap command with expect"""
        if not expect:
            return
        cmdline = ' '.join([f'"{w}"' for w in self.command])
        expect_script = f'set timeout 2\nspawn {cmdline}\n'
        for question, answer in expect:
            expect_script += (
                'expect {\n'
                f'"{question}" {{send "{answer}\\r"}}\n'
                'timeout {close;exit 128}\n'
                '}\n')
        expect_script += (
            'expect eof\n'
            'foreach {pid spawnid os_error_flag value} [wait] break\n'
            'exit $value')
        self.inp = expect_script
        print(f'EXPECT:{expect_script}')
        self.command = ['expect']


@pytest.fixture(scope='session')
def runner():
    """Class for running commands"""
    return Runner


@pytest.fixture(scope='session')
def config_git():
    """Configure global git configuration, if missing"""
    os.system(
        'git config user.name || '
        'git config --global user.name "test"')
    os.system(
        'git config user.email || '
        'git config --global user.email "test@test.test"')
    return None


@pytest.fixture()
def repo_config(runner, paths):
    """Function to query a yadm repo configuration value"""

    def query_func(key):
        """Query a yadm repo configuration value"""
        run = runner(
            command=('git', 'config', '--local', key),
            env={'GIT_DIR': paths.repo},
            report=False,
        )
        return run.out.rstrip()

    return query_func


@pytest.fixture(scope='session')
def yadm():
    """Path to yadm program to be tested"""
    full_path = os.path.realpath('yadm')
    assert os.path.isfile(full_path), "yadm program file isn't present"
    return full_path


@pytest.fixture()
def paths(tmpdir, yadm):
    """Function scoped test paths"""
    dir_root = tmpdir.mkdir('root')
    dir_work = dir_root.mkdir('work')
    dir_yadm = dir_root.mkdir('yadm')
    dir_repo = dir_yadm.mkdir('repo.git')
    dir_hooks = dir_yadm.mkdir('hooks')
    dir_remote = dir_root.mkdir('remote')
    file_archive = dir_yadm.join('files.gpg')
    file_bootstrap = dir_yadm.join('bootstrap')
    file_config = dir_yadm.join('config')
    file_encrypt = dir_yadm.join('encrypt')
    paths = collections.namedtuple(
        'Paths', [
            'pgm',
            'root',
            'work',
            'yadm',
            'repo',
            'hooks',
            'remote',
            'archive',
            'bootstrap',
            'config',
            'encrypt',
        ])
    return paths(
        yadm,
        dir_root,
        dir_work,
        dir_yadm,
        dir_repo,
        dir_hooks,
        dir_remote,
        file_archive,
        file_bootstrap,
        file_config,
        file_encrypt,
    )


@pytest.fixture()
def yadm_y(paths):
    """Generate custom command_list function"""
    def command_list(*args):
        """Produce params for running yadm with -Y"""
        return [paths.pgm, '-Y', str(paths.yadm)] + list(args)
    return command_list


class DataFile(object):
    """Datafile object"""

    def __init__(self, path, tracked=True, private=False):
        self.__path = path
        self.__parent = None
        self.__tracked = tracked
        self.__private = private

    @property
    def path(self):
        """Path property"""
        return self.__path

    @property
    def relative(self):
        """Relative path property"""
        if self.__parent:
            return self.__parent.join(self.path)
        raise BaseException('Unable to provide relative path, no parent')

    @property
    def tracked(self):
        """Tracked property"""
        return self.__tracked

    @property
    def private(self):
        """Private property"""
        return self.__private

    def relative_to(self, parent):
        """Update all relative paths to this py.path"""
        self.__parent = parent
        return


class DataSet(object):
    """Dataset object"""

    def __init__(self):
        self.__files = list()
        self.__dirs = list()
        self.__tracked_dirs = list()
        self.__private_dirs = list()
        self.__relpath = None

    def __repr__(self):
        return (
            f'[DS with {len(self)} files; '
            f'{len(self.tracked)} tracked, '
            f'{len(self.private)} private]'
        )

    def __iter__(self):
        return iter(self.__files)

    def __len__(self):
        return len(self.__files)

    def __contains__(self, datafile):
        if [f for f in self.__files if f.path == datafile]:
            return True
        if datafile in self.__files:
            return True
        return False

    @property
    def files(self):
        """List of DataFiles in DataSet"""
        return list(self.__files)

    @property
    def tracked(self):
        """List of tracked DataFiles in DataSet"""
        return [f for f in self.__files if f.tracked]

    @property
    def private(self):
        """List of private DataFiles in DataSet"""
        return [f for f in self.__files if f.private]

    @property
    def dirs(self):
        """List of directories in DataSet"""
        return list(self.__dirs)

    @property
    def plain_dirs(self):
        """List of directories in DataSet not starting with '.'"""
        return [d for d in self.dirs if not d.startswith('.')]

    @property
    def hidden_dirs(self):
        """List of directories in DataSet starting with '.'"""
        return [d for d in self.dirs if d.startswith('.')]

    @property
    def tracked_dirs(self):
        """List of directories in DataSet not starting with '.'"""
        return [d for d in self.__tracked_dirs if not d.startswith('.')]

    @property
    def private_dirs(self):
        """List of directories in DataSet considered 'private'"""
        return list(self.__private_dirs)

    def add_file(self, path, tracked=True, private=False):
        """Add file to data set"""
        if path not in self:
            datafile = DataFile(path, tracked, private)
            if self.__relpath:
                datafile.relative_to(self.__relpath)
            self.__files.append(datafile)

        dname = os.path.dirname(path)
        if dname and dname not in self.__dirs:
            self.__dirs.append(dname)
            if tracked:
                self.__tracked_dirs.append(dname)
            if private:
                self.__private_dirs.append(dname)

    def relative_to(self, relpath):
        """Update all relative paths to this py.path"""
        self.__relpath = relpath
        for datafile in self.files:
            datafile.relative_to(self.__relpath)
        return


@pytest.fixture(scope='session')
def ds1_dset(tst_sys, cygwin_sys):
    """Meta-data for dataset one files"""
    dset = DataSet()
    dset.add_file('t1')
    dset.add_file('d1/t2')
    dset.add_file(f'test_alt##S')
    dset.add_file(f'test_alt##S.H')
    dset.add_file(f'test_alt##S.H.U')
    dset.add_file(f'test_alt##C.S.H.U')
    dset.add_file(f'test alt/test alt##S')
    dset.add_file(f'test alt/test alt##S.H')
    dset.add_file(f'test alt/test alt##S.H.U')
    dset.add_file(f'test alt/test alt##C.S.H.U')
    dset.add_file(f'test_cygwin_copy##{tst_sys}')
    dset.add_file(f'test_cygwin_copy##{cygwin_sys}')
    dset.add_file('u1', tracked=False)
    dset.add_file('d2/u2', tracked=False)
    dset.add_file('.ssh/p1', tracked=False, private=True)
    dset.add_file('.ssh/.p2', tracked=False, private=True)
    dset.add_file('.gnupg/p3', tracked=False, private=True)
    dset.add_file('.gnupg/.p4', tracked=False, private=True)
    return dset


@pytest.fixture(scope='session')
def ds1_data(tmpdir_factory, config_git, ds1_dset, runner):
    """A set of test data, worktree & repo"""
    # pylint: disable=unused-argument
    # This is ignored because
    # @pytest.mark.usefixtures('config_git')
    # cannot be applied to another fixture.

    data = tmpdir_factory.mktemp('ds1')

    work = data.mkdir('work')
    for datafile in ds1_dset:
        work.join(datafile.path).write(datafile.path, ensure=True)

    repo = data.mkdir('repo.git')
    env = os.environ.copy()
    env['GIT_DIR'] = str(repo)
    runner(
        command=['git', 'init', '--shared=0600', '--bare', str(repo)],
        report=False)
    runner(
        command=['git', 'config', 'core.bare', 'false'],
        env=env,
        report=False)
    runner(
        command=['git', 'config', 'status.showUntrackedFiles', 'no'],
        env=env,
        report=False)
    runner(
        command=['git', 'config', 'yadm.managed', 'true'],
        env=env,
        report=False)
    runner(
        command=['git', 'config', 'core.worktree', str(work)],
        env=env,
        report=False)
    runner(
        command=['git', 'add'] +
        [str(work.join(f.path)) for f in ds1_dset if f.tracked],
        env=env)
    runner(
        command=['git', 'commit', '--allow-empty', '-m', 'Initial commit'],
        env=env,
        report=False)

    data = collections.namedtuple('Data', ['work', 'repo'])
    return data(work, repo)


@pytest.fixture()
def ds1_work_copy(ds1_data, paths):
    """Function scoped copy of ds1_data.work"""
    distutils.dir_util.copy_tree(  # pylint: disable=no-member
        str(ds1_data.work), str(paths.work))
    return None


@pytest.fixture()
def ds1_repo_copy(runner, ds1_data, paths):
    """Function scoped copy of ds1_data.repo"""
    distutils.dir_util.copy_tree(  # pylint: disable=no-member
        str(ds1_data.repo), str(paths.repo))
    env = os.environ.copy()
    env['GIT_DIR'] = str(paths.repo)
    runner(
        command=['git', 'config', 'core.worktree', str(paths.work)],
        env=env,
        report=False)
    return None


@pytest.fixture()
def ds1_copy(ds1_work_copy, ds1_repo_copy):
    """Function scoped copy of ds1_data"""
    # pylint: disable=unused-argument
    # This is ignored because
    # @pytest.mark.usefixtures('ds1_work_copy', 'ds1_repo_copy')
    # cannot be applied to another fixture.
    return None


@pytest.fixture()
def ds1(ds1_work_copy, paths, ds1_dset):
    """Function scoped ds1_dset w/paths"""
    # pylint: disable=unused-argument
    # This is ignored because
    # @pytest.mark.usefixtures('ds1_copy')
    # cannot be applied to another fixture.
    dscopy = copy.deepcopy(ds1_dset)
    dscopy.relative_to(copy.deepcopy(paths.work))
    return dscopy
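
A usage sketch for the Runner class above (illustrative only, not part of this commit): commands are given as argument lists, and interactive prompts can be answered by passing an expect list of question/answer pairs, which Runner turns into an expect script and feeds to the expect program.

# Hypothetical values; shown only to illustrate the Runner interface.
run = Runner(command=['git', '--version'], report=False)
assert run.success

run = Runner(
    command=['./yadm', 'clone', 'file:///tmp/remote'],    # hypothetical command
    expect=[('Would you like to execute it now', 'n')])   # prompt/answer pairs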

test/pylintrc (symbolic link): 1 line

@@ -0,0 +1 @@
../pylintrc

test/test_alt.py (new file): 345 lines

@@ -0,0 +1,345 @@
"""Test alt"""

import os
import re
import string
import pytest
import utils

# These test IDs are broken. During the writing of these tests, problems have
# been discovered in the way yadm orders matching files.
BROKEN_TEST_IDS = [
    'test_wild[tracked-##C.S.H.U-C-S%-H%-U]',
    'test_wild[tracked-##C.S.H.U-C-S-H%-U]',
    'test_wild[encrypted-##C.S.H.U-C-S%-H%-U]',
    'test_wild[encrypted-##C.S.H.U-C-S-H%-U]',
]

PRECEDENCE = [
    '##',
    '##$tst_sys',
    '##$tst_sys.$tst_host',
    '##$tst_sys.$tst_host.$tst_user',
    '##$tst_class',
    '##$tst_class.$tst_sys',
    '##$tst_class.$tst_sys.$tst_host',
    '##$tst_class.$tst_sys.$tst_host.$tst_user',
]

WILD_TEMPLATES = [
    '##$tst_class',
    '##$tst_class.$tst_sys',
    '##$tst_class.$tst_sys.$tst_host',
    '##$tst_class.$tst_sys.$tst_host.$tst_user',
]

WILD_TESTED = set()


@pytest.mark.parametrize('precedence_index', range(len(PRECEDENCE)))
@pytest.mark.parametrize(
    'tracked, encrypt, exclude', [
        (False, False, False),
        (True, False, False),
        (False, True, False),
        (False, True, True),
    ], ids=[
        'untracked',
        'tracked',
        'encrypted',
        'excluded',
    ])
@pytest.mark.usefixtures('ds1_copy')
def test_alt(runner, yadm_y, paths,
             tst_sys, tst_host, tst_user,
             tracked, encrypt, exclude,
             precedence_index):
    """Test alternate linking

    This test is done by iterating for the number of templates in PRECEDENCE.
    With each iteration, another file is left off the list. So with each
    iteration, the template with the "highest precedence" is left out. The file
    using the highest precedence should be the one linked.
    """

    # set the class
    tst_class = 'testclass'
    utils.set_local(paths, 'class', tst_class)

    # process the templates in PRECEDENCE
    precedence = list()
    for template in PRECEDENCE:
        precedence.append(
            string.Template(template).substitute(
                tst_class=tst_class,
                tst_host=tst_host,
                tst_sys=tst_sys,
                tst_user=tst_user,
            )
        )

    # create files using a subset of files
    for suffix in precedence[0:precedence_index+1]:
        utils.create_alt_files(paths, suffix, tracked=tracked,
                               encrypt=encrypt, exclude=exclude)

    # run alt to trigger linking
    run = runner(yadm_y('alt'))
    assert run.success
    assert run.err == ''
    linked = linked_list(run.out)

    # assert the proper linking has occurred
    for file_path in (utils.ALT_FILE1, utils.ALT_FILE2):
        source_file = file_path + precedence[precedence_index]
        if tracked or (encrypt and not exclude):
            assert paths.work.join(file_path).islink()
            assert paths.work.join(file_path).read() == source_file
            assert str(paths.work.join(source_file)) in linked
        else:
            assert not paths.work.join(file_path).exists()
            assert str(paths.work.join(source_file)) not in linked


def short_template(template):
    """Translate template into something short for test IDs"""
    return string.Template(template).substitute(
        tst_class='C',
        tst_host='H',
        tst_sys='S',
        tst_user='U',
    )


@pytest.mark.parametrize('wild_user', [True, False], ids=['U%', 'U'])
@pytest.mark.parametrize('wild_host', [True, False], ids=['H%', 'H'])
@pytest.mark.parametrize('wild_sys', [True, False], ids=['S%', 'S'])
@pytest.mark.parametrize('wild_class', [True, False], ids=['C%', 'C'])
@pytest.mark.parametrize('template', WILD_TEMPLATES, ids=short_template)
@pytest.mark.parametrize(
    'tracked, encrypt', [
        (True, False),
        (False, True),
    ], ids=[
        'tracked',
        'encrypted',
    ])
@pytest.mark.usefixtures('ds1_copy')
def test_wild(request, runner, yadm_y, paths,
              tst_sys, tst_host, tst_user,
              tracked, encrypt,
              wild_class, wild_host, wild_sys, wild_user,
              template):
    """Test wild linking

    These tests are done by creating permutations of the possible files using
    WILD_TEMPLATES. Each case is then tested (while skipping the already tested
    permutations for efficiency).
    """

    if request.node.name in BROKEN_TEST_IDS:
        pytest.xfail(
            'This test is known to be broken. '
            'This bug needs to be fixed.')

    tst_class = 'testclass'

    # determine the "wild" version of the suffix
    str_class = '%' if wild_class else tst_class
    str_host = '%' if wild_host else tst_host
    str_sys = '%' if wild_sys else tst_sys
    str_user = '%' if wild_user else tst_user
    wild_suffix = string.Template(template).substitute(
        tst_class=str_class,
        tst_host=str_host,
        tst_sys=str_sys,
        tst_user=str_user,
    )

    # determine the "standard" version of the suffix
    std_suffix = string.Template(template).substitute(
        tst_class=tst_class,
        tst_host=tst_host,
        tst_sys=tst_sys,
        tst_user=tst_user,
    )

    # skip over duplicate tests (this seems to be the simplest way to cover the
    # permutations of tests, while skipping duplicates.)
    test_key = f'{tracked}{encrypt}{wild_suffix}{std_suffix}'
    if test_key in WILD_TESTED:
        return
    else:
        WILD_TESTED.add(test_key)

    # set the class
    utils.set_local(paths, 'class', tst_class)

    # create files using the wild suffix
    utils.create_alt_files(paths, wild_suffix, tracked=tracked,
                           encrypt=encrypt, exclude=False)

    # run alt to trigger linking
    run = runner(yadm_y('alt'))
    assert run.success
    assert run.err == ''
    linked = linked_list(run.out)

    # assert the proper linking has occurred
    for file_path in (utils.ALT_FILE1, utils.ALT_FILE2):
        source_file = file_path + wild_suffix
        assert paths.work.join(file_path).islink()
        assert paths.work.join(file_path).read() == source_file
        assert str(paths.work.join(source_file)) in linked

    # create files using the standard suffix
    utils.create_alt_files(paths, std_suffix, tracked=tracked,
                           encrypt=encrypt, exclude=False)

    # run alt to trigger linking
    run = runner(yadm_y('alt'))
    assert run.success
    assert run.err == ''
    linked = linked_list(run.out)

    # assert the proper linking has occurred
    for file_path in (utils.ALT_FILE1, utils.ALT_FILE2):
        source_file = file_path + std_suffix
        assert paths.work.join(file_path).islink()
        assert paths.work.join(file_path).read() == source_file
        assert str(paths.work.join(source_file)) in linked


@pytest.mark.usefixtures('ds1_copy')
def test_local_override(runner, yadm_y, paths,
                        tst_sys, tst_host, tst_user):
    """Test local overrides"""

    # define local overrides
    utils.set_local(paths, 'class', 'or-class')
    utils.set_local(paths, 'hostname', 'or-hostname')
    utils.set_local(paths, 'os', 'or-os')
    utils.set_local(paths, 'user', 'or-user')

    # create files, the first would normally be the most specific version
    # however, the second is the overridden version which should be preferred.
    utils.create_alt_files(
        paths, f'##or-class.{tst_sys}.{tst_host}.{tst_user}')
    utils.create_alt_files(
        paths, '##or-class.or-os.or-hostname.or-user')

    # run alt to trigger linking
    run = runner(yadm_y('alt'))
    assert run.success
    assert run.err == ''
    linked = linked_list(run.out)

    # assert the proper linking has occurred
    for file_path in (utils.ALT_FILE1, utils.ALT_FILE2):
        source_file = file_path + '##or-class.or-os.or-hostname.or-user'
        assert paths.work.join(file_path).islink()
        assert paths.work.join(file_path).read() == source_file
        assert str(paths.work.join(source_file)) in linked


@pytest.mark.parametrize('suffix', ['AAA', 'ZZZ', 'aaa', 'zzz'])
@pytest.mark.usefixtures('ds1_copy')
def test_class_case(runner, yadm_y, paths, tst_sys, suffix):
    """Test range of class cases"""

    # set the class
    utils.set_local(paths, 'class', suffix)

    # create files
    endings = [suffix]
    if tst_sys == 'Linux':
        # Only create all of these side-by-side on Linux, which is
        # unquestionably case-sensitive. This would break tests on
        # case-insensitive systems.
        endings = ['AAA', 'ZZZ', 'aaa', 'zzz']
    for ending in endings:
        utils.create_alt_files(paths, f'##{ending}')

    # run alt to trigger linking
    run = runner(yadm_y('alt'))
    assert run.success
    assert run.err == ''
    linked = linked_list(run.out)

    # assert the proper linking has occurred
    for file_path in (utils.ALT_FILE1, utils.ALT_FILE2):
        source_file = file_path + f'##{suffix}'
        assert paths.work.join(file_path).islink()
        assert paths.work.join(file_path).read() == source_file
        assert str(paths.work.join(source_file)) in linked


@pytest.mark.parametrize('autoalt', [None, 'true', 'false'])
@pytest.mark.usefixtures('ds1_copy')
def test_auto_alt(runner, yadm_y, paths, autoalt):
    """Test setting auto-alt"""

    # set the value of auto-alt
    if autoalt:
        os.system(' '.join(yadm_y('config', 'yadm.auto-alt', autoalt)))

    # create file
    suffix = '##'
    utils.create_alt_files(paths, suffix)

    # run status to possibly trigger linking
    run = runner(yadm_y('status'))
    assert run.success
    assert run.err == ''
    linked = linked_list(run.out)

    # assert the proper linking has occurred
    for file_path in (utils.ALT_FILE1, utils.ALT_FILE2):
        source_file = file_path + suffix
        if autoalt == 'false':
            assert not paths.work.join(file_path).exists()
        else:
            assert paths.work.join(file_path).islink()
            assert paths.work.join(file_path).read() == source_file
            # no linking output when run via auto-alt
            assert str(paths.work.join(source_file)) not in linked


@pytest.mark.parametrize('delimiter', ['.', '_'])
@pytest.mark.usefixtures('ds1_copy')
def test_delimiter(runner, yadm_y, paths,
                   tst_sys, tst_host, tst_user, delimiter):
    """Test delimiters used"""

    suffix = '##' + delimiter.join([tst_sys, tst_host, tst_user])

    # create file
    utils.create_alt_files(paths, suffix)

    # run alt to trigger linking
    run = runner(yadm_y('alt'))
    assert run.success
    assert run.err == ''
    linked = linked_list(run.out)

    # assert the proper linking has occurred
    # only a delimiter of '.' is valid
    for file_path in (utils.ALT_FILE1, utils.ALT_FILE2):
        source_file = file_path + suffix
        if delimiter == '.':
            assert paths.work.join(file_path).islink()
            assert paths.work.join(file_path).read() == source_file
            assert str(paths.work.join(source_file)) in linked
        else:
            assert not paths.work.join(file_path).exists()
            assert str(paths.work.join(source_file)) not in linked


def linked_list(output):
    """Parse output, and return list of linked files"""
    linked = dict()
    for line in output.splitlines():
        match = re.match('Linking (.+) to (.+)$', line)
        if match:
            linked[match.group(2)] = match.group(1)
    return linked.values()
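
For reference, this is how the templates above expand into a concrete alternate-file suffix; the class, host, and user values below are hypothetical.

import string

suffix = string.Template('##$tst_class.$tst_sys.$tst_host.$tst_user').substitute(
    tst_class='testclass', tst_sys='Linux', tst_host='host1', tst_user='alice')
assert suffix == '##testclass.Linux.host1.alice'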

test/test_assert_private_dirs.py (new file): 106 lines

@@ -0,0 +1,106 @@
"""Test asserting private directories"""

import os
import re
import pytest

pytestmark = pytest.mark.usefixtures('ds1_copy')
PRIVATE_DIRS = ['.gnupg', '.ssh']


def test_pdirs_missing(runner, yadm_y, paths):
    """Private dirs (private dirs missing)

    When a git command is run
    And private directories are missing
    Create private directories prior to command
    """

    # confirm directories are missing at start
    for pdir in PRIVATE_DIRS:
        path = paths.work.join(pdir)
        if path.exists():
            path.remove()
        assert not path.exists()

    # run status
    run = runner(command=yadm_y('status'), env={'DEBUG': 'yes'})
    assert run.success
    assert run.err == ''
    assert 'On branch master' in run.out

    # confirm directories are created
    # and are protected
    for pdir in PRIVATE_DIRS:
        path = paths.work.join(pdir)
        assert path.exists()
        assert oct(path.stat().mode).endswith('00'), 'Directory is not secured'

    # confirm directories are created before command is run:
    assert re.search(
        r'Creating.+\.gnupg.+Creating.+\.ssh.+Running git command git status',
        run.out, re.DOTALL), 'directories created before command is run'


def test_pdirs_missing_apd_false(runner, yadm_y, paths):
    """Private dirs (private dirs missing / yadm.auto-private-dirs=false)

    When a git command is run
    And private directories are missing
    But auto-private-dirs is false
    Do not create private dirs
    """

    # confirm directories are missing at start
    for pdir in PRIVATE_DIRS:
        path = paths.work.join(pdir)
        if path.exists():
            path.remove()
        assert not path.exists()

    # set configuration
    os.system(' '.join(yadm_y(
        'config', '--bool', 'yadm.auto-private-dirs', 'false')))

    # run status
    run = runner(command=yadm_y('status'))
    assert run.success
    assert run.err == ''
    assert 'On branch master' in run.out

    # confirm directories are STILL missing
    for pdir in PRIVATE_DIRS:
        assert not paths.work.join(pdir).exists()


def test_pdirs_exist_apd_false(runner, yadm_y, paths):
    """Private dirs (private dirs exist / yadm.auto-perms=false)

    When a git command is run
    And private directories exist
    And yadm is configured not to auto update perms
    Do not alter directories
    """

    # create permissive directories
    for pdir in PRIVATE_DIRS:
        path = paths.work.join(pdir)
        if not path.isdir():
            path.mkdir()
        path.chmod(0o777)
        assert oct(path.stat().mode).endswith('77'), 'Directory is secure.'

    # set configuration
    os.system(' '.join(yadm_y(
        'config', '--bool', 'yadm.auto-perms', 'false')))

    # run status
    run = runner(command=yadm_y('status'))
    assert run.success
    assert run.err == ''
    assert 'On branch master' in run.out

    # created directories are STILL permissive
    for pdir in PRIVATE_DIRS:
        path = paths.work.join(pdir)
        assert oct(path.stat().mode).endswith('77'), 'Directory is secure'
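
A note on the permission assertions above: oct() of a directory mode restricted to its owner (0o700) ends in '00', while a world-accessible mode (0o777) ends in '77'; that is all the endswith() checks rely on.

assert oct(0o040700).endswith('00')      # drwx------ : secured
assert oct(0o040777).endswith('77')      # drwxrwxrwx : permissive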

test/test_bootstrap.py (new file): 31 lines

@@ -0,0 +1,31 @@
"""Test bootstrap"""

import pytest


@pytest.mark.parametrize(
    'exists, executable, code, expect', [
        (False, False, 1, 'Cannot execute bootstrap'),
        (True, False, 1, 'is not an executable program'),
        (True, True, 123, 'Bootstrap successful'),
    ], ids=[
        'missing',
        'not executable',
        'executable',
    ])
def test_bootstrap(
        runner, yadm_y, paths, exists, executable, code, expect):
    """Test bootstrap command"""
    if exists:
        paths.bootstrap.write('')
        if executable:
            paths.bootstrap.write(
                '#!/bin/bash\n'
                f'echo {expect}\n'
                f'exit {code}\n'
            )
            paths.bootstrap.chmod(0o775)
    run = runner(command=yadm_y('bootstrap'))
    assert run.code == code
    assert run.err == ''
    assert expect in run.out

test/test_clean.py (new file): 11 lines

@@ -0,0 +1,11 @@
"""Test clean"""


def test_clean_command(runner, yadm_y):
    """Run with clean command"""
    run = runner(command=yadm_y('clean'))
    # do nothing, this is a dangerous Git command when managing dot files
    # report the command as disabled and exit as a failure
    assert run.failure
    assert run.err == ''
    assert 'disabled' in run.out

test/test_clone.py (new file): 274 lines

@@ -0,0 +1,274 @@
"""Test clone"""

import os
import re
import pytest

BOOTSTRAP_CODE = 123
BOOTSTRAP_MSG = 'Bootstrap successful'


@pytest.mark.usefixtures('remote')
@pytest.mark.parametrize(
    'good_remote, repo_exists, force, conflicts', [
        (False, False, False, False),
        (True, False, False, False),
        (True, True, False, False),
        (True, True, True, False),
        (True, False, False, True),
    ], ids=[
        'bad remote',
        'simple',
        'existing repo',
        '-f',
        'conflicts',
    ])
def test_clone(
        runner, paths, yadm_y, repo_config, ds1,
        good_remote, repo_exists, force, conflicts):
    """Test basic clone operation"""

    # determine remote url
    remote_url = f'file://{paths.remote}'
    if not good_remote:
        remote_url = 'file://bad_remote'

    old_repo = None
    if repo_exists:
        # put a repo in the way
        paths.repo.mkdir()
        old_repo = paths.repo.join('old_repo')
        old_repo.write('old_repo')

    if conflicts:
        ds1.tracked[0].relative.write('conflict')
        assert ds1.tracked[0].relative.exists()

    # run the clone command
    args = ['clone', '-w', paths.work]
    if force:
        args += ['-f']
    args += [remote_url]
    run = runner(command=yadm_y(*args))

    if not good_remote:
        # clone should fail
        assert run.failure
        assert run.err != ''
        assert 'Unable to fetch origin' in run.out
        assert not paths.repo.exists()
    elif repo_exists and not force:
        # can't overwrite data
        assert run.failure
        assert run.err == ''
        assert 'Git repo already exists' in run.out
    else:
        # clone should succeed, and repo should be configured properly
        assert successful_clone(run, paths, repo_config)

        # ensure conflicts are handled properly
        if conflicts:
            assert 'NOTE' in run.out
            assert 'Merging origin/master failed' in run.out
            assert 'Conflicts preserved' in run.out

        # confirm correct Git origin
        run = runner(
            command=('git', 'remote', '-v', 'show'),
            env={'GIT_DIR': paths.repo})
        assert run.success
        assert run.err == ''
        assert f'origin\t{remote_url}' in run.out

        # ensure conflicts are really preserved
        if conflicts:
            # test to see if the work tree is actually "clean"
            run = runner(
                command=yadm_y('status', '-uno', '--porcelain'),
                cwd=paths.work)
            assert run.success
            assert run.err == ''
            assert run.out == '', 'worktree has unexpected changes'

            # test to see if the conflicts are stashed
            run = runner(command=yadm_y('stash', 'list'), cwd=paths.work)
            assert run.success
            assert run.err == ''
            assert 'Conflicts preserved' in run.out, 'conflicts not stashed'

            # verify content of the stashed conflicts
            run = runner(command=yadm_y('stash', 'show', '-p'), cwd=paths.work)
            assert run.success
            assert run.err == ''
            assert '\n+conflict' in run.out, 'conflicts not stashed'

    # another force-related assertion
    if old_repo:
        if force:
            assert not old_repo.exists()
        else:
            assert old_repo.exists()


@pytest.mark.usefixtures('remote')
@pytest.mark.parametrize(
    'bs_exists, bs_param, answer', [
        (False, '--bootstrap', None),
        (True, '--bootstrap', None),
        (True, '--no-bootstrap', None),
        (True, None, 'n'),
        (True, None, 'y'),
    ], ids=[
        'force, missing',
        'force, existing',
        'prevent',
        'existing, answer n',
        'existing, answer y',
    ])
def test_clone_bootstrap(
        runner, paths, yadm_y, repo_config, bs_exists, bs_param, answer):
    """Test bootstrap clone features"""

    # establish a bootstrap
    create_bootstrap(paths, bs_exists)

    # run the clone command
    args = ['clone', '-w', paths.work]
    if bs_param:
        args += [bs_param]
    args += [f'file://{paths.remote}']
    expect = []
    if answer:
        expect.append(('Would you like to execute it now', answer))
    run = runner(command=yadm_y(*args), expect=expect)

    if answer:
        assert 'Would you like to execute it now' in run.out

    expected_code = 0
    if bs_exists and bs_param != '--no-bootstrap':
        expected_code = BOOTSTRAP_CODE

    if answer == 'y':
        expected_code = BOOTSTRAP_CODE
        assert BOOTSTRAP_MSG in run.out
    elif answer == 'n':
        expected_code = 0
        assert BOOTSTRAP_MSG not in run.out

    assert successful_clone(run, paths, repo_config, expected_code)

    if not bs_exists:
        assert BOOTSTRAP_MSG not in run.out


def create_bootstrap(paths, exists):
    """Create bootstrap file for test"""
    if exists:
        paths.bootstrap.write(
            '#!/bin/sh\n'
            f'echo {BOOTSTRAP_MSG}\n'
            f'exit {BOOTSTRAP_CODE}\n')
        paths.bootstrap.chmod(0o775)
        assert paths.bootstrap.exists()
    else:
        assert not paths.bootstrap.exists()


@pytest.mark.usefixtures('remote')
@pytest.mark.parametrize(
    'private_type, in_repo, in_work', [
        ('ssh', False, True),
        ('gnupg', False, True),
        ('ssh', True, True),
        ('gnupg', True, True),
        ('ssh', True, False),
        ('gnupg', True, False),
    ], ids=[
        'open ssh, not tracked',
        'open gnupg, not tracked',
        'open ssh, tracked',
        'open gnupg, tracked',
        'missing ssh, tracked',
        'missing gnupg, tracked',
    ])
def test_clone_perms(
        runner, yadm_y, paths, repo_config,
        private_type, in_repo, in_work):
    """Test clone permission-related functions"""

    # update remote repo to include private data
    if in_repo:
        rpath = paths.work.mkdir(f'.{private_type}').join('related')
        rpath.write('related')
        os.system(f'GIT_DIR="{paths.remote}" git add {rpath}')
        os.system(f'GIT_DIR="{paths.remote}" git commit -m "{rpath}"')
        rpath.remove()

    # ensure local private data is insecure at the start
    if in_work:
        pdir = paths.work.join(f'.{private_type}')
        if not pdir.exists():
            pdir.mkdir()
        pfile = pdir.join('existing')
        pfile.write('existing')
        pdir.chmod(0o777)
        pfile.chmod(0o777)
    else:
        paths.work.remove()
        paths.work.mkdir()

    run = runner(
        yadm_y('clone', '-d', '-w', paths.work, f'file://{paths.remote}'))

    assert successful_clone(run, paths, repo_config)
    if in_work:
        # private directories which already exist, should be left as they are,
        # which in this test is "insecure".
        assert re.search(
            f'initial private dir perms drwxrwxrwx.+.{private_type}',
            run.out)
        assert re.search(
            f'pre-merge private dir perms drwxrwxrwx.+.{private_type}',
            run.out)
        assert re.search(
            f'post-merge private dir perms drwxrwxrwx.+.{private_type}',
            run.out)
    else:
        # private directories which are created, should be done prior to
        # merging, and with secure permissions.
        assert 'initial private dir perms' not in run.out
        assert re.search(
            f'pre-merge private dir perms drwx------.+.{private_type}',
            run.out)
        assert re.search(
            f'post-merge private dir perms drwx------.+.{private_type}',
            run.out)

    # standard perms still apply afterwards unless disabled with auto.perms
    assert oct(
        paths.work.join(f'.{private_type}').stat().mode).endswith('00'), (
            f'.{private_type} has not been secured by auto.perms')


def successful_clone(run, paths, repo_config, expected_code=0):
    """Assert clone is successful"""
    assert run.code == expected_code
    assert 'Initialized' in run.out
    assert oct(paths.repo.stat().mode).endswith('00'), 'Repo is not secured'
    assert repo_config('core.bare') == 'false'
    assert repo_config('status.showUntrackedFiles') == 'no'
    assert repo_config('yadm.managed') == 'true'
    return True


@pytest.fixture()
def remote(paths, ds1_repo_copy):
    """Function scoped remote (based on ds1)"""
    # pylint: disable=unused-argument
    # This is ignored because
    # @pytest.mark.usefixtures('ds1_remote_copy')
    # cannot be applied to another fixture.
    paths.remote.remove()
    paths.repo.move(paths.remote)
    return None

test/test_config.py (new file): 139 lines

@@ -0,0 +1,139 @@
"""Test config"""

import os
import pytest

TEST_SECTION = 'test'
TEST_ATTRIBUTE = 'attribute'
TEST_KEY = f'{TEST_SECTION}.{TEST_ATTRIBUTE}'
TEST_VALUE = 'testvalue'
TEST_FILE = f'[{TEST_SECTION}]\n\t{TEST_ATTRIBUTE} = {TEST_VALUE}'


def test_config_no_params(runner, yadm_y, supported_configs):
    """No parameters

    Display instructions
    Display supported configs
    Exit with 0
    """

    run = runner(yadm_y('config'))

    assert run.success
    assert run.err == ''
    assert 'Please read the CONFIGURATION section' in run.out