Commits on Source (114)
[source.crates-io]
replace-with = "vendored-sources"
[source.vendored-sources]
directory = "./vendor"
......@@ -225,8 +225,10 @@ html/
src/lib389/dist/
src/lib389/man/
src/libsds/target/
src/librslapd/target/
dist
venv
.idea
src/cockpit/389-console/cockpit_dist/
src/cockpit/389-console/node_modules/
ldap/servers/slapd/rust-slapi-private.h
......@@ -27,15 +27,19 @@ Building
./configure --enable-debug --with-openldap --enable-cmocka --enable-asan
make
make lib389
make check
sudo make install
sudo make lib389-install
Testing
-------
make check
sudo py.test -s 389-ds-base/dirsrvtests/tests/suites/basic/
To debug the make check items, you'll need libtool to help:
libtool --mode=execute gdb /home/william/build/ds/test_slapd
More information
----------------
......
......@@ -10,7 +10,7 @@ vendor="389 Project"
# PACKAGE_VERSION is constructed from these
VERSION_MAJOR=1
VERSION_MINOR=4
VERSION_MAINT=1.6
VERSION_MAINT=2.4
# NOTE: VERSION_PREREL is automatically set for builds made out of a git tree
VERSION_PREREL=
VERSION_DATE=$(date -u +%Y%m%d)
......
......@@ -85,37 +85,40 @@ AC_CHECK_FUNCS([clock_gettime], [], AC_MSG_ERROR([unable to locate required symb
LT_LIB_DLLOAD
# Optional rust component support.
AC_MSG_CHECKING(for --enable-rust-offline)
AC_ARG_ENABLE(rust_offline, AS_HELP_STRING([--enable-rust-offline], [Enable rust building offline. you MUST have run vendor! (default: no)]),
[], [ enable_rust_offline=no ])
AC_MSG_RESULT($enable_rust_offline)
AM_CONDITIONAL([RUST_ENABLE_OFFLINE],[test "$enable_rust_offline" = yes])
AC_MSG_CHECKING(for --enable-rust)
AC_ARG_ENABLE(rust, AS_HELP_STRING([--enable-rust], [Enable rust language features (default: no)]),
[
[], [ enable_rust=no ])
AC_MSG_RESULT($enable_rust)
if test "$enable_rust" = yes -o "$enable_rust_offline" = yes; then
AC_CHECK_PROG(CARGO, [cargo], [yes], [no])
AC_CHECK_PROG(RUSTC, [rustc], [yes], [no])
AS_IF([test "$CARGO" != "yes" -o "$RUSTC" != "yes"], [
AC_MSG_FAILURE("Rust based plugins cannot be built cargo=$CARGO rustc=$RUSTC")
])
with_rust=yes
AC_MSG_RESULT(yes)
],
[
AC_MSG_RESULT(no)
])
AM_CONDITIONAL([RUST_ENABLE],[test -n "$with_rust"])
fi
AM_CONDITIONAL([RUST_ENABLE],[test "$enable_rust" = yes -o "$enable_rust_offline" = yes])
AC_MSG_CHECKING(for --enable-debug)
AC_ARG_ENABLE(debug, AS_HELP_STRING([--enable-debug], [Enable debug features (default: no)]),
[
AC_MSG_RESULT(yes)
[], [ enable_debug=no ])
AC_MSG_RESULT($enable_debug)
if test "$enable_debug" = yes ; then
debug_defs="-DDEBUG -DMCC_DEBUG"
debug_cflags="-g3 -O0"
debug_cxxflags="-g3 -O0"
debug_rust_defs="-C debuginfo=2"
cargo_defs=""
rust_target_dir="debug"
with_debug=yes
],
[
AC_MSG_RESULT(no)
else
debug_defs=""
# set the default safe CFLAGS that would be set by AC_PROG_CC otherwise
debug_cflags="-g -O2"
......@@ -123,104 +126,98 @@ AC_ARG_ENABLE(debug, AS_HELP_STRING([--enable-debug], [Enable debug features (de
debug_rust_defs="-C debuginfo=2"
cargo_defs="--release"
rust_target_dir="release"
])
fi
AC_SUBST([debug_defs])
AC_SUBST([debug_cflags])
AC_SUBST([debug_cxxflags])
AC_SUBST([debug_rust_defs])
AC_SUBST([cargo_defs])
AC_SUBST([rust_target_dir])
AM_CONDITIONAL([DEBUG],[test -n "$with_debug"])
AM_CONDITIONAL([DEBUG],[test "$enable_debug" = yes])
AC_MSG_CHECKING(for --enable-asan)
AC_ARG_ENABLE(asan, AS_HELP_STRING([--enable-asan], [Enable gcc/clang address sanitizer options (default: no)]),
[
AC_MSG_RESULT(yes)
[], [ enable_asan=no ])
AC_MSG_RESULT($enable_asan)
if test "$enable_asan" = yes ; then
asan_cflags="-fsanitize=address -fno-omit-frame-pointer -lasan"
asan_rust_defs="-Z sanitizer=address"
],
[
AC_MSG_RESULT(no)
else
asan_cflags=""
asan_rust_defs=""
])
fi
AC_SUBST([asan_cflags])
AC_SUBST([asan_rust_defs])
AM_CONDITIONAL(enable_asan,test "$enable_asan" = "yes")
AC_MSG_CHECKING(for --enable-msan)
AC_ARG_ENABLE(msan, AS_HELP_STRING([--enable-msan], [Enable gcc/clang memory sanitizer options (default: no)]),
[
AC_MSG_RESULT(yes)
[], [ enable_msan=no ])
AC_MSG_RESULT($enable_msan)
if test "$enable_msan" = yes ; then
msan_cflags="-fsanitize=memory -fsanitize-memory-track-origins -fno-omit-frame-pointer"
msan_rust_defs="-Z sanitizer=memory"
],
[
AC_MSG_RESULT(no)
else
msan_cflags=""
msan_rust_defs=""
])
fi
AC_SUBST([msan_cflags])
AC_SUBST([msan_rust_defs])
AM_CONDITIONAL(enable_msan,test "$enable_msan" = "yes")
AC_MSG_CHECKING(for --enable-tsan)
AC_ARG_ENABLE(tsan, AS_HELP_STRING([--enable-tsan], [Enable gcc/clang thread sanitizer options (default: no)]),
[
AC_MSG_RESULT(yes)
[], [ enable_tsan=no ])
AC_MSG_RESULT($enable_tsan)
if test "$enable_tsan" = yes ; then
tsan_cflags="-fsanitize=thread -fno-omit-frame-pointer"
tsan_rust_defs="-Z sanitizer=thread"
],
[
AC_MSG_RESULT(no)
else
tsan_cflags=""
tsan_rust_defs=""
])
fi
AC_SUBST([tsan_cflags])
AC_SUBST([tsan_rust_defs])
AM_CONDITIONAL(enable_tsan,test "$enable_tsan" = "yes")
AC_MSG_CHECKING(for --enable-ubsan)
AC_ARG_ENABLE(ubsan, AS_HELP_STRING([--enable-ubsan], [Enable gcc/clang undefined behaviour sanitizer options (default: no)]),
[
AC_MSG_RESULT(yes)
[], [ enable_ubsan=no ])
AC_MSG_RESULT($enable_ubsan)
if test "$enable_ubsan" = yes ; then
ubsan_cflags="-fsanitize=undefined -fno-omit-frame-pointer"
ubsan_rust_defs=""
],
[
AC_MSG_RESULT(no)
else
ubsan_cflags=""
ubsan_rust_defs=""
])
fi
AC_SUBST([ubsan_cflags])
AC_SUBST([ubsan_rust_defs])
AM_CONDITIONAL(enable_ubsan,test "$enable_ubsan" = "yes")
AM_CONDITIONAL(with_sanitizer,test "$enable_asan" = "yes" -o "$enable_msan" = "yes" -o "$enable_tsan" = "yes" -o "$enable_ubsan" = "yes")
# Enable CLANG
AC_MSG_CHECKING(for --enable-clang)
AC_ARG_ENABLE(clang, AS_HELP_STRING([--enable-clang], [Enable clang (default: no)]),
[
AC_MSG_RESULT(yes)
],
[
AC_MSG_RESULT(no)
])
[], [ enable_clang=no ])
AC_MSG_RESULT($enable_clang)
AM_CONDITIONAL(CLANG_ENABLE,test "$enable_clang" = "yes")
# Enable Perl
if test -z "$enable_perl" ; then
enable_perl=no
AC_MSG_CHECKING(for --enable-legacy)
AC_ARG_ENABLE(legacy, AS_HELP_STRING([--enable-legacy], [Enable deprecated legacy functionality (default: no)]),
[], [ enable_legacy=no ])
AC_MSG_RESULT($enable_legacy)
AC_SUBST([enable_legacy])
AM_CONDITIONAL(ENABLE_LEGACY,test "$enable_legacy" = "yes")
if test "$enable_legacy" = yes ; then
enable_perl=yes
fi
AC_MSG_CHECKING(for --enable-perl)
AC_ARG_ENABLE(perl, AS_HELP_STRING([--enable-perl], [Enable deprecated legacy perl and shell scripts (default: no)]),
[
AC_MSG_RESULT(yes)
],
[
AC_MSG_RESULT(no)
])
AC_ARG_ENABLE(perl, AS_HELP_STRING([--enable-perl], [Enable deprecated legacy perl scripts (default: no)]),
[], [ enable_perl=no ])
AC_MSG_RESULT($enable_perl)
AC_SUBST([enable_perl])
AM_CONDITIONAL(ENABLE_PERL,test "$enable_perl" = "yes")
......@@ -228,54 +225,45 @@ AM_CONDITIONAL(ENABLE_PERL,test "$enable_perl" = "yes")
AM_CONDITIONAL([RPM_HARDEND_CC], [test -f /usr/lib/rpm/redhat/redhat-hardened-cc1])
AC_MSG_CHECKING(for --enable-gcc-security)
AC_ARG_ENABLE(gcc-security, AS_HELP_STRING([--enable-gcc-security], [Enable gcc secure compilation options (default: no)]),
[
AC_MSG_RESULT(yes)
AM_COND_IF([RPM_HARDEND_CC],
[ gccsec_cflags="-Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector-strong --param=ssp-buffer-size=4 -grecord-gcc-switches -Werror=format-security -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 " ],
[ gccsec_cflags="-Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector-strong --param=ssp-buffer-size=4 -grecord-gcc-switches -Werror=format-security" ]
)
],
[
[], [ enable_gcc_security=no ])
AC_MSG_RESULT($enable_gcc_security)
if test "$enable_gcc_security" = yes ; then
gccsec_cflags="-Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector-strong --param=ssp-buffer-size=4 -grecord-gcc-switches -Werror=format-security"
else
# Without this, -fPIC doesn't work on generic Fedora builds (--disable-gcc-security).
AC_MSG_RESULT(no)
gccsec_cflags=""
fi
AM_COND_IF([RPM_HARDEND_CC],
[ gccsec_cflags="-specs=/usr/lib/rpm/redhat/redhat-hardened-cc1" ],
[ gccsec_cflags="" ]
)
])
[ gccsec_cflags="$gccsec_flags -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1" ],
[])
AC_SUBST([gccsec_cflags])
# Pull in profiling.
AC_MSG_CHECKING(for --enable-profiling)
AC_ARG_ENABLE(profiling, AS_HELP_STRING([--enable-profiling], [Enable gcov profiling features (default: no)]),
[
AC_MSG_RESULT(yes)
[], [ enable_profiling=no ])
AC_MSG_RESULT($enable_profiling)
if test "$enable_profiling" = yes ; then
profiling_defs="-fprofile-arcs -ftest-coverage -g3 -O0"
profiling_links="-lgcov --coverage"
],
[
AC_MSG_RESULT(no)
else
profiling_defs=""
profiling_links=""
])
fi
AC_SUBST([profiling_defs])
AC_SUBST([profiling_links])
# these enables are for optional or experimental features
if test -z "$enable_pam_passthru" ; then
enable_pam_passthru=yes # if not set on cmdline, set default
fi
AC_MSG_CHECKING(for --enable-pam-passthru)
AC_ARG_ENABLE(pam-passthru,
AS_HELP_STRING([--enable-pam-passthru],
[enable the PAM passthrough auth plugin (default: yes)]))
[enable the PAM passthrough auth plugin (default: yes)]),
[], [ enable_pam_passthru=yes ])
AC_MSG_RESULT($enable_pam_passthru)
if test "$enable_pam_passthru" = yes ; then
# check for pam header file used by plugins/pass_passthru/pam_ptimpl.c
AC_CHECK_HEADER([security/pam_appl.h], [], [AC_MSG_ERROR([Missing header file security/pam_appl.h])])
AC_MSG_RESULT(yes)
AC_DEFINE([ENABLE_PAM_PASSTHRU], [1], [enable the pam passthru auth plugin])
else
AC_MSG_RESULT(no)
fi
AM_CONDITIONAL(enable_pam_passthru,test "$enable_pam_passthru" = "yes")
......@@ -892,9 +880,6 @@ AC_SUBST(ldaplib_defs)
AC_SUBST(ldaptool_bindir)
AC_SUBST(ldaptool_opts)
AC_SUBST(plainldif_opts)
AC_SUBST(nunc_stans_inc)
AC_SUBST(nunc_stans_lib)
AC_SUBST(nunc_stans_libdir)
AC_SUBST(localrundir)
AC_SUBST(brand)
......@@ -913,7 +898,7 @@ AC_DEFINE([LDAP_ERROR_LOGGING], [1], [LDAP error logging flag])
# AC_CONFIG_FILES([ldap/admin/src/defaults.inf])
AC_CONFIG_FILES([src/pkgconfig/dirsrv.pc src/pkgconfig/nunc-stans.pc src/pkgconfig/libsds.pc src/pkgconfig/svrcore.pc])
AC_CONFIG_FILES([src/pkgconfig/dirsrv.pc src/pkgconfig/libsds.pc src/pkgconfig/svrcore.pc])
AC_CONFIG_FILES([Makefile rpm/389-ds-base.spec ])
......
import subprocess
import logging
import pytest
import shutil
import glob
import os
from lib389.paths import Paths
from enum import Enum
pkgs = ['389-ds-base', 'nss', 'nspr', 'openldap', 'cyrus-sasl']
p = Paths()
class FIPSState(Enum):
ENABLED = 'enabled'
......@@ -34,7 +38,7 @@ def is_fips():
return FIPSState.NOT_AVAILABLE
state = None
with open('/proc/sys/crypto/fips_enabled', 'r') as f:
state = f.readline()
state = f.readline().strip()
if state == '1':
return FIPSState.ENABLED
else:
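The added .strip() is the point of this hunk: readline() keeps the trailing newline, so the previous comparison of '1\n' against '1' could never match. A minimal stand-alone sketch of the same check (same /proc path, return type simplified to a bool):

def fips_enabled():
    # "1\n" means FIPS mode is on, "0\n" means off; a missing file means the
    # kernel has no FIPS support at all.
    try:
        with open('/proc/sys/crypto/fips_enabled', 'r') as f:
            return f.readline().strip() == '1'
    except FileNotFoundError:
        return False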
......@@ -61,11 +65,38 @@ def pytest_report_header(config):
return header
@pytest.mark.optionalhook
def pytest_html_results_table_header(cells):
cells.pop()
@pytest.fixture(scope="function", autouse=True)
def log_test_name_to_journald(request):
if p.with_systemd:
def log_current_test():
subprocess.Popen("echo $PYTEST_CURRENT_TEST | systemd-cat -t pytest", stdin=subprocess.PIPE, shell=True)
log_current_test()
request.addfinalizer(log_current_test)
return log_test_name_to_journald
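# Assumption, not part of the original conftest: because systemd-cat -t pytest tags each
# journal entry with the "pytest" identifier, the same lines can be pulled back out after
# a run with a small helper like this:
import subprocess

def dump_pytest_journal(lines=50):
    # journalctl -t <identifier> filters on the SYSLOG_IDENTIFIER set by systemd-cat -t.
    result = subprocess.run(['journalctl', '-t', 'pytest', '-n', str(lines), '--no-pager'],
                            capture_output=True, text=True, check=False)
    return result.stdout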
@pytest.mark.optionalhook
def pytest_html_results_table_row(report, cells):
cells.pop()
@pytest.fixture(scope="function", autouse=True)
def rotate_xsan_logs(request):
if p.asan_enabled:
xsan_logs_dir = f'{p.run_dir}/bak'
if not os.path.exists(xsan_logs_dir):
os.mkdir(xsan_logs_dir)
else:
for f in glob.glob(f'{p.run_dir}/ns-slapd-*san*'):
shutil.move(f, xsan_logs_dir)
return rotate_xsan_logs
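# The Paths() flags used above (p.with_systemd, p.asan_enabled) mirror the ./configure
# toggles earlier in this changeset (--enable-asan and friends). A hypothetical sketch of
# gating a whole test module on an ASAN build, following the skipif pattern used elsewhere
# in this tree:
import pytest
from lib389.paths import Paths

ds_paths = Paths()
pytestmark = pytest.mark.skipif(not ds_paths.asan_enabled,
                                reason="requires a server built with --enable-asan")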
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
pytest_html = item.config.pluginmanager.getplugin('html')
outcome = yield
report = outcome.get_result()
extra = getattr(report, 'extra', [])
if report.when == 'call' and pytest_html is not None:
for f in glob.glob(f'{p.run_dir}/ns-slapd-*san*'):
with open(f) as asan_report:
text = asan_report.read()
extra.append(pytest_html.extras.text(text, name=os.path.basename(f)))
report.extra = extra
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2019 William Brown <william@blackhats.net.au>
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
from lib389.topologies import topology_st
from lib389.dbgen import dbgen
from lib389.ldclt import Ldclt
from lib389.tasks import ImportTask
from lib389._constants import DEFAULT_SUFFIX
def test_stress_search_simple(topology_st):
"""Test a simple stress test of searches on the directory server.
:id: 3786d01c-ea03-4655-a4f9-450693c75863
:setup: Standalone Instance
:steps:
1. Create test users
2. Import them
3. Stress test!
:expectedresults:
1. Success
2. Success
3. Results are written to /tmp
"""
inst = topology_st.standalone
inst.config.set("nsslapd-verify-filter-schema", "off")
# Bump idllimit to test OR worst cases.
from lib389.config import LDBMConfig
lconfig = LDBMConfig(inst)
# lconfig.set("nsslapd-idlistscanlimit", '20000')
# lconfig.set("nsslapd-lookthroughlimit", '20000')
ldif_dir = inst.get_ldif_dir()
import_ldif = ldif_dir + '/basic_import.ldif'
dbgen(inst, 10000, import_ldif, DEFAULT_SUFFIX)
r = ImportTask(inst)
r.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX)
r.wait()
# Run a small round first to warm up the server's caches ...
l = Ldclt(inst)
l.search_loadtest(DEFAULT_SUFFIX, "(mail=XXXX@example.com)", rounds=1)
# Now do it for realsies!
# l.search_loadtest(DEFAULT_SUFFIX, "(|(mail=XXXX@example.com)(nonexist=foo))", rounds=10)
l.search_loadtest(DEFAULT_SUFFIX, "(mail=XXXX@example.com)", rounds=10)
......@@ -15,7 +15,7 @@ from lib389.cos import CosTemplate, CosClassicDefinition
from lib389.topologies import topology_st as topo
from lib389.idm.nscontainer import nsContainer
from lib389.idm.domain import Domain
from lib389.idm.role import FilterRoles
from lib389.idm.role import FilteredRoles
pytestmark = pytest.mark.tier1
......@@ -55,7 +55,7 @@ def _add_user(request, topo):
ou = OrganizationalUnit(topo.standalone, "ou=sales,o=acivattr,{}".format(DEFAULT_SUFFIX))
ou.create(properties={'ou': 'sales'})
roles = FilterRoles(topo.standalone, DNBASE)
roles = FilteredRoles(topo.standalone, DNBASE)
roles.create(properties={'cn':'FILTERROLEENGROLE', 'nsRoleFilter':'cn=eng*'})
roles.create(properties={'cn': 'FILTERROLESALESROLE', 'nsRoleFilter': 'cn=sales*'})
......
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2019 William Brown <william@blackhats.net.au>
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
import pytest
from lib389.idm.user import nsUserAccounts, UserAccounts
from lib389.topologies import topology_st as topology
from lib389.paths import Paths
from lib389.utils import ds_is_older
from lib389._constants import *
default_paths = Paths()
pytestmark = pytest.mark.tier1
USER_PASSWORD = "some test password"
NEW_USER_PASSWORD = "some new password"
@pytest.mark.skipif(default_paths.perl_enabled or ds_is_older('1.4.2.0'), reason="Default aci's in older versions do not support this functionality")
def test_acl_default_allow_self_write_nsuser(topology):
"""
Testing nsusers can self write and self read. This is a sanity test
to check that our default entries have their acis applied.
:id: 4f0fb01a-36a6-430c-a2ee-ebeb036bd951
:setup: Standalone instance
:steps:
1. Create a user and bind as that user
2. Check the user can read its own attributes
3. Check the user can update its own attributes and change its password
:expectedresults:
1. Success
2. Success
3. Success
"""
topology.standalone.enable_tls()
nsusers = nsUserAccounts(topology.standalone, DEFAULT_SUFFIX)
# Create a user as dm.
user = nsusers.create(properties={
'uid': 'test_nsuser',
'cn': 'test_nsuser',
'displayName': 'testNsuser',
'legalName': 'testNsuser',
'uidNumber': '1001',
'gidNumber': '1001',
'homeDirectory': '/home/testnsuser',
'userPassword': USER_PASSWORD,
})
# Create a new con and bind as the user.
user_conn = user.bind(USER_PASSWORD)
user_nsusers = nsUserAccounts(user_conn, DEFAULT_SUFFIX)
self_ent = user_nsusers.get(dn=user.dn)
# Can we self read x,y,z
check = self_ent.get_attrs_vals_utf8([
'uid',
'cn',
'displayName',
'legalName',
'uidNumber',
'gidNumber',
'homeDirectory',
])
for k in check.values():
# Could we read the values?
assert(isinstance(k, list))
assert(len(k) > 0)
# Can we self change a,b,c
self_ent.ensure_attr_state({
'legalName': ['testNsuser_update'],
'displayName': ['testNsuser_update'],
'nsSshPublicKey': ['testkey'],
})
# self change pw
self_ent.change_password(USER_PASSWORD, NEW_USER_PASSWORD)
@pytest.mark.skipif(default_paths.perl_enabled or ds_is_older('1.4.2.0'), reason="Default aci's in older versions do not support this functionality")
def test_acl_default_allow_self_write_user(topology):
"""
Testing users can self write and self read. This is a sanity test
to check that our default entries have their acis applied.
:id: 4c52321b-f473-4c32-a1d5-489b138cd199
:setup: Standalone instance
:steps:
1. Create a user and bind as that user
2. Check the user can read its own attributes
3. Check the user can change its own password
:expectedresults:
1. Success
2. Success
3. Success
"""
topology.standalone.enable_tls()
users = UserAccounts(topology.standalone, DEFAULT_SUFFIX)
# Create a user as dm.
user = users.create(properties={
'uid': 'test_user',
'cn': 'test_user',
'sn': 'User',
'uidNumber': '1002',
'gidNumber': '1002',
'homeDirectory': '/home/testuser',
'userPassword': USER_PASSWORD,
})
# Create a new con and bind as the user.
user_conn = user.bind(USER_PASSWORD)
user_users = UserAccounts(user_conn, DEFAULT_SUFFIX)
self_ent = user_users.get(dn=user.dn)
# Can we self read x,y,z
check = self_ent.get_attrs_vals_utf8([
'uid',
'cn',
'sn',
'uidNumber',
'gidNumber',
'homeDirectory',
])
for (a, k) in check.items():
print(a)
# Could we read the values?
assert(isinstance(k, list))
assert(len(k) > 0)
# Self change pw
self_ent.change_password(USER_PASSWORD, NEW_USER_PASSWORD)
......@@ -18,7 +18,7 @@ from lib389.idm.user import UserAccounts, UserAccount
from lib389.idm.organizationalunit import OrganizationalUnits
from lib389.topologies import topology_st as topo
from lib389.idm.domain import Domain
from lib389.idm.role import NestedRoles, ManagedRoles, FilterRoles
from lib389.idm.role import NestedRoles, ManagedRoles, FilteredRoles
from lib389.idm.account import Anonymous
import ldap
......@@ -94,7 +94,7 @@ def _add_user(request, topo):
for i in ['ROLE1', 'ROLE21', 'ROLE31']:
managedroles.create(properties={'cn': i})
filterroles = FilterRoles(topo.standalone, OU_ROLE)
filterroles = FilteredRoles(topo.standalone, OU_ROLE)
filterroles.create(properties={'cn': 'filterRole',
'nsRoleFilter': 'sn=Dr Drake',
'description': 'filter role tester'})
......
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2019 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
import os
import pytest
from lib389.topologies import topology_st as topo
from lib389.plugins import AutoMembershipPlugin, AutoMembershipDefinitions, MemberOfPlugin
import ldap
pytestmark = pytest.mark.tier1
@pytest.mark.bz834056
def test_configuration(topo):
"""
Test the automembership plugin when data and config areas are mixed in the plugin configuration
:id: 45a5a8f8-e800-11e8-ab16-8c16451d917b
:setup: Single Instance
:steps:
1. Automembership plugin fails in an MMR setup if data and config
area are mixed in the plugin configuration
2. Plugin configuration should throw proper error messages if not configured properly
:expectedresults:
1. Should succeed
2. Should succeed
"""
# Configure pluginConfigArea for PLUGIN_AUTO
AutoMembershipPlugin(topo.standalone).set("nsslapd-pluginConfigArea", 'cn=config')
# Enable MemberOf plugin
MemberOfPlugin(topo.standalone).enable()
topo.standalone.restart()
# Add invalid configuration, which mixes data and config area: All will fail
automembers = AutoMembershipDefinitions(topo.standalone)
with pytest.raises(ldap.UNWILLING_TO_PERFORM):
automembers.create(properties={
'cn': 'autouserGroups',
'autoMemberScope': f'ou=Employees,cn=config',
'autoMemberFilter': "objectclass=posixAccount",
'autoMemberDefaultGroup': [f'cn=SuffDef1,ou=autouserGroups,cn=config',
f'cn=SuffDef2,ou=autouserGroups,cn=config'],
'autoMemberGroupingAttr': 'member:dn'
})
# Search in error logs
assert topo.standalone.ds_error_log.match('.*ERR - auto-membership-plugin - '
'automember_parse_config_entry - The default group '
'"cn=SuffDef1,ou=autouserGroups,cn=config" '
'can not be a child of the plugin config area "cn=config"')
if __name__ == "__main__":
CURRENT_FILE = os.path.realpath(__file__)
pytest.main("-s -v %s" % CURRENT_FILE)
import logging
import pytest
import os
from datetime import datetime
from lib389._constants import DEFAULT_SUFFIX, INSTALL_LATEST_CONFIG
from lib389.properties import BACKEND_SAMPLE_ENTRIES, TASK_WAIT
from lib389.topologies import topology_st as topo
from lib389.backend import Backend
from lib389.tasks import BackupTask, RestoreTask
DEBUGGING = os.getenv("DEBUGGING", default=False)
if DEBUGGING:
logging.getLogger(__name__).setLevel(logging.DEBUG)
else:
logging.getLogger(__name__).setLevel(logging.INFO)
log = logging.getLogger(__name__)
def test_missing_backend(topo):
"""Test that an error is returned when a restore is performed for a
backend that is no longer present.
:id: 889b8028-35cf-41d7-91f6-bc5193683646
:setup: Standalone Instance
:steps:
1. Create a second backend
2. Perform a back up
3. Remove one of the backends from the config
4. Perform a restore
:expectedresults:
1. Success
2. Success
3. Success
4. Failure
"""
# Create a new backend
BE_NAME = 'backupRoot'
BE_SUFFIX = 'dc=back,dc=up'
props = {
'cn': BE_NAME,
'nsslapd-suffix': BE_SUFFIX,
BACKEND_SAMPLE_ENTRIES: INSTALL_LATEST_CONFIG
}
be = Backend(topo.standalone)
backend_entry = be.create(properties=props)
# perform backup
backup_dir_name = "backup-%s" % datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
archive = os.path.join(topo.standalone.ds_paths.backup_dir, backup_dir_name)
backup_task = BackupTask(topo.standalone)
task_properties = {'nsArchiveDir': archive}
backup_task.create(properties=task_properties)
backup_task.wait()
assert backup_task.get_exit_code() == 0
# Remove new backend
backend_entry.delete()
# Restore the backup - it should fail
restore_task = RestoreTask(topo.standalone)
task_properties = {'nsArchiveDir': archive}
restore_task.create(properties=task_properties)
restore_task.wait()
assert restore_task.get_exit_code() != 0
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
pytest.main(["-s", CURRENT_FILE])
......@@ -11,7 +11,7 @@ from lib389._mapped_object import DSLdapObject
from lib389.utils import *
from lib389.topologies import topology_st as topo
from lib389._constants import DN_CONFIG_LDBM, DN_USERROOT_LDBM, DEFAULT_SUFFIX
from lib389._constants import DN_CONFIG_LDBM, DN_CONFIG_LDBM_BDB, DN_USERROOT_LDBM, DEFAULT_SUFFIX
pytestmark = pytest.mark.tier0
......@@ -119,15 +119,22 @@ def test_cache_autosize_non_zero(topo, autosize, autosize_split):
"""
config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM)
bdb_config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM_BDB)
userroot_ldbm = DSLdapObject(topo.standalone, DN_USERROOT_LDBM)
cachesize = '33333333'
if ds_is_older('1.4.2'):
dbcachesize_val = config_ldbm.get_attr_val('nsslapd-dbcachesize')
cachenensize_val = userroot_ldbm.get_attr_val('nsslapd-cachememsize')
dncachenensize_val = userroot_ldbm.get_attr_val('nsslapd-dncachememsize')
autosize_val = config_ldbm.get_attr_val('nsslapd-cache-autosize')
autosize_split_val = config_ldbm.get_attr_val('nsslapd-cache-autosize-split')
else:
dbcachesize_val = bdb_config_ldbm.get_attr_val('nsslapd-dbcachesize')
autosize_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize')
autosize_split_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize-split')
cachenensize_val = userroot_ldbm.get_attr_val('nsslapd-cachememsize')
dncachenensize_val = userroot_ldbm.get_attr_val('nsslapd-dncachememsize')
log.info("Check nsslapd-dbcachesize and nsslapd-cachememsize before the test")
log.info("nsslapd-dbcachesize == {}".format(dbcachesize_val))
......@@ -164,11 +171,17 @@ def test_cache_autosize_non_zero(topo, autosize, autosize_split):
config_ldbm.set('nsslapd-dbcachesize ', cachesize)
topo.standalone.restart()
if ds_is_older('1.4.2'):
dbcachesize_val = config_ldbm.get_attr_val('nsslapd-dbcachesize')
cachenensize_val = userroot_ldbm.get_attr_val('nsslapd-cachememsize')
dncachenensize_val = userroot_ldbm.get_attr_val('nsslapd-dncachememsize')
autosize_val = config_ldbm.get_attr_val('nsslapd-cache-autosize')
autosize_split_val = config_ldbm.get_attr_val('nsslapd-cache-autosize-split')
else:
dbcachesize_val = bdb_config_ldbm.get_attr_val('nsslapd-dbcachesize')
autosize_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize')
autosize_split_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize-split')
cachenensize_val = userroot_ldbm.get_attr_val('nsslapd-cachememsize')
dncachenensize_val = userroot_ldbm.get_attr_val('nsslapd-dncachememsize')
log.info("Check nsslapd-dbcachesize and nsslapd-cachememsize in the appropriate range.")
log.info("nsslapd-dbcachesize == {}".format(dbcachesize_val))
......@@ -208,16 +221,23 @@ def test_cache_autosize_basic_sane(topo, autosize_split):
"""
config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM)
bdb_config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM_BDB)
userroot_ldbm = DSLdapObject(topo.standalone, DN_USERROOT_LDBM)
config_ldbm.set('nsslapd-cache-autosize', '0')
# Test with caches with both real values and 0
for cachesize in ('0', '33333333'):
if ds_is_older('1.4.2'):
dbcachesize_val = config_ldbm.get_attr_val('nsslapd-dbcachesize')
cachenensize_val = userroot_ldbm.get_attr_val('nsslapd-cachememsize')
dncachenensize_val = userroot_ldbm.get_attr_val('nsslapd-dncachememsize')
autosize_val = config_ldbm.get_attr_val('nsslapd-cache-autosize')
autosize_split_val = config_ldbm.get_attr_val('nsslapd-cache-autosize-split')
else:
dbcachesize_val = bdb_config_ldbm.get_attr_val('nsslapd-dbcachesize')
autosize_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize')
autosize_split_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize-split')
cachenensize_val = userroot_ldbm.get_attr_val('nsslapd-cachememsize')
dncachenensize_val = userroot_ldbm.get_attr_val('nsslapd-dncachememsize')
log.info("Check nsslapd-dbcachesize and nsslapd-cachememsize before the test")
log.info("nsslapd-dbcachesize == {}".format(dbcachesize_val))
......@@ -241,11 +261,17 @@ def test_cache_autosize_basic_sane(topo, autosize_split):
userroot_ldbm.set('nsslapd-cachememsize', cachesize)
topo.standalone.restart()
if ds_is_older('1.4.2'):
dbcachesize_val = config_ldbm.get_attr_val('nsslapd-dbcachesize')
cachenensize_val = userroot_ldbm.get_attr_val('nsslapd-cachememsize')
dncachenensize_val = userroot_ldbm.get_attr_val('nsslapd-dncachememsize')
autosize_val = config_ldbm.get_attr_val('nsslapd-cache-autosize')
autosize_split_val = config_ldbm.get_attr_val('nsslapd-cache-autosize-split')
else:
dbcachesize_val = bdb_config_ldbm.get_attr_val('nsslapd-dbcachesize')
autosize_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize')
autosize_split_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize-split')
cachenensize_val = userroot_ldbm.get_attr_val('nsslapd-cachememsize')
dncachenensize_val = userroot_ldbm.get_attr_val('nsslapd-dncachememsize')
log.info("Check nsslapd-dbcachesize and nsslapd-cachememsize in the appropriate range.")
log.info("nsslapd-dbcachesize == {}".format(dbcachesize_val))
......@@ -277,8 +303,13 @@ def test_cache_autosize_invalid_values(topo, invalid_value):
"""
config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM)
bdb_config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM_BDB)
if ds_is_older('1.4.2'):
autosize_val = config_ldbm.get_attr_val('nsslapd-cache-autosize')
autosize_split_val = config_ldbm.get_attr_val('nsslapd-cache-autosize-split')
else:
autosize_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize')
autosize_split_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize-split')
log.info("Set nsslapd-cache-autosize-split to {}".format(invalid_value))
with pytest.raises(ldap.UNWILLING_TO_PERFORM):
......
......@@ -15,7 +15,7 @@ from lib389.utils import *
from lib389._constants import DN_CONFIG, DEFAULT_SUFFIX
from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES
from lib389.backend import *
from lib389.config import LDBMConfig
from lib389.config import LDBMConfig, BDB_LDBMConfig
from lib389.cos import CosPointerDefinitions, CosTemplates
from lib389.backend import Backends
from lib389.monitor import MonitorLDBM
......@@ -144,13 +144,24 @@ def test_config_deadlock_policy(topology_m2):
default_val = b'9'
ldbmconfig = LDBMConfig(topology_m2.ms["master1"])
bdbconfig = BDB_LDBMConfig(topology_m2.ms["master1"])
if ds_is_older('1.4.2'):
deadlock_policy = ldbmconfig.get_attr_val_bytes('nsslapd-db-deadlock-policy')
else:
deadlock_policy = bdbconfig.get_attr_val_bytes('nsslapd-db-deadlock-policy')
assert deadlock_policy == default_val
# Try a range of valid values
for val in ('0', '5', '9'):
for val in (b'0', b'5', b'9'):
ldbmconfig.replace('nsslapd-db-deadlock-policy', val)
if ds_is_older('1.4.2'):
deadlock_policy = ldbmconfig.get_attr_val_bytes('nsslapd-db-deadlock-policy')
else:
deadlock_policy = bdbconfig.get_attr_val_bytes('nsslapd-db-deadlock-policy')
assert deadlock_policy == val
# Try a range of invalid values
for val in ('-1', '10'):
......
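The pattern in the two hunks above recurs throughout this changeset: from 1.4.2 onward the BDB-specific attributes (nsslapd-dbcachesize, nsslapd-cache-autosize, nsslapd-db-deadlock-policy) are read from the bdb sub-entry via BDB_LDBMConfig, while older servers keep them on the main ldbm config entry. A small helper along these lines captures the branch once (dbcachesize() is a hypothetical name; the lib389 classes are the ones imported above):

from lib389.utils import ds_is_older
from lib389.config import LDBMConfig, BDB_LDBMConfig

def dbcachesize(inst):
    # Pre-1.4.2 servers expose the BDB attributes on the main ldbm config entry;
    # newer servers move them to the bdb sub-entry handled by BDB_LDBMConfig.
    cfg = LDBMConfig(inst) if ds_is_older('1.4.2') else BDB_LDBMConfig(inst)
    return cfg.get_attr_val_utf8('nsslapd-dbcachesize')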
......@@ -10,7 +10,7 @@ import pytest, os, ldap
from lib389.cos import CosClassicDefinition, CosClassicDefinitions, CosTemplate
from lib389._constants import DEFAULT_SUFFIX
from lib389.topologies import topology_st as topo
from lib389.idm.role import FilterRoles
from lib389.idm.role import FilteredRoles
from lib389.idm.nscontainer import nsContainer
from lib389.idm.user import UserAccount
......@@ -36,7 +36,7 @@ def test_positive(topo):
6. Operation should succeed
"""
# Adding ns filter role
roles = FilterRoles(topo.standalone, DEFAULT_SUFFIX)
roles = FilteredRoles(topo.standalone, DEFAULT_SUFFIX)
roles.create(properties={'cn': 'FILTERROLEENGROLE',
'nsRoleFilter': 'cn=eng*'})
# adding ns container
......
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2019 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
from lib389.monitor import MonitorDiskSpace
from lib389.topologies import topology_st as topo
def test_basic(topo):
"""Test that the cn=disk space,cn=monitor gives at least one value
:id: f1962762-2c6c-4e50-97af-a00012a7486d
:setup: Standalone
:steps:
1. Get cn=disk space,cn=monitor entry
2. Check it has at least one dsDisk attribute
3. Check dsDisk attribute has the partition and sizes
4. Check the numbers are valid integers
:expectedresults:
1. It should succeed
2. It should succeed
3. It should succeed
4. It should succeed
"""
inst = topo.standalone
# Read the first dsDisk value from cn=disk space,cn=monitor
disk_space_mon = MonitorDiskSpace(inst)
disk_str = disk_space_mon.get_disks()[0]
inst.log.info('Check that "partition", "size", "used", "available", "use%" words are present in the string')
words = ["partition", "size", "used", "available", "use%"]
assert all(map(lambda word: word in disk_str, words))
inst.log.info("Check that the sizes are numbers")
for word in words[1:]:
number = disk_str.split(f'{word}="')[1].split('"')[0]
try:
int(number)
except ValueError:
raise ValueError(f'A "{word}" value is not a number')
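# Editor's sketch, not part of the original test: the dsDisk value checked above is a
# series of key="value" pairs, so it can be unpacked into a dict for easier assertions.
import re

def parse_dsdisk(disk_str):
    # e.g. 'partition="/dev/vda1" size="53660876800" used="18360446976" available="35300429824" use%="34"'
    return {key: value for key, value in re.findall(r'(\S+?)="([^"]*)"', disk_str)}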