Skip to content
Commits on Source (45)
......@@ -177,7 +177,6 @@ PCRE_LINK = @pcre_lib@ -lpcre
NETSNMP_LINK = @netsnmp_lib@ @netsnmp_link@
PAM_LINK = -lpam
KERBEROS_LINK = $(kerberos_lib)
TCMALLOC_LINK = @tcmalloc_lib@
EVENT_LINK = @event_lib@
LIBSOCKET=@LIBSOCKET@
......@@ -193,8 +192,7 @@ if HPUX
AM_LDFLAGS = -lpthread
else
#AM_LDFLAGS = -Wl,-z,defs
# Provide the tcmalloc links if needed
AM_LDFLAGS = $(RUST_LDFLAGS) $(ASAN_CFLAGS) $(MSAN_CFLAGS) $(TSAN_CFLAGS) $(UBSAN_CFLAGS) $(PROFILING_LINKS) $(TCMALLOC_LINK) $(CLANG_LDFLAGS)
AM_LDFLAGS = $(RUST_LDFLAGS) $(ASAN_CFLAGS) $(MSAN_CFLAGS) $(TSAN_CFLAGS) $(UBSAN_CFLAGS) $(PROFILING_LINKS) $(CLANG_LDFLAGS)
endif #end hpux
# https://www.gnu.org/software/libtool/manual/html_node/Updating-version-info.html#Updating-version-info
......@@ -355,7 +353,7 @@ bin_PROGRAMS = dbscan \
# based on defines
# ----------------------------------------------------------------------------------------
server_LTLIBRARIES = libsds.la libnunc-stans.la libldaputil.la libslapd.la libns-dshttpd.la
server_LTLIBRARIES = libsds.la libnunc-stans.la libslapd.la libldaputil.la libns-dshttpd.la
lib_LTLIBRARIES = libsvrcore.la
......@@ -939,6 +937,7 @@ include_HEADERS = src/svrcore/src/svrcore.h
#------------------------
dist_man_MANS = man/man1/dbscan.1 \
man/man1/cl-dump.1 \
man/man1/cl-dump.pl.1 \
man/man1/dbgen.pl.1 \
man/man1/ds-logpipe.py.1 \
man/man1/ds-replcheck.1 \
......@@ -951,8 +950,10 @@ dist_man_MANS = man/man1/dbscan.1 \
man/man1/mmldif.1 \
man/man1/pwdhash.1 \
man/man1/repl-monitor.1 \
man/man1/repl-monitor.pl.1 \
man/man1/rsearch.1 \
man/man1/readnsstate.1 \
man/man5/99user.ldif.5 \
man/man8/migrate-ds.pl.8 \
man/man8/ns-slapd.8 \
man/man8/restart-dirsrv.8 \
......@@ -960,23 +961,44 @@ dist_man_MANS = man/man1/dbscan.1 \
man/man8/start-dirsrv.8 \
man/man8/stop-dirsrv.8 \
man/man8/status-dirsrv.8 \
man/man8/bak2db.8 man/man8/bak2db.pl.8 \
man/man8/db2bak.8 man/man8/db2bak.pl.8 \
man/man8/db2ldif.8 man/man8/db2ldif.pl.8 \
man/man8/db2index.8 man/man8/db2index.pl.8 \
man/man8/ldif2db.8 man/man8/ldif2db.pl.8 \
man/man8/dbverify.8 man/man8/verify-db.pl.8 \
man/man8/bak2db.8 \
man/man8/bak2db.pl.8 \
man/man5/certmap.conf.5 \
man/man8/cleanallruv.pl.8 \
man/man8/dbverify.8 \
man/man8/db2bak.8 \
man/man8/db2bak.pl.8 \
man/man8/db2ldif.8 \
man/man8/db2ldif.pl.8 \
man/man8/db2index.8 \
man/man8/db2index.pl.8 \
man/man8/fixup-linkedattrs.pl.8 \
man/man8/fixup-memberof.pl.8 \
man/man8/ldif2db.8 \
man/man8/ldif2db.pl.8 \
man/man8/dbmon.sh.8 \
man/man8/dn2rdn.8 man/man8/ldif2ldap.8 \
man/man8/restoreconfig.8 man/man8/saveconfig.8 \
man/man8/suffix2instance.8 man/man8/monitor.8 \
man/man8/upgradednformat.8 man/man8/vlvindex.8 \
man/man8/cleanallruv.pl.8 man/man8/schema-reload.pl.8 \
man/man8/fixup-linkedattrs.pl.8 man/man8/fixup-memberof.pl.8 \
man/man8/syntax-validate.pl.8 man/man8/usn-tombstone-cleanup.pl.8 \
man/man8/ns-accountstatus.pl.8 man/man8/ns-newpwpolicy.pl.8 \
man/man8/ns-activate.pl.8 man/man8/ns-inactivate.pl.8 \
man/man8/upgradedb.8 man/man8/remove-ds.pl.8
man/man5/dirsrv.5 \
man/man5/dirsrv.systemd.5 \
man/man8/dn2rdn.8 \
man/man8/ldif2ldap.8 \
man/man8/monitor.8 \
man/man8/ns-accountstatus.pl.8 \
man/man8/ns-newpwpolicy.pl.8 \
man/man8/ns-activate.pl.8 \
man/man8/ns-inactivate.pl.8 \
man/man8/remove-ds.pl.8 \
man/man8/restoreconfig.8 \
man/man8/saveconfig.8 \
man/man8/schema-reload.pl.8 \
man/man5/slapd-collations.conf.5 \
man/man8/suffix2instance.8 \
man/man8/syntax-validate.pl.8 \
man/man5/template-initconfig.5 \
man/man8/upgradednformat.8 \
man/man8/upgradedb.8 \
man/man8/usn-tombstone-cleanup.pl.8 \
man/man8/vlvindex.8 \
man/man8/verify-db.pl.8
#------------------------
# updates
......@@ -2352,7 +2374,7 @@ git-archive:
# How will we update this to python 3?
lib389: src/lib389/setup.py
cd $(srcdir)/src/lib389; $(PYTHON) setup.py build
cd $(srcdir)/src/lib389; $(PYTHON) setup.py build ; $(PYTHON) setup.py build_manpages
lib389-install: lib389
cd $(srcdir)/src/lib389; $(PYTHON) setup.py install --skip-build --force
......
......@@ -10,7 +10,7 @@ vendor="389 Project"
# PACKAGE_VERSION is constructed from these
VERSION_MAJOR=1
VERSION_MINOR=4
VERSION_MAINT=0.11
VERSION_MAINT=0.13
# NOTE: VERSION_PREREL is automatically set for builds made out of a git tree
VERSION_PREREL=
VERSION_DATE=$(date -u +%Y%m%d)
......
......@@ -652,7 +652,6 @@ case $host in
platform="linux"
initdir='$(sysconfdir)/rc.d/init.d'
# do arch specific linux stuff here
# TCMalloc is only on i686, x86_64, ppc64 and arm, so we pick that here.
case $host in
i*86-*-linux*)
AC_DEFINE([CPU_x86], [], [cpu type x86])
......@@ -849,7 +848,6 @@ m4_include(m4/selinux.m4)
m4_include(m4/systemd.m4)
m4_include(m4/cmocka.m4)
m4_include(m4/doxygen.m4)
m4_include(m4/tcmalloc.m4)
PACKAGE_BASE_VERSION=`echo $PACKAGE_VERSION | awk -F\. '{print $1"."$2}'`
AC_SUBST(PACKAGE_BASE_VERSION)
......
......@@ -18,4 +18,5 @@ usr/sbin/
usr/share/dirsrv/
usr/share/gdb/auto-load/usr/sbin/ns-slapd-gdb.py
usr/share/man/man1/*.1
usr/share/man/man5/*.5
usr/share/man/man8/*.8
389-ds-base (1.4.0.11-1) UNRELEASED; urgency=medium
389-ds-base (1.4.0.13-1) experimental; urgency=medium
* New upstream release.
- CVE-2018-10850 (Closes: #903501)
* control: Update maintainer address.
* control: Upstream dropped support for non-64bit architectures, so
build only on supported 64bit archs (amd64, arm64, mips64el,
ppc64el, s390x).
* control: Drop 389-ds metapackage, the other 389-ds-* packages are
EOL'd upstream and will be removed soon.
* control: svrcore got merged here, drop it from build-depends.
* ftbs_lsoftotkn3.diff: Dropped, obsolete.
* fix-linking.diff: Fix the order of linking
liblslapd.la/libldaputil.la.
* control: Add rsync to build-depends.
* libs, dev, control: Add libsvrcore files, replace old package.
* base: Add new scripts, add python3-selinux, -semanage, -sepolicy to
......@@ -21,8 +18,13 @@
  * Add various libjs packages to cockpit-389-ds Depends, add the rest
to d/missing-sources.
* copyright: Updated. (Closes: #904760)
* control: Modify 389-ds to depend on cockpit-389-ds and drop the old
GUI packages which are deprecated upstream.
* dont-build-new-manpages.diff: Debian doesn't have argparse-manpage,
so in order to not FTBFS don't build new manpages.
* base.install: Add man5/*.
-- Timo Aaltonen <tjaalton@debian.org> Sat, 02 Jun 2018 09:34:05 +0300
-- Timo Aaltonen <tjaalton@debian.org> Tue, 31 Jul 2018 23:46:17 +0300
389-ds-base (1.3.8.2-1) unstable; urgency=medium
......
......@@ -36,6 +36,20 @@ Vcs-Git: https://salsa.debian.org/freeipa-team/389-ds-base.git
Vcs-Browser: http://salsa.debian.org/freeipa-team/389-ds-base.git
Homepage: http://directory.fedoraproject.org
Package: 389-ds
Architecture: all
Depends:
389-ds-base,
cockpit-389-ds,
${misc:Depends},
Description: 389 Directory Server suite - metapackage
Based on the Lightweight Directory Access Protocol (LDAP), the 389
Directory Server is designed to manage large directories of users and
resources robustly and scalably.
.
This is a metapackage depending on the LDAPv3 server and a Cockpit UI plugin
for administration.
Package: 389-ds-base-libs
Section: libs
Architecture: amd64 arm64 mips64el ppc64el s390x
......
--- a/Makefile.am
+++ b/Makefile.am
@@ -2374,7 +2371,7 @@ git-archive:
# How will we update this to python 3?
lib389: src/lib389/setup.py
- cd $(srcdir)/src/lib389; $(PYTHON) setup.py build ; $(PYTHON) setup.py build_manpages
+ cd $(srcdir)/src/lib389; $(PYTHON) setup.py build ; $(PYTHON) setup.py
lib389-install: lib389
cd $(srcdir)/src/lib389; $(PYTHON) setup.py install --skip-build --force
--- a/src/lib389/setup.py
+++ b/src/lib389/setup.py
@@ -14,7 +14,7 @@
from setuptools import setup, find_packages
from os import path
-from build_manpages import build_manpages
+#from build_manpages import build_manpages
from setuptools.command.build_py import build_py
from setuptools.command.install import install
@@ -66,12 +66,6 @@ setup(
'cli/dscreate',
'cli/dsidm',
]),
- ('/usr/share/man/man8', [
- 'man/dsctl.8',
- 'man/dsconf.8',
- 'man/dscreate.8',
- 'man/dsidm.8',
- ]),
],
install_requires=[
@@ -85,10 +79,4 @@ setup(
'python-ldap',
],
- cmdclass={
- # Dynamically build man pages for cli tools
- 'build_manpages': build_manpages.build_manpages,
- 'build_py': build_manpages.get_build_py_cmd(build_py),
- }
-
)
--- a/Makefile.am
+++ b/Makefile.am
@@ -355,7 +355,7 @@ bin_PROGRAMS = dbscan \
# based on defines
# ----------------------------------------------------------------------------------------
-server_LTLIBRARIES = libsds.la libnunc-stans.la libldaputil.la libslapd.la libns-dshttpd.la
+server_LTLIBRARIES = libsds.la libnunc-stans.la libslapd.la libldaputil.la libns-dshttpd.la
lib_LTLIBRARIES = libsvrcore.la
......@@ -4,5 +4,5 @@ fix-obsolete-target.diff
fix-saslpath.diff
fix-systemctl-path.diff
CVE-2017-15135.patch
fix-linking.diff
use-packaged-js.diff
dont-build-new-manpages.diff
import subprocess
import logging
import pytest
pkgs = ['389-ds-base', 'nss', 'nspr', 'openldap', 'cyrus-sasl']
def get_rpm_version(pkg):
    """Return the VERSION-RELEASE string of an installed rpm package.

    Falls back to the literal string "not installed" when the rpm query
    fails (package not present) or when the `rpm` binary itself is
    unavailable on the host.
    """
    try:
        result = subprocess.check_output(['rpm', '-q', '--queryformat',
                                          '%{VERSION}-%{RELEASE}', pkg])
    except (subprocess.CalledProcessError, OSError):
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit. OSError covers a missing `rpm`
        # binary (FileNotFoundError).
        result = b"not installed"
    return result.decode('utf-8')
def is_fips():
    # Are we running in FIPS mode?
    # Returns the raw first line of the kernel flag file (e.g. "0\n" or
    # "1\n") — callers treat it as an opaque string, not a boolean.
    # NOTE(review): assumes a Linux host where this proc file exists;
    # raises FileNotFoundError otherwise — confirm that is acceptable.
    with open('/proc/sys/crypto/fips_enabled', 'r') as f:
        return f.readline()
@pytest.fixture(autouse=True)
def _environment(request):
    """Autouse fixture: record package versions and FIPS state into the
    pytest-html metadata table.

    The `_metadata` attribute only exists when the pytest-html plugin is
    active, hence the dir() guard.
    """
    if "_metadata" in dir(request.config):
        for pkg in pkgs:
            request.config._metadata[pkg] = get_rpm_version(pkg)
        request.config._metadata['FIPS'] = is_fips()
def pytest_cmdline_main(config):
    # pytest hook: force DEBUG-level logging for the whole test run.
    logging.basicConfig(level=logging.DEBUG)
def pytest_report_header(config):
    """pytest hook: report header listing each package's version plus the
    host FIPS state, one entry per line."""
    parts = ["{}: {}\n".format(name, get_rpm_version(name)) for name in pkgs]
    parts.append("FIPS: " + is_fips())
    return "".join(parts)
@pytest.mark.optionalhook
def pytest_html_results_table_header(cells):
    # pytest-html hook: drop the trailing column from the results header.
    cells.pop()


@pytest.mark.optionalhook
def pytest_html_results_table_row(report, cells):
    # pytest-html hook: drop the trailing cell so rows stay aligned with
    # the trimmed header above.
    cells.pop()
......@@ -25,6 +25,52 @@ TEST_SUFFIX1 = "dc=importest1,dc=com"
TEST_BACKEND1 = "importest1"
TEST_SUFFIX2 = "dc=importest2,dc=com"
TEST_BACKEND2 = "importest2"
TEST_DEFAULT_SUFFIX = "dc=default,dc=com"
TEST_DEFAULT_NAME = "default"
def test_import_be_default(topo):
    """ Create a backend using the name "default". Previously this name
    was handled specially internally — TODO(review): the original
    docstring was truncated here ("...was used int"); confirm the exact
    historical behavior being regression-tested.

    :id: 8e507beb-e917-4330-8cac-1ff0eee10508
    :feature: Import
    :setup: Standalone instance
    :steps:
        1. Create a test suffix using the be name of "default"
        2. Create an ldif for the "default" backend
        3. Import ldif
        4. Verify all entries were imported
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
    """
    log.info('Adding suffix:{} and backend: {}...'.format(TEST_DEFAULT_SUFFIX,
                                                          TEST_DEFAULT_NAME))
    backends = Backends(topo.standalone)
    backends.create(properties={BACKEND_SUFFIX: TEST_DEFAULT_SUFFIX,
                                BACKEND_NAME: TEST_DEFAULT_NAME})

    log.info('Create LDIF file and import it...')
    ldif_dir = topo.standalone.get_ldif_dir()
    ldif_file = os.path.join(ldif_dir, 'default.ldif')
    # Generate 5 entries under the new suffix with dbgen
    dbgen(topo.standalone, 5, ldif_file, TEST_DEFAULT_SUFFIX)

    log.info('Stopping the server and running offline import...')
    # ldif2db is an offline import — the server must be stopped first
    topo.standalone.stop()
    assert topo.standalone.ldif2db(TEST_DEFAULT_NAME, None, None,
                                   None, ldif_file)
    topo.standalone.start()

    log.info('Verifying entry count after import...')
    entries = topo.standalone.search_s(TEST_DEFAULT_SUFFIX,
                                       ldap.SCOPE_SUBTREE,
                                       "(objectclass=*)")
    # More than just the suffix entry itself must be present
    assert len(entries) > 1

    log.info('Test PASSED')
def test_del_suffix_import(topo):
......
......@@ -13,24 +13,28 @@ import time
import ldap
import subprocess
from random import sample
from lib389.utils import ds_is_older, ensure_list_bytes, ensure_bytes
from lib389.topologies import topology_m1h1c1 as topo
from lib389.utils import ds_is_older, ensure_list_bytes, ensure_bytes, ensure_str
from lib389.topologies import topology_m1h1c1 as topo, topology_st, topology_m2 as topo_m2
from lib389._constants import *
from lib389.plugins import MemberOfPlugin
from lib389 import agreement, Entry
from lib389.idm.user import UserAccount, UserAccounts, TEST_USER_PROPERTIES
from lib389.idm.group import Groups, Group
from lib389.topologies import topology_m2 as topo_m2
from lib389.replica import ReplicationManager
from lib389.tasks import *
from lib389.idm.nscontainer import nsContainers
# Skip on older versions
pytestmark = pytest.mark.skipif(ds_is_older('1.3.7'), reason="Not implemented")
USER_CN = 'user_'
GROUP_CN = 'group1'
DEBUGGING = os.getenv('DEBUGGING', False)
SUBTREE_1 = 'cn=sub1,%s' % SUFFIX
SUBTREE_2 = 'cn=sub2,%s' % SUFFIX
DEBUGGING = os.getenv("DEBUGGING", default=False)
if DEBUGGING:
logging.getLogger(__name__).setLevel(logging.DEBUG)
else:
......@@ -387,10 +391,127 @@ def test_memberof_with_changelog_reset(topo_m2):
repl.test_replication_topology(topo_m2)
def add_container(inst, dn, name, sleep=False):
    """Creates container entry (cn=<name>) under `dn` and returns it."""
    conts = nsContainers(inst, dn)
    cont = conts.create(properties={'cn': name})
    if sleep:
        # Optional pause so server-side processing can catch up before
        # the caller proceeds
        time.sleep(1)
    return cont
def add_member(server, cn, subtree):
    """Create a test user entry 'uid=test_<cn>' directly under `subtree`."""
    dn = subtree
    users = UserAccounts(server, dn, rdn=None)
    users.create(properties={'uid': 'test_%s' % cn,
                             'cn': "%s" % cn,
                             'sn': 'SN',
                             'description': 'member',
                             'uidNumber': '1000',
                             'gidNumber': '2000',
                             'homeDirectory': '/home/testuser'
                             })
def add_group(server, cn, subtree):
    """Create group 'cn=<cn>' under `subtree` whose members are the two
    fixed test users that add_member() creates in SUBTREE_1."""
    group = Groups(server, subtree, rdn=None)
    group.create(properties={'cn': "%s" % cn,
                             'member': ['uid=test_m1,%s' % SUBTREE_1, 'uid=test_m2,%s' % SUBTREE_1],
                             'description': 'group'})
def rename_entry(server, cn, from_subtree, to_subtree):
    """Move entry `cn` from `from_subtree` to `to_subtree`, appending
    '-new' to its RDN. delold=0 keeps the old RDN attribute value on the
    renamed entry.
    """
    dn = '%s,%s' % (cn, from_subtree)
    nrdn = '%s-new' % cn
    # A routine, expected rename is not a fatal condition — was
    # log.fatal(), which polluted error-level output during normal runs.
    log.info('Renaming user (%s): new %s' % (dn, nrdn))
    server.rename_s(dn, nrdn, newsuperior=to_subtree, delold=0)
def _find_memberof(server, user_dn=None, group_dn=None, find_result=True):
    """Assert that `group_dn` is present (find_result=True) or absent
    (find_result=False) in the memberof attribute of `user_dn`."""
    assert (server)
    assert (user_dn)
    assert (group_dn)
    ent = server.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof'])
    found = False
    if ent.hasAttr('memberof'):
        for val in ent.getValues('memberof'):
            server.log.info("!!!!!!! %s: memberof->%s" % (user_dn, val))
            # attribute values come back as bytes — normalize to str
            # before comparing against the DN string
            if ensure_str(val) == group_dn:
                found = True
                break

    if find_result:
        assert found
    else:
        assert (not found)
@pytest.mark.ds49161
def test_memberof_group(topology_st):
    """Test memberof does not fail if group is moved into scope

    :id: 552850aa-agc3-473e-9d39-aae812b46f11
    :setup: Single instance
    :steps:
        1. Enable memberof plugin and set memberofentryscope
        2. Restart the server
        3. Add test sub-suffixes
        4. Add test users
        5. Add test groups
        6. Check for memberof attribute added to the test users
        7. Rename the group entry
        8. Check the new name is reflected in memberof attribute of user
    :expectedresults:
        1. memberof plugin should be enabled and memberofentryscope should be set
        2. Server should be restarted
        3. Sub-suffixes should be added
        4. Test users should be added
        5. Test groups should be added
        6. memberof attribute should be present in the test users
        7. Group entry should be renamed
        8. New group name should be present in memberof attribute of user
    """
    inst = topology_st.standalone

    log.info('Enable memberof plugin and set the scope as cn=sub1,dc=example,dc=com')
    memberof = MemberOfPlugin(inst)
    memberof.enable()
    # Only SUBTREE_1 is in the plugin's scope; SUBTREE_2 is outside it
    memberof.replace('memberOfEntryScope', SUBTREE_1)
    inst.restart()

    add_container(inst, SUFFIX, 'sub1')
    add_container(inst, SUFFIX, 'sub2')
    add_member(inst, 'm1', SUBTREE_1)
    add_member(inst, 'm2', SUBTREE_1)
    add_group(inst, 'g1', SUBTREE_1)
    add_group(inst, 'g2', SUBTREE_2)

    # _check_memberof
    # g1 is in scope so its members gain memberof; g2 is out of scope
    dn1 = '%s,%s' % ('uid=test_m1', SUBTREE_1)
    dn2 = '%s,%s' % ('uid=test_m2', SUBTREE_1)
    g1 = '%s,%s' % ('cn=g1', SUBTREE_1)
    g2 = '%s,%s' % ('cn=g2', SUBTREE_2)
    _find_memberof(inst, dn1, g1, True)
    _find_memberof(inst, dn2, g1, True)
    _find_memberof(inst, dn1, g2, False)
    _find_memberof(inst, dn2, g2, False)

    # Move g2 into the plugin scope; memberof must now pick it up
    rename_entry(inst, 'cn=g2', SUBTREE_2, SUBTREE_1)

    g2n = '%s,%s' % ('cn=g2-new', SUBTREE_1)
    _find_memberof(inst, dn1, g1, True)
    _find_memberof(inst, dn2, g1, True)
    _find_memberof(inst, dn1, g2n, True)
    _find_memberof(inst, dn2, g2n, True)
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
pytest.main("-s %s" % CURRENT_FILE)
......@@ -7,10 +7,12 @@
#
import pytest
import time
from lib389._constants import SUFFIX, PASSWORD, DN_DM
from lib389._constants import SUFFIX, PASSWORD, DN_DM, DN_CONFIG, PLUGIN_RETRO_CHANGELOG, DEFAULT_SUFFIX, DEFAULT_CHANGELOG_DB
from lib389 import Entry
from lib389.idm.user import UserAccounts
from lib389.utils import ldap, os, logging, ensure_bytes
from lib389.topologies import topology_st as topo
from lib389.topologies import topology_m1 as topo_master
from lib389.idm.organizationalunit import OrganizationalUnits
DEBUGGING = os.getenv("DEBUGGING", default=False)
......
......@@ -396,6 +396,8 @@ class TestTwoMasters:
10. It should pass
"""
pytest.xfail("Issue 49591 - work in progress")
M1 = topology_m2.ms["master1"]
M2 = topology_m2.ms["master2"]
test_users_m1 = UserAccounts(M1, test_base.dn, rdn=None)
......@@ -485,6 +487,8 @@ class TestTwoMasters:
8. It should pass
"""
pytest.xfail("Issue 49591 - work in progress")
M1 = topology_m2.ms["master1"]
M2 = topology_m2.ms["master2"]
repl = ReplicationManager(SUFFIX)
......@@ -574,6 +578,8 @@ class TestTwoMasters:
14. It should pass
"""
pytest.xfail("Issue 49591 - work in progress")
M1 = topology_m2.ms["master1"]
M2 = topology_m2.ms["master2"]
repl = ReplicationManager(SUFFIX)
......@@ -785,6 +791,8 @@ class TestThreeMasters:
8. It should pass
"""
pytest.xfail("Issue 49591 - work in progress")
M1 = topology_m3.ms["master1"]
M2 = topology_m3.ms["master2"]
M3 = topology_m3.ms["master3"]
......
......@@ -13,7 +13,10 @@ from lib389.topologies import topology_m2 as topo_m2, TopologyMain, topology_m3
from lib389._constants import *
from . import get_repl_entries
from lib389.idm.organizationalunit import OrganizationalUnits
from lib389.agreement import Agreements
from lib389.idm.user import UserAccount
from lib389 import Entry
from lib389.idm.group import Groups, Group
from lib389.replica import Replicas, ReplicationManager
from lib389.changelog import Changelog5
......@@ -32,6 +35,41 @@ else:
logging.getLogger(__name__).setLevel(logging.INFO)
log = logging.getLogger(__name__)
def find_start_location(file, no):
    """Scan an error-log file object for the `no`-th occurrence of the
    'slapd started' marker and return the file offset just after it.

    Returns -1 when fewer than `no` server starts are found before EOF.
    The caller can pass the returned offset to pattern_errorlog() to
    restrict a search to log lines written after that restart.
    """
    log_pattern = re.compile("slapd_daemon - slapd started.")
    count = 0
    while True:
        line = file.readline()
        # Debug messages were mislabelled "_pattern_errorlog" (copy/paste
        # from the sibling helper); tag them with this function's name.
        log.debug("find_start_location: [%d] %s" % (file.tell(), line))
        found = log_pattern.search(line)
        if (found):
            count = count + 1
            if (count == no):
                return file.tell()
        # readline() returns '' only at EOF
        if (line == ''):
            break
    return -1
def pattern_errorlog(file, log_pattern, start_location=0):
    """Count lines matching compiled regex `log_pattern` in `file`,
    scanning from byte offset `start_location` to EOF.

    Returns the number of matching lines.
    """
    count = 0
    # NOTE(review): this message is misleading when start_location > 0 —
    # the scan actually starts at the given offset, not the beginning.
    log.debug("_pattern_errorlog: start from the beginning" )
    file.seek(start_location)

    # Use a while true iteration because 'for line in file: hit a
    # python bug that break file.tell()
    while True:
        line = file.readline()
        log.debug("_pattern_errorlog: [%d] %s" % (file.tell(), line))
        found = log_pattern.search(line)
        if (found):
            count = count + 1
        # readline() returns '' only at EOF
        if (line == ''):
            break

    log.debug("_pattern_errorlog: complete (count=%d)" % count)
    return count
@pytest.fixture()
def test_entry(topo_m2, request):
......@@ -254,6 +292,161 @@ def test_invalid_agmt(topo_m2):
repl.test_replication(m1, m2)
repl.test_replication(m2, m1)
def test_fetch_bindDnGroup(topo_m2):
    """Check the bindDNGroup is fetched on first replication session

    :id: 5f1b1f59-6744-4260-b091-c82d22130025
    :setup: 2 Master Instances
    :steps:
        1. Create a replication bound user and group, but the user *not* member of the group
        2. Check that replication is working
        3. Some preparation is required because of lib389 magic that already define a replication via group
           - define the group as groupDN for replication and 60sec as fetch interval
           - pause RA in both direction
           - Define the user as bindDn of the RAs
        4. restart servers.
           It sets the fetch time to 0, so next session will refetch the group
        5. Before resuming RA, add user to groupDN (on both side as replication is not working at that time)
        6. trigger an update and check replication is working and
           there is no failure logged on supplier side 'does not have permission to supply replication updates to the replica'
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
    """
    # If you need any test suite initialization,
    # please, write additional fixture for that (including finalizer).
    # Topology for suites are predefined in lib389/topologies.py.

    # If you need host, port or any other data about instance,
    # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid)
    M1 = topo_m2.ms['master1']
    M2 = topo_m2.ms['master2']

    # Enable replication log level. Not really necessary
    M1.modify_s('cn=config',[(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'8192')])
    M2.modify_s('cn=config',[(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'8192')])

    # Create a group and a user
    PEOPLE = "ou=People,%s" % SUFFIX
    PASSWD = 'password'
    REPL_MGR_BOUND_DN='repl_mgr_bound_dn'

    uid = REPL_MGR_BOUND_DN.encode()
    users = UserAccounts(M1, PEOPLE, rdn=None)
    user_props = TEST_USER_PROPERTIES.copy()
    user_props.update({'uid': uid, 'cn': uid, 'sn': '_%s' % uid, 'userpassword': PASSWD.encode(), 'description': b'value creation'})
    test_user = users.create(properties=user_props)

    groups_M1 = Groups(M1, DEFAULT_SUFFIX)
    group_properties = {
        'cn' : 'group1',
        'description' : 'testgroup'}
    group_M1 = groups_M1.create(properties=group_properties)
    group_M2 = Group(M2, group_M1.dn)
    # Precondition: the bind user must NOT yet be a member of the group
    assert(not group_M1.is_member(test_user.dn))

    # Check that M1 and M2 are in sync
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.wait_for_replication(M1, M2, timeout=20)

    # Define the group as the replication manager and fetch interval as 60sec
    replicas = Replicas(M1)
    replica = replicas.list()[0]
    replica.apply_mods([(ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroupCheckInterval', '60'),
                        (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroup', group_M1.dn)])

    replicas = Replicas(M2)
    replica = replicas.list()[0]
    replica.apply_mods([(ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroupCheckInterval', '60'),
                        (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroup', group_M1.dn)])

    # Then pause the replication agreement to prevent them trying to acquire
    # while the user is not member of the group
    topo_m2.pause_all_replicas()

    # Define the user as the bindDN of the RAs
    for inst in (M1, M2):
        agmts = Agreements(inst)
        agmt = agmts.list()[0]
        agmt.replace('nsDS5ReplicaBindDN', test_user.dn.encode())
        agmt.replace('nsds5ReplicaCredentials', PASSWD.encode())

    # Key step
    # The restart will fetch the group/members define in the replica
    #
    # The user NOT member of the group replication will not work until bindDNcheckInterval
    #
    # With the fix, the first fetch is not taken into account (fetch time=0)
    # so on the first session, the group will be fetched
    M1.restart()
    M2.restart()

    # Replication being broken here we need to directly do the same update.
    # Sorry not found another solution except total update
    group_M1.add_member(test_user.dn)
    group_M2.add_member(test_user.dn)

    topo_m2.resume_all_replicas()

    # trigger updates to be sure to have a replication session, giving some time
    M1.modify_s(test_user.dn,[(ldap.MOD_ADD, 'description', b'value_1_1')])
    M2.modify_s(test_user.dn,[(ldap.MOD_ADD, 'description', b'value_2_2')])
    time.sleep(10)

    # Check replication is working: both descriptions must appear on M1...
    ents = M1.search_s(test_user.dn, ldap.SCOPE_BASE, '(objectclass=*)')
    for ent in ents:
        assert (ent.hasAttr('description'))
        found = 0
        for val in ent.getValues('description'):
            if (val == b'value_1_1'):
                found = found + 1
            elif (val == b'value_2_2'):
                found = found + 1
        assert (found == 2)

    # ...and on M2
    ents = M2.search_s(test_user.dn, ldap.SCOPE_BASE, '(objectclass=*)')
    for ent in ents:
        assert (ent.hasAttr('description'))
        found = 0
        for val in ent.getValues('description'):
            if (val == b'value_1_1'):
                found = found + 1
            elif (val == b'value_2_2'):
                found = found + 1
        assert (found == 2)

    # Check in the logs that the member was detected in the group although
    # at startup it was not member of the group
    regex = re.compile("does not have permission to supply replication updates to the replica.")
    errorlog_M1 = open(M1.errlog, "r")
    # Fix: this previously opened M1.errlog a second time, so master2's
    # log was never actually inspected.
    errorlog_M2 = open(M2.errlog, "r")

    # Find the last restart position
    restart_location_M1 = find_start_location(errorlog_M1, 2)
    assert (restart_location_M1 != -1)
    restart_location_M2 = find_start_location(errorlog_M2, 2)
    assert (restart_location_M2 != -1)

    # Then check there is no failure to authenticate
    count = pattern_errorlog(errorlog_M1, regex, start_location=restart_location_M1)
    assert(count <= 1)
    count = pattern_errorlog(errorlog_M2, regex, start_location=restart_location_M2)
    assert(count <= 1)

    if DEBUGGING:
        # Add debugging steps(if any)...
        pass
def test_cleanallruv_repl(topo_m3):
"""Test that cleanallruv could not break replication if anchor csn in ruv originated in deleted replica
......@@ -360,6 +553,7 @@ def test_cleanallruv_repl(topo_m3):
assert set(expected_m2_users).issubset(current_m2_users)
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
......
......@@ -54,11 +54,11 @@ def enable_ssl(server, ldapsport, mycert):
(ldap.MOD_REPLACE, 'nsslapd-ssl-check-hostname', 'off'),
(ldap.MOD_REPLACE, 'nsslapd-secureport', ldapsport)])
server.rsa.create(properties={'objectclass': "top nsEncryptionModule".split(),
'cn': 'RSA',
'nsSSLPersonalitySSL': mycert,
'nsSSLToken': 'internal (software)',
'nsSSLActivation': 'on'})
server.rsa.ensure_state(properties={'objectclass': "top nsEncryptionModule".split(),
'cn': 'RSA',
'nsSSLPersonalitySSL': mycert,
'nsSSLToken': 'internal (software)',
'nsSSLActivation': 'on'})
def check_pems(confdir, mycacert, myservercert, myserverkey, notexist):
......@@ -126,6 +126,12 @@ def doAndPrintIt(cmdline):
def create_keys_certs(topology_m2):
log.info("\n######################### Creating SSL Keys and Certs ######################\n")
for inst in topology_m2:
log.info("##### Ensure that nsslapd-extract-pemfiles is 'off' on {}".format(inst.serverid))
inst.config.set('nsslapd-extract-pemfiles', 'off')
log.info("##### restart {}".format(inst.serverid))
inst.restart()
global m1confdir
m1confdir = topology_m2.ms["master1"].confdir
global m2confdir
......
......@@ -15,6 +15,7 @@ from lib389 import Entry
from lib389._constants import *
from lib389.properties import *
from lib389.topologies import topology_st
from lib389.utils import *
log = logging.getLogger(__name__)
......@@ -95,7 +96,7 @@ def test_ticket47560(topology_st):
except ldap.ALREADY_EXISTS:
log.debug("Entry %s already exists" % (member_DN))
replace = [(ldap.MOD_REPLACE, 'memberof', group_DN)]
replace = [(ldap.MOD_REPLACE, 'memberof', ensure_bytes(group_DN))]
topology_st.standalone.modify_s(member_DN, replace)
#
......@@ -111,7 +112,7 @@ def test_ticket47560(topology_st):
assert len(ents) == 1
ent = ents[0]
# print ent
value = ent.getValue('memberof')
value = ensure_str(ent.getValue('memberof'))
# print "memberof: %s" % (value)
assert value == group_DN
......@@ -165,7 +166,7 @@ def test_ticket47560(topology_st):
ent = ents[0]
log.debug("Fixed entry %r\n" % ent)
if ent.getValue('memberof') == group_DN:
if ensure_str(ent.getValue('memberof')) == group_DN:
log.warning("Error the fixupMemberOf did not fix %s" % (member_DN))
result_successful = False
else:
......
......@@ -20,6 +20,7 @@ import pytest
from lib389 import Entry
from lib389._constants import *
from lib389.topologies import topology_m1c1
from lib389.utils import *
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
......@@ -68,7 +69,7 @@ def _oc_definition(oid_ext, name, must=None, may=None):
may = MAY_OLD
new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may)
return new_oc
return ensure_bytes(new_oc)
def add_OC(instance, oid_ext, name):
......@@ -92,7 +93,7 @@ def trigger_schema_push(topology_m1c1):
trigger_schema_push.value += 1
except AttributeError:
trigger_schema_push.value = 1
replace = [(ldap.MOD_REPLACE, 'telephonenumber', str(trigger_schema_push.value))]
replace = [(ldap.MOD_REPLACE, 'telephonenumber', ensure_bytes(str(trigger_schema_push.value)))]
topology_m1c1.ms["master1"].modify_s(ENTRY_DN, replace)
# wait 10 seconds that the update is replicated
......
......@@ -28,7 +28,7 @@ def test_ticket47640(topology_st):
# Enable Dynamic plugins, and the linked Attrs plugin
try:
topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'on')])
except ldap.LDAPError as e:
log.fatal('Failed to enable dynamic plugin!' + e.message['desc'])
assert False
......
......@@ -56,7 +56,7 @@ def _oc_definition(oid_ext, name, must=None, may=None):
may = MAY
new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may)
return new_oc
return ensure_bytes(new_oc)
def test_ticket47653_init(topology_m2):
......@@ -82,7 +82,7 @@ def test_ticket47653_init(topology_m2):
if DEBUGGING:
# enable acl error logging
mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(128 + 8192))] # ACL + REPL
mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', ensure_bytes(str(128 + 8192)))] # ACL + REPL
topology_m2.ms["master1"].modify_s(DN_CONFIG, mod)
topology_m2.ms["master2"].modify_s(DN_CONFIG, mod)
......@@ -159,7 +159,7 @@ def test_ticket47653_add(topology_m2):
ACI_ALLOW = "(version 3.0; acl \"SelfDN add\"; allow (add)"
ACI_SUBJECT = " userattr = \"member#selfDN\";)"
ACI_BODY = ACI_TARGET + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT
mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)]
mod = [(ldap.MOD_ADD, 'aci', ensure_bytes(ACI_BODY))]
topology_m2.ms["master1"].modify_s(SUFFIX, mod)
time.sleep(1)
......@@ -215,7 +215,7 @@ def test_ticket47653_add(topology_m2):
# Now update the entry on Master2 (as DM because 47653 is possibly not fixed on M2)
topology_m2.ms["master1"].log.info("Update %s on M2" % ENTRY_DN)
mod = [(ldap.MOD_REPLACE, 'description', 'test_add')]
mod = [(ldap.MOD_REPLACE, 'description', b'test_add')]
topology_m2.ms["master2"].modify_s(ENTRY_DN, mod)
time.sleep(1)
......@@ -224,13 +224,13 @@ def test_ticket47653_add(topology_m2):
while loop <= 10:
try:
ent = topology_m2.ms["master1"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
if ent.hasAttr('description') and (ent.getValue('description') == 'test_add'):
if ent.hasAttr('description') and (ensure_str(ent.getValue('description')) == 'test_add'):
break
except ldap.NO_SUCH_OBJECT:
time.sleep(1)
loop += 1
assert ent.getValue('description') == 'test_add'
assert ensure_str(ent.getValue('description')) == 'test_add'
def test_ticket47653_modify(topology_m2):
......@@ -252,7 +252,7 @@ def test_ticket47653_modify(topology_m2):
# entry to modify WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS
try:
topology_m2.ms["master1"].log.info("Try to modify %s (aci is missing)" % ENTRY_DN)
mod = [(ldap.MOD_REPLACE, 'postalCode', '9876')]
mod = [(ldap.MOD_REPLACE, 'postalCode', b'9876')]
topology_m2.ms["master1"].modify_s(ENTRY_DN, mod)
except Exception as e:
topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__)
......@@ -268,7 +268,7 @@ def test_ticket47653_modify(topology_m2):
ACI_ALLOW = "(version 3.0; acl \"SelfDN write\"; allow (write)"
ACI_SUBJECT = " userattr = \"member#selfDN\";)"
ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT
mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)]
mod = [(ldap.MOD_ADD, 'aci', ensure_bytes(ACI_BODY))]
topology_m2.ms["master1"].modify_s(SUFFIX, mod)
time.sleep(2)
......@@ -279,7 +279,7 @@ def test_ticket47653_modify(topology_m2):
# modify the entry and checks the value
topology_m2.ms["master1"].log.info("M1: Try to modify %s. It should succeeds" % ENTRY_DN)
mod = [(ldap.MOD_REPLACE, 'postalCode', '1928')]
mod = [(ldap.MOD_REPLACE, 'postalCode', b'1928')]
topology_m2.ms["master1"].modify_s(ENTRY_DN, mod)
topology_m2.ms["master1"].log.info("M1: Bind as %s" % DN_DM)
......@@ -288,7 +288,7 @@ def test_ticket47653_modify(topology_m2):
topology_m2.ms["master1"].log.info("M1: Check the update of %s" % ENTRY_DN)
ents = topology_m2.ms["master1"].search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*')
assert len(ents) == 1
assert ents[0].postalCode == '1928'
assert ensure_str(ents[0].postalCode) == '1928'
# Now check the update has been replicated on M2
topology_m2.ms["master1"].log.info("M2: Bind as %s" % DN_DM)
......@@ -298,13 +298,13 @@ def test_ticket47653_modify(topology_m2):
while loop <= 10:
try:
ent = topology_m2.ms["master2"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
if ent.hasAttr('postalCode') and (ent.getValue('postalCode') == '1928'):
if ent.hasAttr('postalCode') and (ensure_str(ent.getValue('postalCode')) == '1928'):
break
except ldap.NO_SUCH_OBJECT:
time.sleep(1)
loop += 1
assert loop <= 10
assert ent.getValue('postalCode') == '1928'
assert ensure_str(ent.getValue('postalCode')) == '1928'
# Now update the entry on Master2 bound as BIND_DN (update may fail if 47653 is not fixed on M2)
topology_m2.ms["master1"].log.info("M2: Update %s (bound as %s)" % (ENTRY_DN, BIND_DN))
......@@ -312,7 +312,7 @@ def test_ticket47653_modify(topology_m2):
time.sleep(1)
fail = False
try:
mod = [(ldap.MOD_REPLACE, 'postalCode', '1929')]
mod = [(ldap.MOD_REPLACE, 'postalCode', b'1929')]
topology_m2.ms["master2"].modify_s(ENTRY_DN, mod)
fail = False
except ldap.INSUFFICIENT_ACCESS:
......@@ -332,12 +332,12 @@ def test_ticket47653_modify(topology_m2):
while loop <= 10:
try:
ent = topology_m2.ms["master1"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
if ent.hasAttr('postalCode') and (ent.getValue('postalCode') == '1929'):
if ent.hasAttr('postalCode') and (ensure_str(ent.getValue('postalCode')) == '1929'):
break
except ldap.NO_SUCH_OBJECT:
time.sleep(1)
loop += 1
assert ent.getValue('postalCode') == '1929'
assert ensure_str(ent.getValue('postalCode')) == '1929'
if __name__ == '__main__':
......