From 4653f6561cf4a2f85818e6383a48e7bc04979582 Mon Sep 17 00:00:00 2001
From: Garming Sam <garming@catalyst.net.nz>
Date: Wed, 14 Feb 2018 13:26:35 +1300
Subject: [PATCH 1/7] tests/replica_sync: Add some additional replication in
 setUp

This should avoid some failures due to stale objects.

BUG: https://bugzilla.samba.org/show_bug.cgi?id=13269

Signed-off-by: Garming Sam <garming@catalyst.net.nz>
Reviewed-by: Andrew Bartlett <abartlet@samba.org>
(cherry picked from commit 19fcd872ec76afffbc4952266fdfad9a352c4871)
---
 source4/torture/drs/python/replica_sync.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/source4/torture/drs/python/replica_sync.py b/source4/torture/drs/python/replica_sync.py
index 93407df3962..927a085c1a6 100644
--- a/source4/torture/drs/python/replica_sync.py
+++ b/source4/torture/drs/python/replica_sync.py
@@ -42,6 +42,8 @@ from ldb import (
 
     def setUp(self):
         super(DrsReplicaSyncTestCase, self).setUp()
+        self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+        self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
         self.ou1 = None
         self.ou2 = None
 
-- 
2.13.6
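[Note on PATCH 1/7] The setUp() pattern above, reduced to a runnable
standalone sketch. Here drs_replicate() and the DC names are made-up
stand-ins for DrsBaseTestCase._net_drs_replicate() and the selftest
environment; the point is only the bidirectional forced sync before
each test:

    import unittest

    def drs_replicate(dest_dc, source_dc, forced=True):
        # Stand-in for _net_drs_replicate(); the real helper shells out
        # to "samba-tool drs replicate".
        print("replicate %s <- %s (forced=%s)" % (dest_dc, source_dc, forced))

    class ReplicaSyncExample(unittest.TestCase):
        dc1 = "dc1.samba.example.com"   # hypothetical DC names
        dc2 = "dc2.samba.example.com"

        def setUp(self):
            # Converge both replicas before every test: an object that a
            # previous test left on only one DC would otherwise surface
            # as a stale-object failure here.
            drs_replicate(self.dc2, self.dc1, forced=True)
            drs_replicate(self.dc1, self.dc2, forced=True)

        def test_noop(self):
            self.assertTrue(True)

    if __name__ == "__main__":
        unittest.main()

From cc9378faa30e581dc6a4cbe83b5f83346690e84f Mon Sep 17 00:00:00 2001
From: Garming Sam <garming@catalyst.net.nz>
Date: Wed, 14 Feb 2018 13:27:59 +1300
Subject: [PATCH 2/7] tests/drs_base: Allow net drs replicate to request a
 single object

This eventually passes down the single-object replication EXOP.

BUG: https://bugzilla.samba.org/show_bug.cgi?id=13269

Signed-off-by: Garming Sam <garming@catalyst.net.nz>
Reviewed-by: Andrew Bartlett <abartlet@samba.org>
(cherry picked from commit ff9e63f976ef76f7f70221d4f6276e221ecd167f)
---
 source4/torture/drs/python/drs_base.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/source4/torture/drs/python/drs_base.py b/source4/torture/drs/python/drs_base.py
index 10f2e63ae3b..66a0d8d6d77 100644
--- a/source4/torture/drs/python/drs_base.py
+++ b/source4/torture/drs/python/drs_base.py
@@ -120,7 +120,8 @@ from ldb import (
         # bin/samba-tool drs <drs_command> <cmdline_auth>
         return ["drs", drs_command, cmdline_auth]
 
-    def _net_drs_replicate(self, DC, fromDC, nc_dn=None, forced=True, local=False, full_sync=False):
+    def _net_drs_replicate(self, DC, fromDC, nc_dn=None, forced=True,
+                           local=False, full_sync=False, single=False):
         if nc_dn is None:
             nc_dn = self.domain_dn
         # make base command line
@@ -134,6 +135,8 @@ from ldb import (
             samba_tool_cmdline += ["--local"]
         if full_sync:
             samba_tool_cmdline += ["--full-sync"]
+        if single:
+            samba_tool_cmdline += ["--single-object"]
 
         (result, out, err) = self.runsubcmd(*samba_tool_cmdline)
         self.assertCmdSuccess(result, out, err)
-- 
2.13.6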
[Note on PATCH 2/7] What the helper ends up invoking, sketched as plain
Python. The --local, --full-sync and --single-object flags are taken
from the hunk above; the "replicate" subcommand and "--sync-forced" for
forced=True are assumptions about the unshown parts of drs_base.py:

    def build_cmdline(dc, from_dc, nc_dn, forced=True, local=False,
                      full_sync=False, single=False):
        # Mirrors how samba_tool_cmdline is assembled in the patch.
        cmd = ["drs", "replicate", dc, from_dc, nc_dn]
        if forced:
            cmd += ["--sync-forced"]   # assumed flag for forced=True
        if local:
            cmd += ["--local"]
        if full_sync:
            cmd += ["--full-sync"]
        if single:
            cmd += ["--single-object"]  # requests the single-object EXOP
        return cmd

    print(build_cmdline("rodc", "dc1", "OU=x,DC=samba,DC=example,DC=com",
                        local=True, single=True))

From a92e3fa4460ed2b574fe1e2d716f9bcdec4f7933 Mon Sep 17 00:00:00 2001
From: Garming Sam <garming@catalyst.net.nz>
Date: Wed, 14 Feb 2018 13:27:27 +1300
Subject: [PATCH 3/7] selftest: Add RODC variables to the list of those
 exported

BUG: https://bugzilla.samba.org/show_bug.cgi?id=13269

Signed-off-by: Garming Sam <garming@catalyst.net.nz>
Reviewed-by: Andrew Bartlett <abartlet@samba.org>
(cherry picked from commit e694b8a1b993bf7213b191e1132c5d02e16ab85d)
---
 selftest/selftest.pl | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/selftest/selftest.pl b/selftest/selftest.pl
index ff19d5975cc..0e56e6a13ef 100755
--- a/selftest/selftest.pl
+++ b/selftest/selftest.pl
@@ -835,6 +835,12 @@ my @exported_envvars = (
         "VAMPIRE_DC_NETBIOSNAME",
         "VAMPIRE_DC_NETBIOSALIAS",
 
+        # domain controller stuff for RODC
+        "RODC_DC_SERVER",
+        "RODC_DC_SERVER_IP",
+        "RODC_DC_SERVER_IPV6",
+        "RODC_DC_NETBIOSNAME",
+
         # domain controller stuff for FL 2000 Vampired DC
         "VAMPIRE_2000_DC_SERVER",
         "VAMPIRE_2000_DC_SERVER_IP",
-- 
2.13.6
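[Note on PATCH 3/7] These variables only matter once something reads
them: patch 4 maps RODC_DC_SERVER to the test's DC2. On the test side
the lookup is just os.environ; the fallback hostnames below are made-up
placeholders, not selftest defaults:

    import os

    # Set by planoldpythontestsuite() in patch 4 via
    # environ={'DC1': '$DC_SERVER', 'DC2': '$RODC_DC_SERVER'}.
    dc1 = os.environ.get("DC1", "dc1.samba.example.com")   # read-write DC
    dc2 = os.environ.get("DC2", "rodc.samba.example.com")  # the RODC
    print(dc1, dc2)
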
From f8977ee168526b7a24cbc8b768c9d235cd884df5 Mon Sep 17 00:00:00 2001
From: Garming Sam <garming@catalyst.net.nz>
Date: Wed, 14 Feb 2018 13:30:26 +1300
Subject: [PATCH 4/7] tests/replica_sync_rodc: Test conflict handling on an
 RODC

There are two cases we are interested in:

 1) The RODC receives two identical DNs which conflict
 2) The RODC receives a rename to a DN which already exists

Currently these conflicts are ignored, but the UDV and HWM are still
being updated, leading to objects/updates being skipped.

BUG: https://bugzilla.samba.org/show_bug.cgi?id=13269

Signed-off-by: Garming Sam <garming@catalyst.net.nz>
Reviewed-by: Andrew Bartlett <abartlet@samba.org>
(cherry picked from commit 45d19167d52e42bd2f9369dbe37a233902cc81b0)
---
 selftest/knownfail.d/rodc_repl                  |   2 +
 source4/selftest/tests.py                       |   6 +
 source4/torture/drs/python/replica_sync_rodc.py | 156 ++++++++++++++++++++++++
 3 files changed, 164 insertions(+)
 create mode 100644 selftest/knownfail.d/rodc_repl
 create mode 100644 source4/torture/drs/python/replica_sync_rodc.py

diff --git a/selftest/knownfail.d/rodc_repl b/selftest/knownfail.d/rodc_repl
new file mode 100644
index 00000000000..f6313c18980
--- /dev/null
+++ b/selftest/knownfail.d/rodc_repl
@@ -0,0 +1,2 @@
+^samba4.drs.replica_sync_rodc.python.rodc..replica_sync_rodc.DrsReplicaSyncTestCase.test_ReplConflictsRODC.rodc
+^samba4.drs.replica_sync_rodc.python.rodc..replica_sync_rodc.DrsReplicaSyncTestCase.test_ReplConflictsRODCRename.rodc
diff --git a/source4/selftest/tests.py b/source4/selftest/tests.py
index f5ff906a4bc..621a61347bc 100755
--- a/source4/selftest/tests.py
+++ b/source4/selftest/tests.py
@@ -769,6 +769,12 @@ plantestsuite_loadlist("samba4.ldap.rodc_rwdc.python(rodc)", "rodc:local",
                        '$SERVER', '$DC_SERVER', '-U"$USERNAME%$PASSWORD"',
                        '--workgroup=$DOMAIN', '$LOADLIST', '$LISTOPT'])
 
+planoldpythontestsuite("rodc:local", "replica_sync_rodc",
+                       extra_path=[os.path.join(samba4srcdir, 'torture/drs/python')],
+                       name="samba4.drs.replica_sync_rodc.python(rodc)",
+                       environ={'DC1': '$DC_SERVER', 'DC2': '$RODC_DC_SERVER'},
+                       extra_args=['-U$DOMAIN/$DC_USERNAME%$DC_PASSWORD'])
+
 for env in ["ad_dc_ntvfs", "fl2000dc", "fl2003dc", "fl2008r2dc"]:
     plantestsuite_loadlist("samba4.ldap_schema.python(%s)" % env, env, [python, os.path.join(samba4srcdir, "dsdb/tests/python/ldap_schema.py"), '$SERVER', '-U"$USERNAME%$PASSWORD"', '--workgroup=$DOMAIN', '$LOADLIST', '$LISTOPT'])
     plantestsuite("samba4.ldap.possibleInferiors.python(%s)" % env, env, [python, os.path.join(samba4srcdir, "dsdb/samdb/ldb_modules/tests/possibleinferiors.py"), "ldap://$SERVER", '-U"$USERNAME%$PASSWORD"', "-W$DOMAIN"])
diff --git a/source4/torture/drs/python/replica_sync_rodc.py b/source4/torture/drs/python/replica_sync_rodc.py
new file mode 100644
index 00000000000..907cef49792
--- /dev/null
+++ b/source4/torture/drs/python/replica_sync_rodc.py
@@ -0,0 +1,156 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Test conflict scenarios on the RODC
+#
+# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2011
+# Copyright (C) Catalyst.NET Ltd 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+#
+# Usage:
+#  export DC1=dc1_dns_name
+#  export DC2=dc2_dns_name (RODC)
+#  export SUBUNITRUN=$samba4srcdir/scripting/bin/subunitrun
+#  PYTHONPATH="$PYTHONPATH:$samba4srcdir/torture/drs/python" $SUBUNITRUN replica_sync_rodc -U"$DOMAIN/$DC_USERNAME"%"$DC_PASSWORD"
+#
+
+import drs_base
+import samba.tests
+import time
+import ldb
+
+from ldb import (
+    SCOPE_BASE, LdbError, ERR_NO_SUCH_OBJECT)
+
+
+class DrsReplicaSyncTestCase(drs_base.DrsBaseTestCase):
+    """Intended as a black box test case for DsReplicaSync
+       implementation. It should test the behavior of DsReplicaSync
+       in cases when inbound replication is disabled"""
+
+    def setUp(self):
+        super(DrsReplicaSyncTestCase, self).setUp()
+        self._disable_inbound_repl(self.dnsname_dc1)
+        self._disable_all_repl(self.dnsname_dc1)
+        self.ou1 = None
+        self.ou2 = None
+
+    def tearDown(self):
+        # re-enable replication
+        self._enable_inbound_repl(self.dnsname_dc1)
+        self._enable_all_repl(self.dnsname_dc1)
+
+        super(DrsReplicaSyncTestCase, self).tearDown()
+
+    def _create_ou(self, samdb, name):
+        ldif = """
+dn: %s,%s
+objectClass: organizationalUnit
+""" % (name, self.domain_dn)
+        samdb.add_ldif(ldif)
+        res = samdb.search(base="%s,%s" % (name, self.domain_dn),
+                           scope=SCOPE_BASE, attrs=["objectGUID"])
+        return self._GUID_string(res[0]["objectGUID"][0])
+
+    def _check_deleted(self, sam_ldb, guid):
+        # search the user by guid as it may be deleted
+        res = sam_ldb.search(base='<GUID=%s>' % guid,
+                             controls=["show_deleted:1"],
+                             attrs=["isDeleted", "objectCategory", "ou"])
+        self.assertEquals(len(res), 1)
+        ou_cur = res[0]
+        # Deleted Object base DN
+        dodn = self._deleted_objects_dn(sam_ldb)
+        # now check properties of the user
+        name_cur = ou_cur["ou"][0]
+        self.assertEquals(ou_cur["isDeleted"][0], "TRUE")
+        self.assertTrue(not("objectCategory" in ou_cur))
+        self.assertTrue(dodn in str(ou_cur["dn"]),
+                        "OU %s is deleted but it is not located under %s!" % (name_cur, dodn))
+
+    def test_ReplConflictsRODC(self):
+        """Tests that objects created in conflict become conflict DNs"""
+        # Replicate all objects to the RODC beforehand
+        self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+        # Create conflicting objects on DC1 and DC2, with the DC1 object created first
+        name = "OU=Test RODC Conflict"
+        self.ou1 = self._create_ou(self.ldb_dc1, name)
+
+        # Replicate single object
+        self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1,
+                                nc_dn="%s,%s" % (name, self.domain_dn),
+                                local=True, single=True, forced=True)
+
+        # Delete the object, so another can be added
+        self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
+
+        # Create a conflicting DN as it would appear to the RODC
+        self.ou2 = self._create_ou(self.ldb_dc1, name)
+
+        try:
+            self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1,
+                                    nc_dn="%s,%s" % (name, self.domain_dn),
+                                    local=True, single=True, forced=True)
+        except:
+            # Cleanup the object
+            self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
+            return
+
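+        # Replicate must not succeed, otherwise the HWM would be updated incorrectly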
+        self.fail("DRS replicate should have failed.")
+
+    def test_ReplConflictsRODCRename(self):
+        """Tests that objects renamed into conflict become conflict DNs"""
+        # Replicate all objects to the RODC beforehand
+        self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+        # Create conflicting objects on DC1 and DC2, with the DC1 object created first
+        name = "OU=Test RODC Rename Conflict"
+        self.ou1 = self._create_ou(self.ldb_dc1, name)
+
+        # Replicate single object
+        self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1,
+                                nc_dn="%s,%s" % (name, self.domain_dn),
+                                local=True, single=True, forced=True)
+
+        # Create a non-conflicting DN to rename as conflicting
+        free_name = "OU=Test RODC Rename No Conflict"
+        self.ou2 = self._create_ou(self.ldb_dc1, free_name)
+
+        self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1,
+                                nc_dn="%s,%s" % (free_name, self.domain_dn),
+                                local=True, single=True, forced=True)
+
+        # Delete the object, so we can rename freely
+        # DO NOT REPLICATE TO THE RODC
+        self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
+
+        # Collide the name from the RODC's perspective
+        self.ldb_dc1.rename("<GUID=%s>" % self.ou2, "%s,%s" % (name, self.domain_dn))
+
+        try:
+            self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1,
+                                    nc_dn="%s,%s" % (name, self.domain_dn),
+                                    local=True, single=True, forced=True)
+        except:
+            # Cleanup the object
+            self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
+            return
+
+        # Replicate must not succeed, otherwise the HWM would be updated incorrectly
+        self.fail("DRS replicate should have failed.")
-- 
2.13.6
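[Note on PATCH 4/7] Why a silently skipped object can never be
recovered: the highwatermark (HWM) is committed for the whole batch, and
the source only sends updates with USNs above it. A toy model in plain
Python, not the Samba data structures:

    def apply_batch(db, batch, hwm, skip_conflicts):
        for usn, dn, value in batch:
            if dn in db and skip_conflicts:
                pass              # pre-patch RODC behaviour: drop the update...
            else:
                db[dn] = value
            hwm = max(hwm, usn)   # ...yet still advance the cursor past it
        return hwm

    db = {"OU=Test": "old"}
    hwm = apply_batch(db, [(2, "OU=Test", "new"), (3, "OU=Other", "x")],
                      hwm=1, skip_conflicts=True)
    print(hwm, db)

Running it leaves hwm == 3 while OU=Test still holds "old"; because the
source now filters on USN > 3, the lost update at USN 2 is never
offered again. Failing the batch, as the following patches do, keeps
the HWM at 1 so the whole batch is retried.

From 276800104955c1f2f872afa6f719590026337fbc Mon Sep 17 00:00:00 2001
From: Garming Sam <garming@catalyst.net.nz>
Date: Wed, 14 Feb 2018 13:32:24 +1300
Subject: [PATCH 5/7] repl_metadata: Avoid silently skipping an object during
 DRS (due to RODC name collisions)

No error code was being set in this case, so we would commit the HWM
and UDV without actually having applied all the updates.

BUG: https://bugzilla.samba.org/show_bug.cgi?id=13269

Signed-off-by: Garming Sam <garming@catalyst.net.nz>
Reviewed-by: Andrew Bartlett <abartlet@samba.org>
(cherry picked from commit 59fa9e7ecf84bd4c2469e9a6835855769c4f6287)
---
 selftest/knownfail.d/rodc_repl                  | 1 -
 source4/dsdb/samdb/ldb_modules/repl_meta_data.c | 1 +
 2 files changed, 1 insertion(+), 1 deletion(-)

diff --git a/selftest/knownfail.d/rodc_repl b/selftest/knownfail.d/rodc_repl
index f6313c18980..bd5c04ea2cc 100644
--- a/selftest/knownfail.d/rodc_repl
+++ b/selftest/knownfail.d/rodc_repl
@@ -1,2 +1 @@
-^samba4.drs.replica_sync_rodc.python.rodc..replica_sync_rodc.DrsReplicaSyncTestCase.test_ReplConflictsRODC.rodc
 ^samba4.drs.replica_sync_rodc.python.rodc..replica_sync_rodc.DrsReplicaSyncTestCase.test_ReplConflictsRODCRename.rodc
diff --git a/source4/dsdb/samdb/ldb_modules/repl_meta_data.c b/source4/dsdb/samdb/ldb_modules/repl_meta_data.c
index 7646f942fca..84d898af483 100644
--- a/source4/dsdb/samdb/ldb_modules/repl_meta_data.c
+++ b/source4/dsdb/samdb/ldb_modules/repl_meta_data.c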
\n" " - We must fail the operation until a master for this partition resolves the conflict", ldb_dn_get_linearized(conflict_dn)); + ret = LDB_ERR_OPERATIONS_ERROR; goto failed; } -- 2.13.6 From d6ffaf23c682ca8d9e7c8000f1108cfd342de8a8 Mon Sep 17 00:00:00 2001 From: Garming Sam Date: Wed, 14 Feb 2018 13:32:33 +1300 Subject: [PATCH 6/7] repl_metadata: Avoid silent skipping an object during DRS (due to RODC rename collisions) No error code was being set in this case, and so, we would commit the HWM and UDV without actually having all the updates. BUG: https://bugzilla.samba.org/show_bug.cgi?id=13269 Signed-off-by: Garming Sam Reviewed-by: Andrew Bartlett Autobuild-User(master): Garming Sam Autobuild-Date(master): Thu Feb 15 10:18:42 CET 2018 on sn-devel-144 (cherry picked from commit 9952eda7a1923971f77f3183cfa4c505386b30ee) --- selftest/knownfail.d/rodc_repl | 1 - source4/dsdb/samdb/ldb_modules/repl_meta_data.c | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) delete mode 100644 selftest/knownfail.d/rodc_repl diff --git a/selftest/knownfail.d/rodc_repl b/selftest/knownfail.d/rodc_repl deleted file mode 100644 index bd5c04ea2cc..00000000000 --- a/selftest/knownfail.d/rodc_repl +++ /dev/null @@ -1 +0,0 @@ -^samba4.drs.replica_sync_rodc.python.rodc..replica_sync_rodc.DrsReplicaSyncTestCase.test_ReplConflictsRODCRename.rodc diff --git a/source4/dsdb/samdb/ldb_modules/repl_meta_data.c b/source4/dsdb/samdb/ldb_modules/repl_meta_data.c index 84d898af483..ead0bd9235b 100644 --- a/source4/dsdb/samdb/ldb_modules/repl_meta_data.c +++ b/source4/dsdb/samdb/ldb_modules/repl_meta_data.c @@ -5571,6 +5571,7 @@ static int replmd_replicated_handle_rename(struct replmd_replicated_request *ar, "Conflict adding object '%s' from incoming replication but we are read only for the partition. \n" " - We must fail the operation until a master for this partition resolves the conflict", ldb_dn_get_linearized(conflict_dn)); + ret = LDB_ERR_OPERATIONS_ERROR; goto failed; } -- 2.13.6 From 97662a238fe7766242c06ad6b23c53c44cb0dfd7 Mon Sep 17 00:00:00 2001 From: Douglas Bagnall Date: Wed, 14 Feb 2018 17:15:07 +1300 Subject: [PATCH 7/7] repl_md: avoid returning LDB_SUCCESS on failure BUG: https://bugzilla.samba.org/show_bug.cgi?id=13269 Signed-off-by: Douglas Bagnall Signed-off-by: Garming Sam Reviewed-by: Andrew Bartlett (cherry picked from commit bc56913271e9d3a30143ef5a45d32430766d9dc3) --- source4/dsdb/samdb/ldb_modules/repl_meta_data.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/source4/dsdb/samdb/ldb_modules/repl_meta_data.c b/source4/dsdb/samdb/ldb_modules/repl_meta_data.c index ead0bd9235b..62f58addfde 100644 --- a/source4/dsdb/samdb/ldb_modules/repl_meta_data.c +++ b/source4/dsdb/samdb/ldb_modules/repl_meta_data.c @@ -5109,6 +5109,9 @@ static int replmd_op_possible_conflict_callback(struct ldb_request *req, struct * replication will stop with an error, but there is not much * else we can do. 
         */
+       if (ret == LDB_SUCCESS) {
+               ret = LDB_ERR_OPERATIONS_ERROR;
+       }
        return ldb_module_done(ar->req, NULL, NULL,
                               ret);
 }
@@ -5719,8 +5722,10 @@ static int replmd_replicated_handle_rename(struct replmd_replicated_request *ar,
                          ldb_errstring(ldb_module_get_ctx(ar->module))));
                goto failed;
        }
-failed:
 
+       talloc_free(tmp_ctx);
+       return ret;
+failed:
        /*
         * On failure make the caller get the error
         * This means replication will stop with an error,
@@ -5728,6 +5733,9 @@ static int replmd_replicated_handle_rename(struct replmd_replicated_request *ar,
         * but there is not much else we can do.  In the
         * LDB_ERR_ENTRY_ALREADY_EXISTS case this is exactly what is
         * needed.
         */
+       if (ret == LDB_SUCCESS) {
+               ret = LDB_ERR_OPERATIONS_ERROR;
+       }
        talloc_free(tmp_ctx);
        return ret;
-- 
2.13.6
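[Note on PATCH 7/7] The restructuring amounts to two rules: the success
path returns before the failed label, and the label coerces any stale
LDB_SUCCESS into a real error. Sketched in Python; this is the shape of
the change, not the C implementation, and the constant values are
illustrative:

    LDB_SUCCESS = 0
    LDB_ERR_OPERATIONS_ERROR = 1

    def failed(ret):
        # Belt-and-braces: never let a failure path report success.
        # (In the C code, talloc_free(tmp_ctx) also happens here.)
        if ret == LDB_SUCCESS:
            ret = LDB_ERR_OPERATIONS_ERROR
        return ret

    def handle_rename(rename_ok):
        ret = LDB_SUCCESS if rename_ok else LDB_ERR_OPERATIONS_ERROR
        if ret != LDB_SUCCESS:
            return failed(ret)
        # Early return: success no longer falls through into the
        # failure label, which is what the moved 'failed:' achieves.
        return ret

    assert handle_rename(True) == LDB_SUCCESS
    assert handle_rename(False) == LDB_ERR_OPERATIONS_ERROR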