Skip to content

Commit 77804b1

Browse files
committed
PYTHON-1422 Don't update a closed topology
1 parent 585d0fb commit 77804b1

File tree

3 files changed

+65
-33
lines changed

3 files changed

+65
-33
lines changed

pymongo/topology.py

Lines changed: 35 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -246,38 +246,49 @@ def select_server_by_address(self, address,
246246
server_selection_timeout,
247247
address)
248248

249+
def _process_change(self, server_description):
250+
"""Process a new ServerDescription on an opened topology.
251+
252+
Hold the lock when calling this.
253+
"""
254+
td_old = self._description
255+
if self._publish_server:
256+
old_server_description = td_old._server_descriptions[
257+
server_description.address]
258+
self._events.put((
259+
self._listeners.publish_server_description_changed,
260+
(old_server_description, server_description,
261+
server_description.address, self._topology_id)))
262+
263+
self._description = updated_topology_description(
264+
self._description, server_description)
265+
266+
self._update_servers()
267+
self._receive_cluster_time_no_lock(server_description.cluster_time)
268+
269+
if self._publish_tp:
270+
self._events.put((
271+
self._listeners.publish_topology_description_changed,
272+
(td_old, self._description, self._topology_id)))
273+
274+
# Wake waiters in select_servers().
275+
self._condition.notify_all()
276+
249277
def on_change(self, server_description):
250278
"""Process a new ServerDescription after an ismaster call completes."""
251279
# We do no I/O holding the lock.
252280
with self._lock:
281+
# Monitors may continue working on ismaster calls for some time
282+
# after a call to Topology.close, so this method may be called at
283+
# any time. Ensure the topology is open before processing the
284+
# change.
253285
# Any monitored server was definitely in the topology description
254286
# once. Check if it's still in the description or if some state-
255287
# change removed it. E.g., we got a host list from the primary
256288
# that didn't include this server.
257-
if self._description.has_server(server_description.address):
258-
td_old = self._description
259-
if self._publish_server:
260-
old_server_description = td_old._server_descriptions[
261-
server_description.address]
262-
self._events.put((
263-
self._listeners.publish_server_description_changed,
264-
(old_server_description, server_description,
265-
server_description.address, self._topology_id)))
266-
267-
self._description = updated_topology_description(
268-
self._description, server_description)
269-
270-
self._update_servers()
271-
self._receive_cluster_time_no_lock(
272-
server_description.cluster_time)
273-
274-
if self._publish_tp:
275-
self._events.put((
276-
self._listeners.publish_topology_description_changed,
277-
(td_old, self._description, self._topology_id)))
278-
279-
# Wake waiters in select_servers().
280-
self._condition.notify_all()
289+
if (self._opened and
290+
self._description.has_server(server_description.address)):
291+
self._process_change(server_description)
281292

282293
def get_server_by_address(self, address):
283294
"""Get a Server or None.

test/test_topology.py

Lines changed: 28 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -75,15 +75,16 @@ class MockMonitor(object):
7575
def __init__(self, server_description, topology, pool, topology_settings):
7676
self._server_description = server_description
7777
self._topology = topology
78+
self.opened = False
7879

7980
def open(self):
80-
pass
81+
self.opened = True
8182

8283
def request_check(self):
8384
pass
8485

8586
def close(self):
86-
pass
87+
self.opened = False
8788

8889

8990
class SetNameDiscoverySettings(TopologySettings):
@@ -122,9 +123,16 @@ def disconnected(topology, server_address):
122123
topology.on_change(ServerDescription(server_address))
123124

124125

126+
def get_server(topology, hostname):
127+
return topology.get_server_by_address((hostname, 27017))
128+
129+
125130
def get_type(topology, hostname):
126-
description = topology.get_server_by_address((hostname, 27017)).description
127-
return description.server_type
131+
return get_server(topology, hostname).description.server_type
132+
133+
134+
def get_monitor(topology, hostname):
135+
return get_server(topology, hostname)._monitor
128136

129137

130138
class TopologyTest(unittest.TestCase):
@@ -398,26 +406,37 @@ def test_close(self):
398406

399407
self.assertEqual(SERVER_TYPE.RSPrimary, get_type(t, 'a'))
400408
self.assertEqual(SERVER_TYPE.RSSecondary, get_type(t, 'b'))
409+
self.assertTrue(get_monitor(t, 'a').opened)
410+
self.assertTrue(get_monitor(t, 'b').opened)
401411
self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary,
402412
t.description.topology_type)
403413

404414
t.close()
405415
self.assertEqual(2, len(t.description.server_descriptions()))
406416
self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'a'))
407417
self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'b'))
418+
self.assertFalse(get_monitor(t, 'a').opened)
419+
self.assertFalse(get_monitor(t, 'b').opened)
408420
self.assertEqual('rs', t.description.replica_set_name)
409421
self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary,
410422
t.description.topology_type)
411423

424+
# A closed topology should not be updated when receiving an isMaster.
412425
got_ismaster(t, ('a', 27017), {
413426
'ok': 1,
414427
'ismaster': True,
415428
'setName': 'rs',
416-
'hosts': ['a', 'b']})
429+
'hosts': ['a', 'b', 'c']})
417430

418-
self.assertEqual(SERVER_TYPE.RSPrimary, get_type(t, 'a'))
431+
self.assertEqual(2, len(t.description.server_descriptions()))
432+
self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'a'))
419433
self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'b'))
420-
self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary,
434+
self.assertFalse(get_monitor(t, 'a').opened)
435+
self.assertFalse(get_monitor(t, 'b').opened)
436+
# Server c should not have been added.
437+
self.assertEqual(None, get_server(t, 'c'))
438+
self.assertEqual('rs', t.description.replica_set_name)
439+
self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary,
421440
t.description.topology_type)
422441

423442
def test_reset_server(self):
@@ -480,7 +499,7 @@ def test_discover_set_name_from_primary(self):
480499
self.assertEqual(t.description.replica_set_name, None)
481500
self.assertEqual(t.description.topology_type,
482501
TOPOLOGY_TYPE.ReplicaSetNoPrimary)
483-
502+
t.open()
484503
got_ismaster(t, address, {
485504
'ok': 1,
486505
'ismaster': True,
@@ -516,7 +535,7 @@ def test_discover_set_name_from_secondary(self):
516535
self.assertEqual(t.description.replica_set_name, None)
517536
self.assertEqual(t.description.topology_type,
518537
TOPOLOGY_TYPE.ReplicaSetNoPrimary)
519-
538+
t.open()
520539
got_ismaster(t, address, {
521540
'ok': 1,
522541
'ismaster': False,

test/utils_selection_tests.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -159,12 +159,14 @@ def run_scenario(self):
159159
# the set of servers matching both the ReadPreference's mode
160160
# and tag sets.
161161
top_latency = Topology(TopologySettings(**settings))
162+
top_latency.open()
162163

163164
# "In latency window" is defined in the server selection
164165
# spec as the subset of suitable_servers that falls within the
165166
# allowable latency window.
166167
settings['local_threshold_ms'] = 1000000
167168
top_suitable = Topology(TopologySettings(**settings))
169+
top_suitable.open()
168170

169171
# Update topologies with server descriptions.
170172
for server in scenario_def['topology_description']['servers']:

0 commit comments

Comments (0)