@@ -81,7 +81,8 @@ def __init__(self, client, subscription, metrics, metric_group_prefix,
         assert self.config['assignors'], 'Coordinator requires assignors'
 
         self._subscription = subscription
-        self._partitions_per_topic = {}
+        self._metadata_snapshot = {}
+        self._assignment_snapshot = None
         self._cluster = client.cluster
         self._cluster.request_update()
         self._cluster.add_listener(WeakMethod(self._handle_metadata_update))
@@ -146,7 +147,7 @@ def _handle_metadata_update(self, cluster):
 
         # check if there are any changes to the metadata which should trigger
         # a rebalance
-        if self._subscription_metadata_changed():
+        if self._subscription_metadata_changed(cluster):
 
             if (self.config['api_version'] >= (0, 9)
                     and self.config['group_id'] is not None):
@@ -159,20 +160,20 @@ def _handle_metadata_update(self, cluster):
                 self._subscription.assign_from_subscribed([
                     TopicPartition(topic, partition)
                     for topic in self._subscription.subscription
-                    for partition in self._partitions_per_topic[topic]
+                    for partition in self._metadata_snapshot[topic]
                 ])
 
-    def _subscription_metadata_changed(self):
+    def _subscription_metadata_changed(self, cluster):
         if not self._subscription.partitions_auto_assigned():
             return False
 
-        old_partitions_per_topic = self._partitions_per_topic
-        self._partitions_per_topic = {}
+        metadata_snapshot = {}
         for topic in self._subscription.group_subscription():
-            partitions = self._cluster.partitions_for_topic(topic) or []
-            self._partitions_per_topic[topic] = set(partitions)
+            partitions = cluster.partitions_for_topic(topic) or []
+            metadata_snapshot[topic] = set(partitions)
 
-        if self._partitions_per_topic != old_partitions_per_topic:
+        if self._metadata_snapshot != metadata_snapshot:
+            self._metadata_snapshot = metadata_snapshot
             return True
         return False
 
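A minimal standalone sketch of the snapshot comparison that the rewritten _subscription_metadata_changed() performs: build a {topic: set(partitions)} map from the metadata object passed to the listener and compare it with the previously stored map. The names build_snapshot and FakeCluster below are invented for illustration and are not part of kafka-python's API.

    def build_snapshot(cluster, topics):
        # capture {topic: set(partition_ids)} from a metadata object
        return {topic: set(cluster.partitions_for_topic(topic) or [])
                for topic in topics}

    class FakeCluster(object):
        # hypothetical stand-in for cluster metadata, illustration only
        def __init__(self, partitions_by_topic):
            self._partitions = partitions_by_topic

        def partitions_for_topic(self, topic):
            return self._partitions.get(topic)

    before = build_snapshot(FakeCluster({'events': [0, 1]}), ['events'])
    after = build_snapshot(FakeCluster({'events': [0, 1, 2]}), ['events'])
    # a partition was added, so the snapshots differ and a rebalance is needed
    assert before != after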
@@ -184,8 +185,15 @@ def _lookup_assignor(self, name):
 
     def _on_join_complete(self, generation, member_id, protocol,
                           member_assignment_bytes):
+        # if we were the assignor, then we need to make sure that there have
+        # been no metadata updates since the rebalance began. Otherwise, we
+        # won't rebalance again until the next metadata change
+        if self._assignment_snapshot and self._assignment_snapshot != self._metadata_snapshot:
+            self._subscription.mark_for_reassignment()
+            return
+
         assignor = self._lookup_assignor(protocol)
-        assert assignor, 'invalid assignment protocol: %s' % protocol
+        assert assignor, 'Coordinator selected invalid assignment protocol: %s' % protocol
 
         assignment = ConsumerProtocol.ASSIGNMENT.decode(member_assignment_bytes)
 
@@ -235,6 +243,11 @@ def _perform_assignment(self, leader_id, assignment_strategy, members):
         self._subscription.group_subscribe(all_subscribed_topics)
         self._client.set_topics(self._subscription.group_subscription())
 
+        # keep track of the metadata used for assignment so that we can check
+        # after rebalance completion whether anything has changed
+        self._cluster.request_update()
+        self._assignment_snapshot = self._metadata_snapshot
+
         log.debug("Performing assignment for group %s using strategy %s"
                   " with subscriptions %s", self.group_id, assignor.name,
                   member_metadata)
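Taken together with the _on_join_complete() change above, this hunk lets the group leader detect metadata updates that race with an in-flight rebalance: the snapshot is captured when the assignment is computed and compared again once the join completes. A simplified sketch of that flow, using invented names (LeaderCoordinator and its methods are illustrative, not kafka-python's classes):

    class LeaderCoordinator(object):
        # simplified model of the snapshot bookkeeping in this commit
        def __init__(self):
            self._metadata_snapshot = {}      # refreshed on every metadata update
            self._assignment_snapshot = None  # captured while leading an assignment

        def on_metadata_update(self, snapshot):
            self._metadata_snapshot = snapshot

        def perform_assignment(self):
            # leader: remember the metadata the assignment was computed from
            self._assignment_snapshot = dict(self._metadata_snapshot)

        def on_join_complete(self):
            # if metadata changed while the rebalance was in flight, the
            # assignment is already stale; trigger another rebalance instead
            if (self._assignment_snapshot is not None
                    and self._assignment_snapshot != self._metadata_snapshot):
                return 'rejoin'
            return 'use-assignment'

    coord = LeaderCoordinator()
    coord.on_metadata_update({'events': {0, 1}})
    coord.perform_assignment()
    coord.on_metadata_update({'events': {0, 1, 2}})  # partition added mid-rebalance
    assert coord.on_join_complete() == 'rejoin'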
@@ -264,6 +277,7 @@ def _on_join_prepare(self, generation, member_id):
                               " for group %s failed on_partitions_revoked",
                               self._subscription.listener, self.group_id)
 
+        self._assignment_snapshot = None
         self._subscription.mark_for_reassignment()
 
     def need_rejoin(self):