 import six
 
 from google.cloud.pubsub_v1 import types
+from google.cloud.pubsub_v1.publisher import exceptions
 from google.cloud.pubsub_v1.publisher import future
 
-QueueItem = collections.namedtuple('QueueItem', ['message', 'future'])
-
 
 class Batch(object):
     """A batch of messages.
@@ -59,16 +58,24 @@ class Batch(object):
     """
     def __init__(self, client, topic, settings, autocommit=True):
         self._client = client
-        self._topic = topic
-        self._settings = settings
-        self._messages = queue.Queue()
-        self._futures = queue.Queue()
-        self._status = 'accepting messages'
-        self._message_ids = {}
+
+        # Create a namespace that is owned by the client manager; this
+        # is necessary to be able to have these values be communicable between
+        # processes.
+        self._ = self.manager.Namespace()
+        self._.futures = self.manager.list()
+        self._.messages = self.manager.list()
+        self._.message_ids = self.manager.dict()
+        self._.settings = settings
+        self._.status = 'accepting messages'
+        self._.topic = topic
+
+        # This is purely internal tracking.
+        self._process = None
 
         # Continually monitor the thread until it is time to commit the
         # batch, or the batch is explicitly committed.
-        if autocommit and self._settings.max_latency < float('inf'):
+        if autocommit and self._.settings.max_latency < float('inf'):
             self._process = self._client.thread_class(target=self.monitor)
             self._process.start()
 
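The new __init__ replaces plain attributes and queue.Queue objects with a namespace owned by a multiprocessing manager. As a standalone sketch (not part of this patch) of why that matters: manager-owned Namespace, list, and dict objects are proxies whose state lives in the manager process, so changes made in a worker process remain visible to the parent.

# Standalone sketch of manager-owned shared state; nothing here is from
# the patch itself.
import multiprocessing


def worker(shared):
    # Mutations happen in the manager process, so the parent still sees
    # them after this worker exits.
    shared.status = 'in-flight'
    shared.messages.append('payload')


if __name__ == '__main__':
    manager = multiprocessing.Manager()
    shared = manager.Namespace()
    shared.status = 'accepting messages'
    shared.messages = manager.list()

    process = multiprocessing.Process(target=worker, args=(shared,))
    process.start()
    process.join()

    print(shared.status)          # 'in-flight'
    print(list(shared.messages))  # ['payload']
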
@@ -82,6 +89,16 @@ def client(self):
         """
         return self._client
 
+    @property
+    def manager(self):
+        """Return the client's manager.
+
+        Returns:
+            :class:`multiprocessing.Manager`: The manager responsible for
+                handling shared memory objects.
+        """
+        return self._client.manager
+
     @property
     def status(self):
         """Return the status of this batch.
@@ -90,7 +107,7 @@ def status(self):
             str: The status of this batch. All statuses are human-readable,
                 all-lowercase strings.
         """
-        return self._status
+        return self._.status
 
     def commit(self):
         """Actually publish all of the messages on the active batch.
@@ -99,52 +116,46 @@ def commit(self):
         batch on the publisher, and then the batch is discarded upon
         completion.
         """
-        # If this is the active batch on the cleint right now, remove it.
-        self._client.batch(self._topic, pop=self)
-
         # Update the status.
-        self._status = 'in-flight'
+        self._.status = 'in-flight'
 
         # Begin the request to publish these messages.
-        response = self._client.api.publish(self._topic, list(self.flush()))
+        if len(self._.messages) == 0:
+            raise Exception('Empty queue')
+        response = self._client.api.publish(self._.topic, self._.messages)
+
+        # Sanity check: If the number of message IDs is not equal to the
+        # number of futures I have, then something went wrong.
+        if len(response.message_ids) != len(self._.futures):
+            raise exceptions.PublishError(
+                'Some messages were not successfully published.',
+            )
 
         # FIXME (lukesneeringer): How do I check for errors on this?
-        self._status = 'success'
+        self._.status = 'success'
 
         # Iterate over the futures on the queue and return the response IDs.
         # We are trusting that there is a 1:1 mapping, and raise an exception
         # if not.
-        try:
-            for message_id in response.message_ids:
-                future_ = self._futures.get(block=False)
-                self._message_ids[future_] = message_id
-                future_._trigger()
-        except queue.Empty:
-            raise ValueError('More message IDs came back than messages '
-                             'were published.')
-
-        # If the queue of futures is not empty, we did not get enough IDs
-        # back.
-        if self._futures.empty():
-            raise ValueError('Fewer message IDs came back than messages '
-                             'were published.')
-
-
-    def flush(self):
-        """Flush the messages off of this queue, one at a time.
-
-        This method is called when the batch is committed. Calling it outside
-        of the context of committing will effectively remove messages
-        from the batch.
-
-        Yields:
-            :class:~`pubsub_v1.types.PubsubMessage`: A Pub/Sub Message.
+        for mid, fut in zip(response.message_ids, self._.futures):
+            self._.message_ids[fut] = mid
+            fut._trigger()
+
+    def get_message_id(self, publish_future):
+        """Return the message ID corresponding to the given future.
+
+        Args:
+            publish_future (:class:~`future.Future`): The future returned
+                from a ``publish`` call.
+
+        Returns:
+            str: The message ID.
+
+        Raises:
+            KeyError: If the future is not yet done or there is no message
+                ID corresponding to it.
         """
-        try:
-            while True:
-                yield self._messages.get(block=False)
-        except queue.Empty:
-            raise StopIteration
+        return self._.message_ids[publish_future]
 
     def monitor(self):
         """Commit this batch after sufficient time has elapsed.
@@ -156,11 +167,11 @@ def monitor(self):
         # in a separate thread.
        #
         # Sleep for however long we should be waiting.
-        time.sleep(self._settings.max_latency)
+        time.sleep(self._.settings.max_latency)
 
         # If, in the intervening period, the batch started to be committed,
         # then no-op at this point.
-        if self._status != 'accepting messages':
+        if self._.status != 'accepting messages':
             return
 
         # Commit.
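
The monitor() hunk only swaps the attribute access, but the autocommit pattern it implements is easy to state on its own: sleep for the latency budget, then commit only if nothing else has moved the batch out of the 'accepting messages' state. A minimal, hypothetical sketch (names are illustrative, not the library's API):

# Illustrative sketch of the autocommit timer; TinyBatch is hypothetical.
import threading
import time


class TinyBatch(object):
    def __init__(self, max_latency):
        self.status = 'accepting messages'
        self.max_latency = max_latency

    def commit(self):
        # A real commit publishes the queued messages; here we only flip
        # the status so the monitor's check is visible.
        self.status = 'success'

    def monitor(self):
        # Wait out the latency budget, then commit unless a commit was
        # already started (or finished) while we slept.
        time.sleep(self.max_latency)
        if self.status != 'accepting messages':
            return
        self.commit()


batch = TinyBatch(max_latency=0.05)
thread = threading.Thread(target=batch.monitor)
thread.start()
thread.join()
print(batch.status)  # 'success'
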
@@ -216,10 +227,18 @@ def publish(self, data, **attrs):
                             'be sent as text strings.')
 
         # Store the actual message in the batch's message queue.
-        self._messages.put(types.PubsubMessage(data=data, attributes=attrs))
+        self._.messages.append(
+            types.PubsubMessage(data=data, attributes=attrs),
+        )
 
         # Return a Future. That future needs to be aware of the status
         # of this batch.
-        f = future.Future(self)
-        self._futures.put(f)
+        f = future.Future(self._)
+        self._.futures.append(f)
         return f
+
+
+# Make a fake batch. This is used by the client to do single-op checks
+# for batch existence.
+FakeBatch = collections.namedtuple('FakeBatch', ['status'])
+FAKE = FakeBatch(status='fake')
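
The FAKE sentinel at the bottom gives the client something batch-shaped to hold before any real batch exists: it exposes only a status attribute, which is enough for the single-op existence checks the comment mentions. A small, hypothetical illustration of that pattern (the helper below is not part of the patch):

# Hypothetical illustration of the sentinel check; needs_new_batch() is
# not part of the patch.
import collections

FakeBatch = collections.namedtuple('FakeBatch', ['status'])
FAKE = FakeBatch(status='fake')


def needs_new_batch(batch):
    # Any batch that is not currently accepting messages (including the
    # FAKE placeholder) means the client should create a fresh one.
    return batch.status != 'accepting messages'


print(needs_new_batch(FAKE))  # True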