#!/usr/bin/env python
#
# Copyright 2007 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Memcache API.
Provides memcached-alike API to application developers to store
data in memory when reliable storage via the DataStore API isn't
required and higher performance is desired.
"""
import hashlib
import math
import os
import types
import six
from six.moves import zip
import six.moves.cPickle
from google.appengine.api import api_base_pb2
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import capabilities
from google.appengine.api import namespace_manager
from google.appengine.api.memcache import memcache_service_pb2
from google.appengine.runtime import apiproxy_errors
if six.PY2:
import cStringIO
BytesIO = cStringIO.StringIO
else:
BytesIO = six.BytesIO
long = int
MemcacheSetResponse = memcache_service_pb2.MemcacheSetResponse
MemcacheSetRequest = memcache_service_pb2.MemcacheSetRequest
MemcacheGetResponse = memcache_service_pb2.MemcacheGetResponse
MemcacheGetRequest = memcache_service_pb2.MemcacheGetRequest
MemcacheDeleteResponse = memcache_service_pb2.MemcacheDeleteResponse
MemcacheDeleteRequest = memcache_service_pb2.MemcacheDeleteRequest
MemcacheIncrementResponse = memcache_service_pb2.MemcacheIncrementResponse
MemcacheIncrementRequest = memcache_service_pb2.MemcacheIncrementRequest
MemcacheBatchIncrementResponse = memcache_service_pb2.MemcacheBatchIncrementResponse
MemcacheBatchIncrementRequest = memcache_service_pb2.MemcacheBatchIncrementRequest
MemcacheFlushResponse = memcache_service_pb2.MemcacheFlushResponse
MemcacheFlushRequest = memcache_service_pb2.MemcacheFlushRequest
MemcacheStatsRequest = memcache_service_pb2.MemcacheStatsRequest
MemcacheStatsResponse = memcache_service_pb2.MemcacheStatsResponse
DELETE_NETWORK_FAILURE = 0
DELETE_ITEM_MISSING = 1
DELETE_SUCCESSFUL = 2
STORED = MemcacheSetResponse.STORED
NOT_STORED = MemcacheSetResponse.NOT_STORED
ERROR = MemcacheSetResponse.ERROR
EXISTS = MemcacheSetResponse.EXISTS
MAX_KEY_SIZE = 250
MAX_VALUE_SIZE = 10**6
STAT_HITS = 'hits'
STAT_MISSES = 'misses'
STAT_BYTE_HITS = 'byte_hits'
STAT_ITEMS = 'items'
STAT_BYTES = 'bytes'
STAT_OLDEST_ITEM_AGES = 'oldest_item_age'
FLAG_TYPE_MASK = 7
FLAG_COMPRESSED = 1 << 3
TYPE_STR = 0
TYPE_UNICODE = 1
TYPE_PICKLED = 2
TYPE_INT = 3
TYPE_LONG = 4
TYPE_BOOL = 5
CAPABILITY = capabilities.CapabilitySet('memcache')
class ItemWithTimestamps(object):
"""A Memcache item with its relevant timestamps."""
def __init__(
self,
value,
expiration_time_sec,
last_access_time_sec,
delete_lock_time_sec=0,
):
"""Constructor.
Args:
value: The Memcache item. Set to "" if the item is delete locked.
expiration_time_sec: The absolute expiration timestamp of the item in unix
epoch seconds. Set to 0 if no expiration time is set.
last_access_time_sec: The absolute last accessed timestamp of the item in
unix epoch seconds.
delete_lock_time_sec: Absolute delete_time timestamp of the item in unix
epoch seconds. Set to 0 if this item is not delete locked.
"""
self.value = value
self.expiration_time_sec = expiration_time_sec
self.last_access_time_sec = last_access_time_sec
self.delete_lock_time_sec = delete_lock_time_sec
def get_value(self):
"""Returns the value of the item."""
return self.value
def get_expiration_time_sec(self):
"""Returns the absolute expiration timestamp in unix epoch seconds.
It is set to 0 if this item has no expiration timestamp.
"""
return self.expiration_time_sec
def get_last_access_time_sec(self):
"""Returns the last accessed timestamp of the item in unix epoch seconds."""
return self.last_access_time_sec
def get_delete_lock_time_sec(self):
"""Returns the absolute delete_time timestamp in unix epoch seconds.
It is set to 0 if this item is not delete locked.
"""
return self.delete_lock_time_sec
def _is_pair(obj):
"""Helper to test if something is a pair (2-tuple)."""
return isinstance(obj, tuple) and len(obj) == 2
def _add_name_space(message, namespace=None):
"""Populate the name_space field in a `messagecol` buffer.
Args:
message: A `messagecol` buffer supporting the `set_name_space()` operation.
namespace: The name of the namespace part. If `None`, use the
default namespace. The empty namespace (i.e. `''`) will clear
the `name_space` field.
"""
if namespace is None:
namespace = namespace_manager.get_namespace()
if not namespace:
message.ClearField('name_space')
else:
message.name_space = namespace
def _key_string(key, key_prefix='', server_to_user_dict=None):
"""Utility function to handle different ways of requesting keys.
Args:
key: Either a string or tuple of `(shard_number, string)`. In Google App
Engine the sharding is automatic so the shard number is ignored.
To memcache, the key is just bytes (no defined encoding).
key_prefix: Optional string prefix to prepend to key.
server_to_user_dict: Optional dictionary to populate with a mapping of
server-side key (which includes the key_prefix) to user-supplied key
(which does not have the prefix).
  Returns:
    The key as a non-unicode string prepended with `key_prefix`. This is
    the key sent to and stored by the server. If the resulting key is
    longer than `MAX_KEY_SIZE`, it is hashed with SHA-1 and replaced
    with the hex digest of that hash.
  Raises:
    TypeError: If the provided key isn't a string or a tuple of
      `(int, string)`, or if `key_prefix` isn't a string.
"""
if _is_pair(key):
key = key[1]
if not isinstance(key, (six.string_types, six.binary_type)):
raise TypeError('Key must be a string instance, received %r' % key)
if not isinstance(key_prefix, (six.string_types, six.binary_type)):
raise TypeError(
'key_prefix must be a string instance, received %r' % key_prefix)
key_bytes = key
if isinstance(key, six.text_type):
key_bytes = key.encode('utf-8')
if isinstance(key_prefix, six.text_type):
key_prefix = key_prefix.encode('utf-8')
server_key = key_prefix + key_bytes
if len(server_key) > MAX_KEY_SIZE:
server_key = hashlib.sha1(server_key).hexdigest()
if isinstance(server_key, six.text_type):
server_key = server_key.encode('utf-8')
if server_to_user_dict is not None:
assert isinstance(server_to_user_dict, dict)
server_to_user_dict[server_key] = key
return server_key
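
# A short illustration (editorial sketch, not part of the original module)
# of how `_key_string` normalizes keys; the results follow from the logic
# above:
#
#   _key_string('user:42', key_prefix='v1:')  # -> b'v1:user:42'
#   _key_string((3, 'user:42'))               # -> b'user:42' (shard ignored)
#   _key_string('k' * 300)                    # -> 40-byte SHA-1 hex digest
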
def _validate_encode_value(value, do_pickle):
"""Utility function to validate and encode server keys and values.
Args:
value: Value to store in memcache. If it's a string, it will get passed
along as-is. If it's a unicode string, it will be marked appropriately,
such that retrievals will yield a unicode value. If it's any other data
type, this function will attempt to pickle the data and then store the
serialized result, unpickling it upon retrieval.
do_pickle: Callable that takes an object and returns a non-unicode
string containing the pickled object.
Returns:
Tuple `(stored_value, flags)` where:
stored_value: The value as a non-unicode string that should be stored
in memcache.
flags: An integer with bits set from the `FLAG_*` constants in this file
to indicate the encoding of the key and value.
Raises:
ValueError: If the encoded value is too large.
pickle.PicklingError: If the value is not a string and could not be pickled.
RuntimeError: If a complicated data structure could not be pickled due to
too many levels of recursion in its composition.
"""
flags = 0
stored_value = value
if isinstance(value, six.binary_type):
pass
elif isinstance(value, six.text_type):
stored_value = value.encode('utf-8')
flags |= TYPE_UNICODE
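  # Note: bool is tested before int because bool is a subclass of int;
  # without this ordering a bool would be stored as TYPE_INT.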
elif isinstance(value, bool):
stored_value = str(int(value)).encode()
flags |= TYPE_BOOL
elif isinstance(value, int):
stored_value = str(value).encode()
flags |= TYPE_INT
elif isinstance(value, six.integer_types):
stored_value = str(value).encode()
flags |= TYPE_LONG
else:
stored_value = do_pickle(value)
flags |= TYPE_PICKLED
if len(stored_value) > MAX_VALUE_SIZE:
raise ValueError('Values may not be more than %d bytes in length; '
'received %d bytes' % (MAX_VALUE_SIZE, len(stored_value)))
return (stored_value, flags)
def _decode_value(stored_value, flags, do_unpickle):
"""Utility function for decoding values retrieved from memcache.
Args:
stored_value: The value as a non-unicode string that was stored.
flags: An integer with bits set from the `FLAG_*` constants in this file
that indicate the encoding of the key and value.
do_unpickle: Callable that takes a non-unicode string object that contains
a pickled object and returns the pickled object.
Returns:
    The original object that was stored, be it a normal byte string, a
    unicode string, `int`, `long`, `bool`, or a Python object that was
    pickled.
Raises:
pickle.UnpicklingError: If the value could not be unpickled.
"""
assert isinstance(stored_value, six.binary_type)
assert isinstance(flags, six.integer_types)
type_number = flags & FLAG_TYPE_MASK
value = stored_value
if type_number == TYPE_STR:
return value
elif type_number == TYPE_UNICODE:
return six.text_type(value, 'utf-8')
elif type_number == TYPE_PICKLED:
return do_unpickle(value)
elif type_number == TYPE_BOOL:
return bool(int(value))
elif type_number == TYPE_INT:
return int(value)
elif type_number == TYPE_LONG:
return long(value)
else:
assert False, 'Unknown stored type'
assert False, "Shouldn't get here."
def create_rpc(deadline=None, callback=None):
"""Creates an RPC object for use with the memcache API.
Args:
deadline: Optional deadline in seconds for the operation; the default
is a system-specific deadline (typically 5 seconds).
callback: Optional callable to invoke on completion.
Returns:
An `apiproxy_stub_map.UserRPC` object specialized for this service.
"""
return apiproxy_stub_map.UserRPC('memcache', deadline, callback)
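
# Sketch of the intended async calling pattern (assumed usage, mirroring
# the `*_async` methods of `Client` below): create the RPC, start a call
# on it, do other work, then block on the result.
#
#   rpc = create_rpc(deadline=1)
#   Client().get_multi_async(['k1', 'k2'], rpc=rpc)
#   ...                          # do other work here
#   values = rpc.get_result()    # {} on network error
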
class Client(object):
"""Memcache client object, through which one invokes all memcache operations.
Several methods are no-ops to retain source-level compatibility
with the existing popular Python memcache library.
Any method that takes a 'key' argument will accept that key as a string
(unicode or not) or a tuple of `(hash_value, string)` where the hash_value,
normally used for sharding onto a memcache instance, is instead ignored, as
Google App Engine deals with the sharding transparently. Keys in memcache are
just bytes, without a specified encoding. All such methods may raise
`TypeError` if provided a bogus key value and a `ValueError` if the key is too
large.
Any method that takes a 'value' argument will accept as that value any
`string` (unicode or not), `int`, `long`, or pickle-able Python object,
including all native types. You'll get back from the cache the same type that
you originally put in.
The `Client` class is not thread-safe with respect to the `gets()`, `cas()`
and `cas_multi()` methods (and other compare-and-set-related methods).
Therefore, `Client` objects should not be used by more than one thread for CAS
purposes. Note that the global `Client` for the module-level functions is okay
because it does not expose any of the CAS methods.
"""
def __init__(self,
servers=None,
debug=0,
pickleProtocol=six.moves.cPickle.HIGHEST_PROTOCOL,
pickler=six.moves.cPickle.Pickler,
unpickler=six.moves.cPickle.Unpickler,
pload=None,
pid=None,
make_sync_call=None,
_app_id=None):
"""Create a new Client object.
No parameters are required.
Arguments:
servers: Ignored; only for compatibility.
debug: Ignored; only for compatibility.
pickleProtocol: Pickle protocol to use for pickling the object.
pickler: pickle.Pickler sub-class to use for pickling.
unpickler: pickle.Unpickler sub-class to use for unpickling.
pload: Callable to use for retrieving objects by persistent id.
      pid: Callable to use for determining the persistent id for objects, if any.
make_sync_call: Ignored; only for compatibility with an earlier version.
"""
if os.environ.get('MEMCACHE_USE_CROSS_COMPATIBLE_PROTOCOL', None):
pickleProtocol = 2
self._pickler_factory = pickler
self._unpickler_factory = unpickler
self._pickle_protocol = pickleProtocol
self._persistent_id = pid
self._persistent_load = pload
self._app_id = _app_id
self._cas_ids = {}
def cas_reset(self):
"""Clear the remembered CAS ids."""
self._cas_ids.clear()
def _make_async_call(self, rpc, method, request, response, get_result_hook,
user_data):
"""Internal helper to schedule an asynchronous RPC.
Args:
rpc: `None` or a `UserRPC` object.
method: Method name, e.g. 'Get'.
request: Request protobuf.
response: Response protobuf.
get_result_hook: None or hook function used to process results
(See `UserRPC.make_call()` for more info).
user_data: None or user data for hook function.
Returns:
A `UserRPC` object; either the one passed in as the first argument,
or a new one (if the first argument was `None`).
"""
if rpc is None:
rpc = create_rpc()
assert rpc.service == 'memcache', repr(rpc.service)
rpc.make_call(method, request, response, get_result_hook, user_data)
return rpc
def _do_pickle(self, value):
"""Pickles a provided value."""
pickle_data = BytesIO()
pickler = self._pickler_factory(pickle_data, protocol=self._pickle_protocol)
if self._persistent_id is not None:
pickler.persistent_id = self._persistent_id
pickler.dump(value)
return pickle_data.getvalue()
def _do_unpickle(self, value):
"""Unpickles a provided value."""
pickle_data = BytesIO(value)
unpickler = self._unpickler_factory(pickle_data)
if self._persistent_load is not None:
unpickler.persistent_load = self._persistent_load
return unpickler.load()
def _add_app_id(self, message):
"""Populates override field in message if accessing another app's memcache.
Args:
message: A protocol buffer supporting the `mutable_override()` operation.
"""
if self._app_id:
app_override = message.override
app_override.app_id = self._app_id
def set_servers(self, servers):
"""Sets the pool of memcache servers used by the client.
This is purely a compatibility method. In Google App Engine, it's a no-op.
"""
pass
def disconnect_all(self):
"""Closes all connections to memcache servers.
This is purely a compatibility method. In Google App Engine, it's a no-op.
"""
pass
def forget_dead_hosts(self):
"""Resets all servers to the alive status.
This is purely a compatibility method. In Google App Engine, it's a no-op.
"""
pass
def debuglog(self):
"""Logging function for debugging information.
This is purely a compatibility method. In Google App Engine, it's a no-op.
"""
pass
def get_stats(self):
"""Gets memcache statistics for this application.
All of these statistics may reset due to various transient conditions. They
provide the best information available at the time of being called.
Returns:
Dictionary mapping statistic names to associated values. Statistics and
their associated meanings:
hits: Number of cache get requests resulting in a cache hit.
misses: Number of cache get requests resulting in a cache miss.
byte_hits: Sum of bytes transferred on get requests. Rolls over to
zero on overflow.
items: Number of key/value pairs in the cache.
bytes: Total size of all items in the cache.
oldest_item_age: How long in seconds since the oldest item in the
cache was accessed. Effectively, this indicates how long a new
item will survive in the cache without being accessed. This is
_not_ the amount of time that has elapsed since the item was
created.
On error, returns `None`.
"""
rpc = self.get_stats_async()
return rpc.get_result()
def get_stats_async(self, rpc=None):
"""Async version of `get_stats()`.
Returns:
A `UserRPC` instance whose `get_result()` method returns `None` if
there was a network error, otherwise a `dict` just like
`get_stats()` returns.
"""
request = MemcacheStatsRequest()
self._add_app_id(request)
response = MemcacheStatsResponse()
return self._make_async_call(rpc, 'Stats', request, response,
self.__get_stats_hook, None)
def __get_stats_hook(self, rpc):
try:
rpc.check_success()
except apiproxy_errors.Error:
return None
response = rpc.response
if not response.HasField('stats'):
return {
STAT_HITS: 0,
STAT_MISSES: 0,
STAT_BYTE_HITS: 0,
STAT_ITEMS: 0,
STAT_BYTES: 0,
STAT_OLDEST_ITEM_AGES: 0,
}
stats = response.stats
return {
STAT_HITS: stats.hits,
STAT_MISSES: stats.misses,
STAT_BYTE_HITS: stats.byte_hits,
STAT_ITEMS: stats.items,
STAT_BYTES: stats.bytes,
STAT_OLDEST_ITEM_AGES: stats.oldest_item_age,
}
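
  # Example of consuming the stats dict (assumed usage; keys are the
  # STAT_* module constants):
  #
  #   stats = Client().get_stats()
  #   if stats is not None:
  #     total = stats[STAT_HITS] + stats[STAT_MISSES]
  #     hit_ratio = stats[STAT_HITS] / float(total) if total else 0.0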
def flush_all(self):
"""Deletes everything in memcache.
Returns:
`True` on success, `False` on RPC or server error.
"""
rpc = self.flush_all_async()
return rpc.get_result()
def flush_all_async(self, rpc=None):
"""Async version of `flush_all()`.
Returns:
A `UserRPC` instance whose `get_result()` method returns `True` on
success, `False` on RPC or server error.
"""
request = MemcacheFlushRequest()
self._add_app_id(request)
response = MemcacheFlushResponse()
return self._make_async_call(rpc, 'FlushAll', request, response,
self.__flush_all_hook, None)
def __flush_all_hook(self, rpc):
try:
rpc.check_success()
except apiproxy_errors.Error:
return False
return True
def get(self, key, namespace=None, for_cas=False):
"""Looks up a single key in memcache.
If you have multiple items to load, though, it's much more efficient
to use `get_multi()` instead, which loads them in one bulk operation,
reducing the networking latency that'd otherwise be required to do
many serialized `get()` operations.
Args:
key: The key in memcache to look up. See docs on Client
for details of format.
namespace: a string specifying an optional namespace to use in
the request.
for_cas: If `True`, request and store CAS ids on the client (see
`cas()` operation below).
Returns:
The value of the key, if found in memcache, else `None`.
"""
if _is_pair(key):
key = key[1]
rpc = self.get_multi_async([key], namespace=namespace, for_cas=for_cas)
results = rpc.get_result()
return results.get(key)
def gets(self, key, namespace=None):
"""An alias for `get(..., for_cas=True)`."""
return self.get(key, namespace=namespace, for_cas=True)
def get_multi(self, keys, key_prefix='', namespace=None, for_cas=False):
"""Looks up multiple keys from memcache in one operation.
This is the recommended way to do bulk loads.
Args:
keys: List of keys to look up. Keys may be strings or
tuples of `(hash_value, string)`. Google App Engine
does the sharding and hashing automatically, though, so the hash
value is ignored. To memcache, keys are just series of bytes,
and not in any particular encoding.
key_prefix: Prefix to prepend to all keys when talking to the server;
not included in the returned dictionary.
namespace: a string specifying an optional namespace to use in
the request.
for_cas: If `True`, request and store CAS ids on the client.
Returns:
A dictionary of the keys and values that were present in memcache.
Even if the key_prefix was specified, that key_prefix won't be on
the keys in the returned dictionary.
"""
rpc = self.get_multi_async(keys, key_prefix, namespace, for_cas)
return rpc.get_result()
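
  # A bulk-load sketch (assumed usage): the key_prefix travels to the
  # server but is stripped from the keys of the returned dict.
  #
  #   client = Client()
  #   found = client.get_multi(['a', 'b', 'c'], key_prefix='v1:')
  #   missing = [k for k in ['a', 'b', 'c'] if k not in found]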
def get_multi_async(self,
keys,
key_prefix='',
namespace=None,
for_cas=False,
rpc=None):
"""Async version of `get_multi()`.
Returns:
A `UserRPC` instance whose `get_result()` method returns {} if
there was a network error, otherwise a `dict` just like
`get_multi()` returns.
"""
request = MemcacheGetRequest()
self._add_app_id(request)
_add_name_space(request, namespace)
if for_cas:
request.for_cas = True
response = MemcacheGetResponse()
user_key = {}
for key in keys:
request.key.append(_key_string(key, key_prefix, user_key))
return self._make_async_call(rpc, 'Get', request, response, self.__get_hook,
user_key)
def __get_hook(self, rpc):
try:
rpc.check_success()
except apiproxy_errors.Error:
return {}
for_cas = rpc.request.for_cas
namespace = rpc.request.name_space
response = rpc.response
user_key = rpc.user_data
return_value = {}
for returned_item in response.item:
value = _decode_value(returned_item.value,
returned_item.flags, self._do_unpickle)
raw_key = returned_item.key
if for_cas:
ns = namespace if namespace else ''
self._cas_ids[(ns, raw_key)] = returned_item.cas_id
return_value[user_key[raw_key]] = value
return return_value
def peek(self, key, namespace=None):
"""Gets an item from memcache along with its timestamp metadata.
Peeking at items will update stats, but will not alter the eviction order
of the item. Unlike get(), an item is fetched even if it is delete locked.
Args:
key: The key in memcache to look up. See docs on Client for details of
format.
namespace: a string specifying an optional namespace to use in the
request.
Returns:
An ItemWithTimestamps object which contains the value of the item along
with timestamp metadata - expiration timestamp, last access timestamp and
delete timestamp (if the item is delete locked).
"""
if _is_pair(key):
key = key[1]
rpc = self.peek_multi_async([key], namespace=namespace)
results = rpc.get_result()
return results.get(key)
def peek_multi(self, keys, key_prefix='', namespace=None):
"""Gets multiple items from memcache along with their timestamp metadata.
This is the recommended way to do bulk peek() calls.
Args:
keys: List of keys to look up. Keys may be strings or tuples of
(hash_value, string). Google App Engine does the sharding and hashing
automatically, though, so the hash value is ignored. To memcache, keys
are just series of bytes, and not in any particular encoding.
key_prefix: Prefix to prepend to all keys when talking to the server; not
included in the returned dictionary.
namespace: a string specifying an optional namespace to use in the
request.
Returns:
A dictionary of the keys and ItemWithTimestamps objects. Even if the
key_prefix was specified, that key_prefix won't be on the keys in the
returned dictionary.
Each ItemWithTimestamps object contains the item corresponding to the
key, along with timestamp metadata - expiration timestamp, last access
timestamp and delete timestamp (if the item is delete locked).
"""
rpc = self.peek_multi_async(keys, key_prefix, namespace)
return rpc.get_result()
def peek_multi_async(self, keys, key_prefix='', namespace=None, rpc=None):
"""Async version of peek_multi()."""
request = MemcacheGetRequest()
self._add_app_id(request)
_add_name_space(request, namespace)
request.for_peek = True
response = MemcacheGetResponse()
user_key = {}
for key in keys:
request.key.append(_key_string(key, key_prefix, user_key))
return self._make_async_call(
rpc, 'Get', request, response, self.__peek_hook, user_key
)
def __peek_hook(self, rpc):
"""Returns a dict of keys and ItemWithTimestamps objects."""
try:
rpc.check_success()
except apiproxy_errors.Error:
return {}
response = rpc.response
user_key = rpc.user_data
return_value = {}
for returned_item in response.item:
value = ''
if returned_item.value:
value = _decode_value(
returned_item.value, returned_item.flags, self._do_unpickle
)
timestamps = returned_item.timestamps
raw_key = returned_item.key
return_value[user_key[raw_key]] = ItemWithTimestamps(
value,
timestamps.expiration_time_sec,
timestamps.last_access_time_sec,
timestamps.delete_lock_time_sec,
)
return return_value
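
  # Example of reading the timestamp metadata returned by peek() (assumed
  # usage; a delete-locked item comes back with value '' per
  # ItemWithTimestamps):
  #
  #   item = Client().peek('some-key')
  #   if item is not None and item.get_delete_lock_time_sec():
  #     pass  # delete locked until that unix timestamp; value is ''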
def delete(self, key, seconds=0, namespace=None):
"""Deletes a key from memcache.
Args:
      key: Key to delete. See docs on Client for details.
seconds: Optional number of seconds to make deleted items 'locked'
for 'add' operations. Value can be a delta from current time (up to
1 month), or an absolute Unix epoch time. Defaults to 0, which means
items can be immediately added. With or without this option,
a 'set' operation will always work. Float values will be rounded up to
the nearest whole second.
namespace: a string specifying an optional namespace to use in
the request.
Returns:
      `DELETE_NETWORK_FAILURE (0)` on network failure,
      `DELETE_ITEM_MISSING (1)` if the server tried to delete the item but
      didn't have it, or
      `DELETE_SUCCESSFUL (2)` if the item was actually deleted.
This can be used as a boolean value, where a network failure is the
only bad condition.
"""
rpc = self.delete_multi_async([key], seconds, namespace=namespace)
results = rpc.get_result()
if not results:
return DELETE_NETWORK_FAILURE
return results[0]
def delete_multi(self, keys, seconds=0, key_prefix='', namespace=None):
"""Delete multiple keys at once.
Args:
keys: List of keys to delete.
seconds: Optional number of seconds to make deleted items 'locked'
for 'add' operations. Value can be a delta from current time (up to
1 month), or an absolute Unix epoch time. Defaults to 0, which means
items can be immediately added. With or without this option,
a 'set' operation will always work. Float values will be rounded up to
the nearest whole second.
key_prefix: Prefix to put on all keys when sending specified
keys to memcache. See docs for `get_multi()` and `set_multi()`.
namespace: a string specifying an optional namespace to use in
the request.
Returns:
`True` if all operations completed successfully. `False` if one
or more failed to complete.
"""
rpc = self.delete_multi_async(keys, seconds, key_prefix, namespace)
results = rpc.get_result()
return bool(results)
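
  # Sketch of a delete with a lock window (assumed usage): for roughly 10
  # seconds after the delete, add() on these keys fails while set() still
  # succeeds, as described in the delete() docstring above.
  #
  #   Client().delete_multi(['a', 'b'], seconds=10)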
def delete_multi_async(self,
keys,
seconds=0,
key_prefix='',
namespace=None,
rpc=None):
"""Async version of `delete_multi()` -- note different return value.
Returns:
A `UserRPC` instance whose `get_result()` method returns `None` if
there was a network error, or a list of status values otherwise,
where each status corresponds to a key and is either
`DELETE_SUCCESSFUL`, `DELETE_ITEM_MISSING`, or `DELETE_NETWORK_FAILURE`
(see `delete()` docstring for details).
"""
if not isinstance(seconds, (six.integer_types, float)):
raise TypeError('Delete timeout must be a number.')
if seconds < 0:
raise ValueError('Delete timeout must not be negative.')
request = MemcacheDeleteRequest()
self._add_app_id(request)
_add_name_space(request, namespace)
response = MemcacheDeleteResponse()
for key in keys:
delete_item = request.item.add()
delete_item.key = _key_string(key, key_prefix=key_prefix)
delete_item.delete_time = int(math.ceil(seconds))
return self._make_async_call(rpc, 'Delete', request, response,
self.__delete_hook, None)
def __delete_hook(self, rpc):
try:
rpc.check_success()
except apiproxy_errors.Error:
return None
result = []
for status in rpc.response.delete_status:
if status == MemcacheDeleteResponse.DELETED:
result.append(DELETE_SUCCESSFUL)
elif status == MemcacheDeleteResponse.NOT_FOUND:
result.append(DELETE_ITEM_MISSING)
else:
return None
return result
def set(self, key, value, time=0, min_compress_len=0, namespace=None):
"""Sets a key's value, regardless of previous contents in cache.
Unlike `add()` and `replace()`, this method always sets (or
overwrites) the value in memcache, regardless of previous
contents.
Args:
key: Key to set. See docs on Client for details.
value: Value to set. Any type. If complex, will be pickled.
time: Optional expiration time, either relative number of seconds
from current time (up to 1 month), or an absolute Unix epoch time.
By default, items never expire, though items may be evicted due to
memory pressure. Float values will be rounded up to the nearest
whole second.
min_compress_len: Ignored option for compatibility.
namespace: a string specifying an optional namespace to use in
the request.
Returns:
`True` if set. `False` on error.
"""
return self._set_with_policy(
MemcacheSetRequest.SET, key, value, time=time, namespace=namespace)
def add(self, key, value, time=0, min_compress_len=0, namespace=None):
"""Sets a key's value if the item is not already in memcache.
Args:
key: Key to set. See docs on Client for details.
value: Value to set. Any type. If complex, will be pickled.
time: Optional expiration time, either relative number of seconds
from current time (up to 1 month), or an absolute Unix epoch time.
By default, items never expire, though items may be evicted due to
memory pressure. Float values will be rounded up to the nearest
whole second.
min_compress_len: Ignored option for compatibility.
namespace: a string specifying an optional namespace to use in
the request.
Returns:
`True` if added. `False` on error.
"""
return self._set_with_policy(
MemcacheSetRequest.ADD, key, value, time=time, namespace=namespace)
def replace(self, key, value, time=0, min_compress_len=0, namespace=None):
"""Replaces a key's value, failing if item isn't already in memcache.
Args:
key: Key to set. See docs on Client for details.
value: Value to set. Any type. If complex, will be pickled.
time: Optional expiration time, either relative number of seconds
from current time (up to 1 month), or an absolute Unix epoch time.
By default, items never expire, though items may be evicted due to
memory pressure. Float values will be rounded up to the nearest
whole second.
min_compress_len: Ignored option for compatibility.
namespace: a string specifying an optional namespace to use in
the request.
Returns:
`True` if replaced. `False` on RPC error or cache miss.
"""
return self._set_with_policy(
MemcacheSetRequest.REPLACE, key, value, time=time, namespace=namespace)
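
  # The three write policies side by side (assumed usage, per the
  # docstrings above):
  #
  #   c = Client()
  #   c.set('k', 1)             # unconditional write -> True
  #   c.add('k', 2)             # False: 'k' already present
  #   c.replace('missing', 3)   # False: key not in cache
  #   c.set('k', 1, time=3600)  # expires in one hour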
def cas(self, key, value, time=0, min_compress_len=0, namespace=None):
"""Compare-And-Set update.
This requires that the key has previously been successfully
fetched with `gets()` or `get(..., for_cas=True)`, and that no changes
have been made to the key since that fetch. Typical usage is:
key = ...
client = memcache.Client()
value = client.gets(key) # `OR` client.get(key, for_cas=True)
<updated value>
ok = client.cas(key, value)
If two processes run similar code, the first one calling `cas()`