Changesets can be listed by changeset number.
The Git repository is here.
- Revision:
- 224
- Log:
Fix nasty bug identified on the Typo mailing list. An incorrect
assignment in the memcache module meant it ran in testing mode.
This stopped certain features from working at all as well as
making Typo run slowly. Temporarily commented out pending official fix.
- Author:
- rool
- Date:
- Tue Jun 12 15:51:51 +0100 2007
- Size:
- 11624 Bytes
1 | # $TESTING = defined? $TESTING |
2 | |
3 | require 'socket' |
4 | require 'thread' |
5 | require 'timeout' |
6 | require 'rubygems' |
7 | |
##
# Extends String with the CRC32 checksum MemCache uses to map keys to
# servers.

class String

  ##
  # Computes the CRC32 checksum of the string using the ITU-T (IEEE 802.3)
  # polynomial, 0xEDB88320 in reversed bit order.
  #
  # Returns an Integer in 0..0xFFFFFFFF.

  def crc32_ITU_T
    r = 0xFFFFFFFF

    # Iterate over raw bytes.  The previous implementation used self[i],
    # which returns an Integer only on Ruby 1.8; on 1.9+ it returns a
    # one-character String and the XOR below raises TypeError.
    each_byte do |byte|
      r ^= byte
      8.times do
        if (r & 1) != 0 then
          r = (r >> 1) ^ 0xEDB88320
        else
          r >>= 1
        end
      end
    end

    r ^ 0xFFFFFFFF
  end

end
32 | |
33 | ## |
34 | # A Ruby client library for memcached. |
35 | # |
36 | # This is intended to provide access to basic memcached functionality. It |
37 | # does not attempt to be a complete implementation of the entire API. |
38 | |
39 | class MemCache |
40 | |
41 | ## |
42 | # Default options for the cache object. |
43 | |
44 | DEFAULT_OPTIONS = { |
45 | :namespace => nil, |
46 | :readonly => false, |
47 | :multithread => false, |
48 | } |
49 | |
50 | ## |
51 | # Default memcached port. |
52 | |
53 | DEFAULT_PORT = 11211 |
54 | |
55 | ## |
56 | # Default memcached server weight. |
57 | |
58 | DEFAULT_WEIGHT = 1 |
59 | |
60 | ## |
61 | # The amount of time to wait for a response from a memcached server. If a |
62 | # response is not completed within this time, the connection to the server |
63 | # will be closed and an error will be raised. |
64 | |
65 | attr_accessor :request_timeout |
66 | |
67 | ## |
68 | # The namespace for this instance |
69 | |
70 | attr_reader :namespace |
71 | |
72 | ## |
73 | # The multithread setting for this instance |
74 | |
75 | attr_reader :multithread |
76 | |
77 | ## |
78 | # Accepts a list of +servers+ and a list of +opts+. +servers+ may be |
79 | # omitted. See +servers=+ for acceptable server list arguments. |
80 | # |
81 | # Valid options for +opts+ are: |
82 | # |
83 | # [:namespace] Prepends this value to all keys added or retrieved. |
84 | # [:readonly] Raises an exeception on cache writes when true. |
85 | # [:multithread] Wraps cache access in a Mutex for thread safety. |
86 | |
87 | def initialize(*args) |
88 | servers = [] |
89 | opts = {} |
90 | |
91 | case args.length |
92 | when 0 then # NOP |
93 | when 1 then |
94 | arg = args.shift |
95 | case arg |
96 | when Hash then opts = arg |
97 | when Array then servers = arg |
98 | when String then servers = [arg] |
99 | else raise ArgumentError, 'first argument must be Array, Hash or String' |
100 | end |
101 | when 2 then |
102 | servers, opts = args |
103 | else |
104 | raise ArgumentError, "wrong number of arguments (#{args.length} for 2)" |
105 | end |
106 | |
107 | opts = DEFAULT_OPTIONS.merge opts |
108 | @namespace = opts[:namespace] |
109 | @readonly = opts[:readonly] |
110 | @multithread = opts[:multithread] |
111 | @mutex = Mutex.new if @multithread |
112 | self.servers = servers |
113 | @buckets = [] |
114 | end |
115 | |
116 | ## |
117 | # Return a string representation of the cache object. |
118 | |
119 | def inspect |
120 | sprintf("<MemCache: %s servers, %s buckets, ns: %p, ro: %p>", |
121 | @servers.length, @buckets.length, @namespace, @readonly) |
122 | end |
123 | |
124 | ## |
125 | # Returns whether there is at least one active server for the object. |
126 | |
127 | def active? |
128 | not @servers.empty? |
129 | end |
130 | |
131 | ## |
132 | # Returns whether the cache was created read only. |
133 | |
134 | def readonly? |
135 | @readonly |
136 | end |
137 | |
138 | ## |
139 | # Set the servers that the requests will be distributed between. Entries |
140 | # can be either strings of the form "hostname:port" or |
141 | # "hostname:port:weight" or MemCache::Server objects. |
142 | |
143 | def servers=(servers) |
144 | # Create the server objects. |
145 | @servers = servers.collect do |server| |
146 | case server |
147 | when String |
148 | host, port, weight = server.split ':', 3 |
149 | port ||= DEFAULT_PORT |
150 | weight ||= DEFAULT_WEIGHT |
151 | Server.new self, host, port, weight |
152 | when Server |
153 | if server.memcache.multithread != @multithread then |
154 | raise ArgumentError, "can't mix threaded and non-threaded servers" |
155 | end |
156 | server |
157 | else |
158 | raise TypeError, "Cannot convert %s to MemCache::Server" % |
159 | svr.class.name |
160 | end |
161 | end |
162 | |
163 | # Create an array of server buckets for weight selection of servers. |
164 | @buckets = [] |
165 | @servers.each do |server| |
166 | server.weight.times { @buckets.push(server) } |
167 | end |
168 | end |
169 | |
170 | ## |
171 | # Retrieves +key+ from memcache. |
172 | |
173 | def get(key) |
174 | raise MemCacheError, 'No active servers' unless active? |
175 | cache_key = make_cache_key key |
176 | server = get_server_for_key cache_key |
177 | |
178 | raise MemCacheError, 'No connection to server' if server.socket.nil? |
179 | |
180 | value = if @multithread then |
181 | threadsafe_cache_get server, cache_key |
182 | else |
183 | cache_get server, cache_key |
184 | end |
185 | |
186 | return nil if value.nil? |
187 | |
188 | # Return the unmarshaled value. |
189 | return Marshal.load(value) |
190 | rescue ArgumentError, TypeError, SystemCallError, IOError => err |
191 | server.close |
192 | new_err = MemCacheError.new err.message |
193 | new_err.set_backtrace err.backtrace |
194 | raise new_err |
195 | end |
196 | |
197 | ## |
198 | # Retrieves +keys+ and returns a Hash mapping keys to values. |
199 | |
200 | def get_multi(*keys) |
201 | values = {} |
202 | keys.flatten.each { |key| values[key] = get key } |
203 | values |
204 | end |
205 | |
206 | ## |
207 | # Add +key+ to the cache with value +value+ that expires in +expiry+ |
208 | # seconds. |
209 | |
210 | def set(key, value, expiry = 0) |
211 | @mutex.lock if @multithread |
212 | |
213 | raise MemCacheError, "No active servers" unless self.active? |
214 | raise MemCacheError, "Update of readonly cache" if @readonly |
215 | cache_key = make_cache_key(key) |
216 | server = get_server_for_key(cache_key) |
217 | |
218 | sock = server.socket |
219 | raise MemCacheError, "No connection to server" if sock.nil? |
220 | |
221 | marshaled_value = Marshal.dump value |
222 | command = "set #{cache_key} 0 #{expiry} #{marshaled_value.size}\r\n#{marshaled_value}\r\n" |
223 | |
224 | begin |
225 | sock.write command |
226 | sock.gets |
227 | rescue SystemCallError, IOError => err |
228 | server.close |
229 | raise MemCacheError, err.message |
230 | end |
231 | ensure |
232 | @mutex.unlock if @multithread |
233 | end |
234 | |
235 | ## |
236 | # Removes +key+ from the cache in +expiry+ seconds. |
237 | |
238 | def delete(key, expiry = 0) |
239 | @mutex.lock if @multithread |
240 | |
241 | raise MemCacheError, "No active servers" unless active? |
242 | cache_key = make_cache_key key |
243 | server = get_server_for_key cache_key |
244 | |
245 | sock = server.socket |
246 | raise MemCacheError, "No connection to server" if sock.nil? |
247 | |
248 | begin |
249 | sock.write "delete #{cache_key} #{expiry}\r\n" |
250 | sock.gets |
251 | rescue SystemCallError, IOError => err |
252 | server.close |
253 | raise MemCacheError, err.message |
254 | end |
255 | ensure |
256 | @mutex.unlock if @multithread |
257 | end |
258 | |
259 | ## |
260 | # Reset the connection to all memcache servers. This should be called if |
261 | # there is a problem with a cache lookup that might have left the connection |
262 | # in a corrupted state. |
263 | |
264 | def reset |
265 | @servers.each { |server| server.close } |
266 | end |
267 | |
268 | ## |
269 | # Shortcut to get a value from the cache. |
270 | |
271 | alias [] get |
272 | |
273 | ## |
274 | # Shortcut to save a value in the cache. This method does not set an |
275 | # expiration on the entry. Use set to specify an explicit expiry. |
276 | |
277 | def []=(key, value) |
278 | set key, value |
279 | end |
280 | |
281 | protected unless $TESTING |
282 | |
283 | ## |
284 | # Create a key for the cache, incorporating the namespace qualifier if |
285 | # requested. |
286 | |
287 | def make_cache_key(key) |
288 | if namespace.nil? then |
289 | key |
290 | else |
291 | "#{@namespace}:#{key}" |
292 | end |
293 | end |
294 | |
295 | ## |
296 | # Pick a server to handle the request based on a hash of the key. |
297 | |
298 | def get_server_for_key(key) |
299 | raise MemCacheError, "No servers available" if @servers.empty? |
300 | return @servers.first if @servers.length == 1 |
301 | |
302 | hkey = hash_for key |
303 | |
304 | 20.times do |try| |
305 | server = @buckets[hkey % @buckets.nitems] |
306 | return server if server.alive? |
307 | hkey += hash_for "#{try}#{key}" |
308 | end |
309 | |
310 | raise MemCacheError, "No servers available" |
311 | end |
312 | |
313 | ## |
314 | # Returns an interoperable hash value for +key+. (I think, docs are |
315 | # sketchy for down servers). |
316 | |
317 | def hash_for(key) |
318 | (key.crc32_ITU_T >> 16) & 0x7fff |
319 | end |
320 | |
321 | ## |
322 | # Fetches the raw data for +cache_key+ from +server+. Returns nil on cache |
323 | # miss. |
324 | |
325 | def cache_get(server, cache_key) |
326 | socket = server.socket |
327 | socket.write "get #{cache_key}\r\n" |
328 | text = socket.gets # "VALUE <key> <flags> <bytes>\r\n" |
329 | return nil if text == "END\r\n" |
330 | |
331 | text =~ /(\d+)\r/ |
332 | value = socket.read $1.to_i |
333 | socket.read 2 # "\r\n" |
334 | socket.gets # "END\r\n" |
335 | return value |
336 | end |
337 | |
338 | def threadsafe_cache_get(socket, cache_key) # :nodoc: |
339 | @mutex.lock |
340 | cache_get socket, cache_key |
341 | ensure |
342 | @mutex.unlock |
343 | end |
344 | |
345 | ## |
346 | # This class represents a memcached server instance. |
347 | |
348 | class Server |
349 | |
350 | ## |
351 | # The amount of time to wait to establish a connection with a memcached |
352 | # server. If a connection cannot be established within this time limit, |
353 | # the server will be marked as down. |
354 | |
355 | CONNECT_TIMEOUT = 0.25 |
356 | |
357 | ## |
358 | # The amount of time to wait before attempting to re-establish a |
359 | # connection with a server that is marked dead. |
360 | |
361 | RETRY_DELAY = 30.0 |
362 | |
363 | ## |
364 | # The host the memcached server is running on. |
365 | |
366 | attr_reader :host |
367 | |
368 | ## |
369 | # The port the memcached server is listening on. |
370 | |
371 | attr_reader :port |
372 | |
373 | ## |
374 | # The weight given to the server. |
375 | |
376 | attr_reader :weight |
377 | |
378 | ## |
379 | # The time of next retry if the connection is dead. |
380 | |
381 | attr_reader :retry |
382 | |
383 | ## |
384 | # A text status string describing the state of the server. |
385 | |
386 | attr_reader :status |
387 | |
388 | ## |
389 | # Create a new MemCache::Server object for the memcached instance |
390 | # listening on the given host and port, weighted by the given weight. |
391 | |
392 | def initialize(memcache, host, port = DEFAULT_PORT, weight = DEFAULT_WEIGHT) |
393 | raise ArgumentError, "No host specified" if host.nil? or host.empty? |
394 | raise ArgumentError, "No port specified" if port.nil? or port.to_i.zero? |
395 | |
396 | @memcache = memcache |
397 | @host = host |
398 | @port = port.to_i |
399 | @weight = weight.to_i |
400 | |
401 | @multithread = @memcache.multithread |
402 | @mutex = Mutex.new |
403 | |
404 | @sock = nil |
405 | @retry = nil |
406 | @status = 'NOT CONNECTED' |
407 | end |
408 | |
409 | ## |
410 | # Return a string representation of the server object. |
411 | |
412 | def inspect |
413 | sprintf("<MemCache::Server: %s:%d [%d] (%s)>", |
414 | @host, @port, @weight, @status) |
415 | end |
416 | |
417 | ## |
418 | # Check whether the server connection is alive. This will cause the |
419 | # socket to attempt to connect if it isn't already connected and or if |
420 | # the server was previously marked as down and the retry time has |
421 | # been exceeded. |
422 | |
423 | def alive? |
424 | !self.socket.nil? |
425 | end |
426 | |
427 | ## |
428 | # Try to connect to the memcached server targeted by this object. |
429 | # Returns the connected socket object on success or nil on failure. |
430 | |
431 | def socket |
432 | @mutex.lock if @multithread |
433 | return @sock if @sock and not @sock.closed? |
434 | |
435 | @sock = nil |
436 | |
437 | # If the host was dead, don't retry for a while. |
438 | return if @retry and @retry > Time.now |
439 | |
440 | # Attempt to connect if not already connected. |
441 | begin |
442 | @sock = timeout CONNECT_TIMEOUT do |
443 | TCPSocket.new @host, @port |
444 | end |
445 | @retry = nil |
446 | @status = 'CONNECTED' |
447 | rescue SocketError, SystemCallError, IOError, Timeout::Error => err |
448 | mark_dead err.message |
449 | end |
450 | |
451 | return @sock |
452 | ensure |
453 | @mutex.unlock if @multithread |
454 | end |
455 | |
456 | ## |
457 | # Close the connection to the memcached server targeted by this |
458 | # object. The server is not considered dead. |
459 | |
460 | def close |
461 | @mutex.lock if @multithread |
462 | @sock.close if @sock && !@sock.closed? |
463 | @sock = nil |
464 | @retry = nil |
465 | @status = "NOT CONNECTED" |
466 | ensure |
467 | @mutex.unlock if @multithread |
468 | end |
469 | |
470 | private |
471 | |
472 | ## |
473 | # Mark the server as dead and close its socket. |
474 | |
475 | def mark_dead(reason = "Unknown error") |
476 | @sock.close if @sock && !@sock.closed? |
477 | @sock = nil |
478 | @retry = Time.now + RETRY_DELAY |
479 | |
480 | @status = sprintf "DEAD: %s, will retry at %s", reason, @retry |
481 | end |
482 | |
483 | end |
484 | |
485 | ## |
486 | # Base MemCache exception class. |
487 | |
488 | class MemCacheError < RuntimeError; end |
489 | |
490 | end |
491 |