Ticket #1392: testgetlatencies01.darcs.patch

File testgetlatencies01.darcs.patch, 22.8 KB (added by arch_o_median at 2011-04-15T00:03:37Z)
Fri Mar 25 14:35:14 MDT 2011  wilcoxjg@gmail.com
  * storage: new mocking tests of storage server read and write
  There are already tests of read and write functionality in test_storage.py, but those tests let the code under test use a real filesystem, whereas these tests mock all filesystem calls.

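  As a minimal illustration (not part of the recorded bundle), the mocking pattern these tests rely on looks roughly like the sketch below; demo() is a hypothetical stand-in for a test method:

    import mock

    @mock.patch('__builtin__.open')
    def demo(mockopen):
        # Route every open() performed by code under test to this stub,
        # so nothing on the real filesystem is ever touched.
        def call_open(fname, mode):
            raise IOError(2, "No such file or directory: %r" % fname)
        mockopen.side_effect = call_open
        try:
            open('testdir/bucket_counter.state', 'rb')
        except IOError:
            pass  # the stub raised instead of the real open()

    demo()
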
Thu Apr 14 16:48:23 MDT 2011  zooko@zooko.com
  * test_server.py --> test_backends.py:  server.py: added testing of get_latencies in StorageServer
  This patch tests both coverage and handling of small samples in the get_latencies method of StorageServer.  get_latencies now distinguishes between highly repetitive latencies and small sample sizes.  This is of most concern at the big end of the latency distribution, although the ambiguity increases in general as the sample size decreases.

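  A sketch (again not part of the bundle) of the behavior this gives get_latencies, mirroring the server.py hunk recorded below; the 1000-sample threshold comes from that hunk, and the single percentile shown stands in for the full set the real method computes:

    def get_latencies_sketch(latencies):
        output = {}
        for category, samples in latencies.items():
            samples = sorted(samples)
            count = len(samples)
            if count < 1000:
                # Too small a sample to quote percentiles; report None.
                output[category] = None
                continue
            stats = {}
            stats["mean"] = sum(samples) / count
            stats["01_0_percentile"] = samples[int(0.01 * count)]
            output[category] = stats
        return output

    # With only 10 samples in a category, the report for it is None.
    assert get_latencies_sketch({"read": [0] * 10}) == {"read": None}
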
Thu Apr 14 17:15:26 MDT 2011  zooko@zooko.com
  * test_backends: cleaned whitespace, made test_get_latencies its own function

New patches:

[storage: new mocking tests of storage server read and write
wilcoxjg@gmail.com**20110325203514
 Ignore-this: df65c3c4f061dd1516f88662023fdb41
 There are already tests of read and write functionality in test_storage.py, but those tests let the code under test use a real filesystem, whereas these tests mock all filesystem calls.
] {
addfile ./src/allmydata/test/test_server.py
hunk ./src/allmydata/test/test_server.py 1
+from twisted.trial import unittest
+
+from StringIO import StringIO
+
+from allmydata.test.common_util import ReallyEqualMixin
+
+import mock
+
+# This is the code that we're going to be testing.
+from allmydata.storage.server import StorageServer
+
+# The following share file contents were generated with
+# storage.immutable.ShareFile from Tahoe-LAFS v1.8.2
+# with share data == 'a'.
+share_data = 'a\x00\x00\x00\x00xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\x00(\xde\x80'
+share_file_data = '\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x01' + share_data
+
+sharefname = 'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a/0'
+
+class TestServerConstruction(unittest.TestCase, ReallyEqualMixin):
+    @mock.patch('__builtin__.open')
+    def test_create_server(self, mockopen):
+        """ This tests whether a server instance can be constructed. """
+
+        def call_open(fname, mode):
+            if fname == 'testdir/bucket_counter.state':
+                raise IOError(2, "No such file or directory: 'testdir/bucket_counter.state'")
+            elif fname == 'testdir/lease_checker.state':
+                raise IOError(2, "No such file or directory: 'testdir/lease_checker.state'")
+            elif fname == 'testdir/lease_checker.history':
+                return StringIO()
+        mockopen.side_effect = call_open
+
+        # Now begin the test.
+        s = StorageServer('testdir', 'testnodeidxxxxxxxxxx')
+
+        # You passed!
+
+class TestServer(unittest.TestCase, ReallyEqualMixin):
+    @mock.patch('__builtin__.open')
+    def setUp(self, mockopen):
+        def call_open(fname, mode):
+            if fname == 'testdir/bucket_counter.state':
+                raise IOError(2, "No such file or directory: 'testdir/bucket_counter.state'")
+            elif fname == 'testdir/lease_checker.state':
+                raise IOError(2, "No such file or directory: 'testdir/lease_checker.state'")
+            elif fname == 'testdir/lease_checker.history':
+                return StringIO()
+        mockopen.side_effect = call_open
+
+        self.s = StorageServer('testdir', 'testnodeidxxxxxxxxxx')
+
+
+    @mock.patch('time.time')
+    @mock.patch('os.mkdir')
+    @mock.patch('__builtin__.open')
+    @mock.patch('os.listdir')
+    @mock.patch('os.path.isdir')
+    def test_write_share(self, mockisdir, mocklistdir, mockopen, mockmkdir, mocktime):
80+        """Handle a report of corruption."""
81+
+        def call_listdir(dirname):
+            self.failUnlessReallyEqual(dirname, 'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a')
+            raise OSError(2, "No such file or directory: 'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a'")
+
+        mocklistdir.side_effect = call_listdir
+
+        class MockFile:
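+            # A minimal in-memory stand-in for an open file: it supports
+            # write/seek/read/tell and zero-pads the buffer when a write
+            # begins past the current end.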
+            def __init__(self):
+                self.buffer = ''
+                self.pos = 0
+            def write(self, instring):
+                begin = self.pos
+                padlen = begin - len(self.buffer)
+                if padlen > 0:
+                    self.buffer += '\x00' * padlen
+                end = self.pos + len(instring)
+                self.buffer = self.buffer[:begin]+instring+self.buffer[end:]
+                self.pos = end
+            def close(self):
+                pass
+            def seek(self, pos):
+                self.pos = pos
+            def read(self, numberbytes):
+                return self.buffer[self.pos:self.pos+numberbytes]
+            def tell(self):
+                return self.pos
+
+        mocktime.return_value = 0
+
+        sharefile = MockFile()
+        def call_open(fname, mode):
+            self.failUnlessReallyEqual(fname, 'testdir/shares/incoming/or/orsxg5dtorxxeylhmvpws3temv4a/0' )
+            return sharefile
+
+        mockopen.side_effect = call_open
+        # Now begin the test.
+        alreadygot, bs = self.s.remote_allocate_buckets('teststorage_index', 'x'*32, 'y'*32, set((0,)), 1, mock.Mock())
+        print bs
+        bs[0].remote_write(0, 'a')
+        self.failUnlessReallyEqual(sharefile.buffer, share_file_data)
+
+
+    @mock.patch('os.path.exists')
+    @mock.patch('os.path.getsize')
+    @mock.patch('__builtin__.open')
+    @mock.patch('os.listdir')
+    def test_read_share(self, mocklistdir, mockopen, mockgetsize, mockexists):
+        """ This tests whether the code correctly finds and reads
+        shares written out by old (Tahoe-LAFS <= v1.8.2)
+        servers. There is a similar test in test_download, but that one
+        is from the perspective of the client and exercises a deeper
+        stack of code. This one is for exercising just the
+        StorageServer object. """
+
+        def call_listdir(dirname):
+            self.failUnlessReallyEqual(dirname,'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a')
+            return ['0']
+
+        mocklistdir.side_effect = call_listdir
+
+        def call_open(fname, mode):
+            self.failUnlessReallyEqual(fname, sharefname)
+            self.failUnless('r' in mode, mode)
+            self.failUnless('b' in mode, mode)
+
+            return StringIO(share_file_data)
+        mockopen.side_effect = call_open
+
+        datalen = len(share_file_data)
+        def call_getsize(fname):
+            self.failUnlessReallyEqual(fname, sharefname)
+            return datalen
+        mockgetsize.side_effect = call_getsize
+
+        def call_exists(fname):
+            self.failUnlessReallyEqual(fname, sharefname)
+            return True
+        mockexists.side_effect = call_exists
+
+        # Now begin the test.
+        bs = self.s.remote_get_buckets('teststorage_index')
+
+        self.failUnlessEqual(len(bs), 1)
+        b = bs[0]
+        self.failUnlessReallyEqual(b.remote_read(0, datalen), share_data)
+        # If you try to read past the end you get as much data as is there.
+        self.failUnlessReallyEqual(b.remote_read(0, datalen+20), share_data)
+        # If you start reading past the end of the file you get the empty string.
+        self.failUnlessReallyEqual(b.remote_read(datalen+1, 3), '')
}
[test_server.py --> test_backends.py:  server.py: added testing of get_latencies in StorageServer
zooko@zooko.com**20110414224823
 Ignore-this: 3e266de570f725f768d18c131e2c6d8
 This patch tests both coverage and handling of small samples in the get_latencies method of StorageServer.  get_latencies now distinguishes between highly repetitive latencies and small sample sizes.  This is of most concern at the big end of the latency distribution, although the ambiguity increases in general as the sample size decreases.
] {
move ./src/allmydata/test/test_server.py ./src/allmydata/test/test_backends.py
hunk ./src/allmydata/storage/server.py 134
             samples = self.latencies[category][:]
             samples.sort()
             count = len(samples)
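+            # Percentiles drawn from a small sample are not trustworthy,
+            # so below this threshold the whole category reports None.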
+            if count < 1000:
+                output[category] = None
+                continue
+            samples.sort()
+            stats = {}
             stats["mean"] = sum(samples) / count
             stats["01_0_percentile"] = samples[int(0.01 * count)]
             stats["10_0_percentile"] = samples[int(0.1 * count)]
hunk ./src/allmydata/test/test_backends.py 21
 sharefname = 'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a/0'

 class TestServerConstruction(unittest.TestCase, ReallyEqualMixin):
+    @mock.patch('time.time')
+    @mock.patch('os.mkdir')
+    @mock.patch('__builtin__.open')
+    @mock.patch('os.listdir')
+    @mock.patch('os.path.isdir')
+    def test_create_server_null_backend(self, mockisdir, mocklistdir, mockopen, mockmkdir, mocktime):
+        """ This tests whether a server instance can be constructed
+        with a null backend. The server instance fails the test if it
+        tries to read or write to the file system. """
+
+        # Now begin the test.
+        s = StorageServer('testnodeidxxxxxxxxxx', backend=NullBackend(),stats_provider=mock.Mock())
+
+        # The null backend should not talk to the os.
+        self.failIf(mockisdir.called)
+        self.failIf(mocklistdir.called)
+        self.failIf(mockopen.called)
+        self.failIf(mockmkdir.called)
+        #self.failIf(mocktime.called)
+       
+        #  The server's representation should not change.
+        self.failUnlessReallyEqual(s.__repr__(),'<StorageServer orsxg5do>')
+
+        #  There should be no latencies when the backend is null.  *** The "cancel" category is left out to increase coverage.  This seems like a dubious decision.  Must consult with more knowledgeable persons.
+        numbersamples = 1001
+        for category in ["allocate","write","close","read","get",\
+                         "writev","readv","add-lease","renew"]:#,"cancel"]:
+            [s.add_latency(category,x) for x in numbersamples*[0]]
+        l = s.get_latencies()
+
+        # Now test that get_latencies correctly reports None for small sample-sizes.
+        s1 = StorageServer('testnodeidxxxxxxxxxx', backend=NullBackend(),stats_provider=mock.Mock())
+        #  There should be no latencies when the backend is null.  *** The "cancel" category is left out to increase coverage.  This seems like a dubious decision.  Must consult with more knowledgeable persons.
+        numbersamples = 10
+        for category in ["allocate","write","close","read","get",\
+                         "writev","readv","add-lease","renew"]:#,"cancel"]:
+            [s1.add_latency(category,x) for x in numbersamples*[0]]
+        l1 = s1.get_latencies()
+
+        for key in l1.keys():
+            self.failUnlessReallyEqual(l1[key],None)
+        # You passed!
+
+    @mock.patch('time.time')
+    @mock.patch('os.mkdir')
     @mock.patch('__builtin__.open')
     def test_create_server(self, mockopen):
         """ This tests whether a server instance can be constructed. """
}
[test_backends: cleaned whitespace, made test_get_latencies its own function
zooko@zooko.com**20110414231526
 Ignore-this: 483e7d4eb67cc0cbf780d8219156907e
] {
hunk ./src/allmydata/test/test_backends.py 32
         tries to read or write to the file system. """

         # Now begin the test.
-        s = StorageServer('testnodeidxxxxxxxxxx', backend=NullBackend(),stats_provider=mock.Mock())
+        s = StorageServer('testnodeidxxxxxxxxxx', backend=NullBackend(), stats_provider=mock.Mock())

         # The null backend should not talk to the os.
         self.failIf(mockisdir.called)
hunk ./src/allmydata/test/test_backends.py 40
         self.failIf(mockopen.called)
         self.failIf(mockmkdir.called)
         #self.failIf(mocktime.called)
-       
+
         #  The server's representation should not change.
hunk ./src/allmydata/test/test_backends.py 42
-        self.failUnlessReallyEqual(s.__repr__(),'<StorageServer orsxg5do>')
+        self.failUnlessReallyEqual(s.__repr__(), '<StorageServer orsxg5do>')
+
+        # You passed!
+
+    @mock.patch('time.time')
+    @mock.patch('os.mkdir')
+    @mock.patch('__builtin__.open')
+    @mock.patch('os.listdir')
+    @mock.patch('os.path.isdir')
+    def test_get_latencies(self, mockisdir, mocklistdir, mockopen, mockmkdir, mocktime):
+        """  There should be no latencies when the backend is null.  *** The "cancel" category is left out to increase coverage.  This seems like a dubious decision.  Must consult with more knowledgeable persons."""
+
+        # Now begin the test.
+        s = StorageServer('testnodeidxxxxxxxxxx', backend=NullBackend(), stats_provider=mock.Mock())
+
+        # The null backend should not talk to the os.
+        self.failIf(mockisdir.called)
+        self.failIf(mocklistdir.called)
+        self.failIf(mockopen.called)
+        self.failIf(mockmkdir.called)
+        #self.failIf(mocktime.called)

hunk ./src/allmydata/test/test_backends.py 64
-        #  There should be no latencies when the backend is null.  *** The "cancel" category is left out to increase coverage.  This seems like a dubious decision.  Must consult with more knowledgeable persons.
         numbersamples = 1001
hunk ./src/allmydata/test/test_backends.py 65
-        for category in ["allocate","write","close","read","get",\
-                         "writev","readv","add-lease","renew"]:#,"cancel"]:
-            [s.add_latency(category,x) for x in numbersamples*[0]]
+        for category in ["allocate", "write", "close", "read", "get", \
+                         "writev", "readv", "add-lease", "renew"]:#,"cancel"]:
+            [s.add_latency(category, x) for x in numbersamples*[0]]
         l = s.get_latencies()

         # Now test that get_latencies correctly reports None for small sample-sizes.
hunk ./src/allmydata/test/test_backends.py 71
-        s1 = StorageServer('testnodeidxxxxxxxxxx', backend=NullBackend(),stats_provider=mock.Mock())
+        s1 = StorageServer('testnodeidxxxxxxxxxx', backend=NullBackend(), stats_provider=mock.Mock())
         #  There should be no latencies when the backend is null.  *** The "cancel" category is left out to increase coverage.  This seems like a dubious decision.  Must consult with more knowledgeable persons.
         numbersamples = 10
hunk ./src/allmydata/test/test_backends.py 74
-        for category in ["allocate","write","close","read","get",\
-                         "writev","readv","add-lease","renew"]:#,"cancel"]:
-            [s1.add_latency(category,x) for x in numbersamples*[0]]
+        for category in ["allocate", "write", "close", "read", "get", \
+                         "writev", "readv", "add-lease", "renew"]:#, "cancel"]:
+            [s1.add_latency(category, x) for x in numbersamples*[0]]
         l1 = s1.get_latencies()

         for key in l1.keys():
hunk ./src/allmydata/test/test_backends.py 80
-            self.failUnlessReallyEqual(l1[key],None)
+            self.failUnlessReallyEqual(l1[key], None)
         # You passed!

     @mock.patch('time.time')
hunk ./src/allmydata/test/test_backends.py 167
         bs[0].remote_write(0, 'a')
         self.failUnlessReallyEqual(sharefile.buffer, share_file_data)

-
     @mock.patch('os.path.exists')
     @mock.patch('os.path.getsize')
     @mock.patch('__builtin__.open')
}

Context:

[Fix a test failure in test_package_initialization on Python 2.4.x due to exceptions being stringified differently than in later versions of Python. refs #1389
david-sarah@jacaranda.org**20110411190738
 Ignore-this: 7847d26bc117c328c679f08a7baee519
]
[tests: add test for including the ImportError message and traceback entry in the summary of errors from importing dependencies. refs #1389
david-sarah@jacaranda.org**20110410155844
 Ignore-this: fbecdbeb0d06a0f875fe8d4030aabafa
]
[allmydata/__init__.py: preserve the message and last traceback entry (file, line number, function, and source line) of ImportErrors in the package versions string. fixes #1389
david-sarah@jacaranda.org**20110410155705
 Ignore-this: 2f87b8b327906cf8bfca9440a0904900
]
[remove unused variable detected by pyflakes
zooko@zooko.com**20110407172231
 Ignore-this: 7344652d5e0720af822070d91f03daf9
]
[allmydata/__init__.py: Nicer reporting of unparseable version numbers in dependencies. fixes #1388
david-sarah@jacaranda.org**20110401202750
 Ignore-this: 9c6bd599259d2405e1caadbb3e0d8c7f
]
[update FTP-and-SFTP.rst: the necessary patch is included in Twisted-10.1
Brian Warner <warner@lothar.com>**20110325232511
 Ignore-this: d5307faa6900f143193bfbe14e0f01a
]
[control.py: remove all uses of s.get_serverid()
warner@lothar.com**20110227011203
 Ignore-this: f80a787953bd7fa3d40e828bde00e855
]
[web: remove some uses of s.get_serverid(), not all
warner@lothar.com**20110227011159
 Ignore-this: a9347d9cf6436537a47edc6efde9f8be
]
[immutable/downloader/fetcher.py: remove all get_serverid() calls
warner@lothar.com**20110227011156
 Ignore-this: fb5ef018ade1749348b546ec24f7f09a
]
[immutable/downloader/fetcher.py: fix diversity bug in server-response handling
warner@lothar.com**20110227011153
 Ignore-this: bcd62232c9159371ae8a16ff63d22c1b

 When blocks terminate (either COMPLETE or CORRUPT/DEAD/BADSEGNUM), the
 _shares_from_server dict was being popped incorrectly (using shnum as the
 index instead of serverid). I'm still thinking through the consequences of
 this bug. It was probably benign and really hard to detect. I think it would
 cause us to incorrectly believe that we're pulling too many shares from a
 server, and thus prefer a different server rather than asking for a second
 share from the first server. The diversity code is intended to spread out the
 number of shares simultaneously being requested from each server, but with
 this bug, it might be spreading out the total number of shares requested at
 all, not just simultaneously. (note that SegmentFetcher is scoped to a single
 segment, so the effect doesn't last very long).
]
[immutable/downloader/share.py: reduce get_serverid(), one left, update ext deps
warner@lothar.com**20110227011150
 Ignore-this: d8d56dd8e7b280792b40105e13664554

 test_download.py: create+check MyShare instances better, make sure they share
 Server objects, now that finder.py cares
]
[immutable/downloader/finder.py: reduce use of get_serverid(), one left
warner@lothar.com**20110227011146
 Ignore-this: 5785be173b491ae8a78faf5142892020
]
[immutable/offloaded.py: reduce use of get_serverid() a bit more
warner@lothar.com**20110227011142
 Ignore-this: b48acc1b2ae1b311da7f3ba4ffba38f
]
[immutable/upload.py: reduce use of get_serverid()
warner@lothar.com**20110227011138
 Ignore-this: ffdd7ff32bca890782119a6e9f1495f6
]
[immutable/checker.py: remove some uses of s.get_serverid(), not all
warner@lothar.com**20110227011134
 Ignore-this: e480a37efa9e94e8016d826c492f626e
]
[add remaining get_* methods to storage_client.Server, NoNetworkServer, and
warner@lothar.com**20110227011132
 Ignore-this: 6078279ddf42b179996a4b53bee8c421
 MockIServer stubs
]
[upload.py: rearrange _make_trackers a bit, no behavior changes
warner@lothar.com**20110227011128
 Ignore-this: 296d4819e2af452b107177aef6ebb40f
]
[happinessutil.py: finally rename merge_peers to merge_servers
warner@lothar.com**20110227011124
 Ignore-this: c8cd381fea1dd888899cb71e4f86de6e
]
[test_upload.py: factor out FakeServerTracker
warner@lothar.com**20110227011120
 Ignore-this: 6c182cba90e908221099472cc159325b
]
[test_upload.py: server-vs-tracker cleanup
warner@lothar.com**20110227011115
 Ignore-this: 2915133be1a3ba456e8603885437e03
]
[happinessutil.py: server-vs-tracker cleanup
warner@lothar.com**20110227011111
 Ignore-this: b856c84033562d7d718cae7cb01085a9
]
[upload.py: more tracker-vs-server cleanup
warner@lothar.com**20110227011107
 Ignore-this: bb75ed2afef55e47c085b35def2de315
]
[upload.py: fix var names to avoid confusion between 'trackers' and 'servers'
warner@lothar.com**20110227011103
 Ignore-this: 5d5e3415b7d2732d92f42413c25d205d
]
[refactor: s/peer/server/ in immutable/upload, happinessutil.py, test_upload
warner@lothar.com**20110227011100
 Ignore-this: 7ea858755cbe5896ac212a925840fe68

 No behavioral changes, just updating variable/method names and log messages.
 The effects outside these three files should be minimal: some exception
 messages changed (to say "server" instead of "peer"), and some internal class
 names were changed. A few things still use "peer" to minimize external
 changes, like UploadResults.timings["peer_selection"] and
 happinessutil.merge_peers, which can be changed later.
]
[storage_client.py: clean up test_add_server/test_add_descriptor, remove .test_servers
warner@lothar.com**20110227011056
 Ignore-this: efad933e78179d3d5fdcd6d1ef2b19cc
]
[test_client.py, upload.py:: remove KiB/MiB/etc constants, and other dead code
warner@lothar.com**20110227011051
 Ignore-this: dc83c5794c2afc4f81e592f689c0dc2d
]
[test: increase timeout on a network test because Francois's ARM machine hit that timeout
zooko@zooko.com**20110317165909
 Ignore-this: 380c345cdcbd196268ca5b65664ac85b
 I'm skeptical that the test was proceeding correctly but ran out of time. It seems more likely that it had gotten hung. But if we raise the timeout to an even more extravagant number then we can be even more certain that the test was never going to finish.
]
[docs/configuration.rst: add a "Frontend Configuration" section
Brian Warner <warner@lothar.com>**20110222014323
 Ignore-this: 657018aa501fe4f0efef9851628444ca

 this points to docs/frontends/*.rst, which were previously underlinked
]
[web/filenode.py: avoid calling req.finish() on closed HTTP connections. Closes #1366
"Brian Warner <warner@lothar.com>"**20110221061544
 Ignore-this: 799d4de19933f2309b3c0c19a63bb888
]
[Add unit tests for cross_check_pkg_resources_versus_import, and a regression test for ref #1355. This requires a little refactoring to make it testable.
david-sarah@jacaranda.org**20110221015817
 Ignore-this: 51d181698f8c20d3aca58b057e9c475a
]
[allmydata/__init__.py: .name was used in place of the correct .__name__ when printing an exception. Also, robustify string formatting by using %r instead of %s in some places. fixes #1355.
david-sarah@jacaranda.org**20110221020125
 Ignore-this: b0744ed58f161bf188e037bad077fc48
]
[Refactor StorageFarmBroker handling of servers
Brian Warner <warner@lothar.com>**20110221015804
 Ignore-this: 842144ed92f5717699b8f580eab32a51

 Pass around IServer instance instead of (peerid, rref) tuple. Replace
 "descriptor" with "server". Other replacements:

  get_all_servers -> get_connected_servers/get_known_servers
  get_servers_for_index -> get_servers_for_psi (now returns IServers)

 This change still needs to be pushed further down: lots of code is now
 getting the IServer and then distributing (peerid, rref) internally.
 Instead, it ought to distribute the IServer internally and delay
 extracting a serverid or rref until the last moment.

 no_network.py was updated to retain parallelism.
]
[TAG allmydata-tahoe-1.8.2
warner@lothar.com**20110131020101]
Patch bundle hash:
48f5cedd889bc9d05be6343375b5908a8c42d000