Ticket #999: checkpoint5.darcs.patch

File checkpoint5.darcs.patch, 121.7 KB (added by arch_o_median, at 2011-07-05T04:29:25Z)

more precise tests in TestServerFSBackend

Line 
1Fri Mar 25 14:35:14 MDT 2011  wilcoxjg@gmail.com
2  * storage: new mocking tests of storage server read and write
3  There are already tests of read and write functionality in test_storage.py, but those tests let the code under test use a real filesystem whereas these tests mock all file system calls.
4
5Fri Jun 24 14:28:50 MDT 2011  wilcoxjg@gmail.com
6  * server.py, test_backends.py, interfaces.py, immutable.py (others?): working patch for implementation of backends plugin
7  sloppy not for production
8
9Sat Jun 25 23:27:32 MDT 2011  wilcoxjg@gmail.com
10  * a temp patch used as a snapshot
11
12Sat Jun 25 23:32:44 MDT 2011  wilcoxjg@gmail.com
13  * snapshot of progress on backend implementation (not suitable for trunk)
14
15Sun Jun 26 10:57:15 MDT 2011  wilcoxjg@gmail.com
16  * checkpoint patch
17
18Tue Jun 28 14:22:02 MDT 2011  wilcoxjg@gmail.com
19  * checkpoint4
20
21Mon Jul  4 21:46:26 MDT 2011  wilcoxjg@gmail.com
22  * checkpoint5
23
24New patches:
25
26[storage: new mocking tests of storage server read and write
27wilcoxjg@gmail.com**20110325203514
28 Ignore-this: df65c3c4f061dd1516f88662023fdb41
29 There are already tests of read and write functionality in test_storage.py, but those tests let the code under test use a real filesystem whereas these tests mock all file system calls.
30] {
31addfile ./src/allmydata/test/test_server.py
32hunk ./src/allmydata/test/test_server.py 1
33+from twisted.trial import unittest
34+
35+from StringIO import StringIO
36+
37+from allmydata.test.common_util import ReallyEqualMixin
38+
39+import mock
40+
41+# This is the code that we're going to be testing.
42+from allmydata.storage.server import StorageServer
43+
44+# The following share file contents was generated with
45+# storage.immutable.ShareFile from Tahoe-LAFS v1.8.2
46+# with share data == 'a'.
47+share_data = 'a\x00\x00\x00\x00xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\x00(\xde\x80'
48+share_file_data = '\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x01' + share_data
49+
50+sharefname = 'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a/0'
51+
52+class TestServerConstruction(unittest.TestCase, ReallyEqualMixin):
53+    @mock.patch('__builtin__.open')
54+    def test_create_server(self, mockopen):
55+        """ This tests whether a server instance can be constructed. """
56+
57+        def call_open(fname, mode):
58+            if fname == 'testdir/bucket_counter.state':
59+                raise IOError(2, "No such file or directory: 'testdir/bucket_counter.state'")
60+            elif fname == 'testdir/lease_checker.state':
61+                raise IOError(2, "No such file or directory: 'testdir/lease_checker.state'")
62+            elif fname == 'testdir/lease_checker.history':
63+                return StringIO()
64+        mockopen.side_effect = call_open
65+
66+        # Now begin the test.
67+        s = StorageServer('testdir', 'testnodeidxxxxxxxxxx')
68+
69+        # You passed!
70+
71+class TestServer(unittest.TestCase, ReallyEqualMixin):
72+    @mock.patch('__builtin__.open')
73+    def setUp(self, mockopen):
74+        def call_open(fname, mode):
75+            if fname == 'testdir/bucket_counter.state':
76+                raise IOError(2, "No such file or directory: 'testdir/bucket_counter.state'")
77+            elif fname == 'testdir/lease_checker.state':
78+                raise IOError(2, "No such file or directory: 'testdir/lease_checker.state'")
79+            elif fname == 'testdir/lease_checker.history':
80+                return StringIO()
81+        mockopen.side_effect = call_open
82+
83+        self.s = StorageServer('testdir', 'testnodeidxxxxxxxxxx')
84+
85+
86+    @mock.patch('time.time')
87+    @mock.patch('os.mkdir')
88+    @mock.patch('__builtin__.open')
89+    @mock.patch('os.listdir')
90+    @mock.patch('os.path.isdir')
91+    def test_write_share(self, mockisdir, mocklistdir, mockopen, mockmkdir, mocktime):
92+        """Write a new share."""
93+
94+        def call_listdir(dirname):
95+            self.failUnlessReallyEqual(dirname, 'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a')
96+            raise OSError(2, "No such file or directory: 'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a'")
97+
98+        mocklistdir.side_effect = call_listdir
99+
100+        class MockFile:
101+            def __init__(self):
102+                self.buffer = ''
103+                self.pos = 0
104+            def write(self, instring):
105+                begin = self.pos
106+                padlen = begin - len(self.buffer)
107+                if padlen > 0:
108+                    self.buffer += '\x00' * padlen
109+                end = self.pos + len(instring)
110+                self.buffer = self.buffer[:begin]+instring+self.buffer[end:]
111+                self.pos = end
112+            def close(self):
113+                pass
114+            def seek(self, pos):
115+                self.pos = pos
116+            def read(self, numberbytes):
117+                return self.buffer[self.pos:self.pos+numberbytes]
118+            def tell(self):
119+                return self.pos
120+
121+        mocktime.return_value = 0
122+
123+        sharefile = MockFile()
124+        def call_open(fname, mode):
125+            self.failUnlessReallyEqual(fname, 'testdir/shares/incoming/or/orsxg5dtorxxeylhmvpws3temv4a/0' )
126+            return sharefile
127+
128+        mockopen.side_effect = call_open
129+        # Now begin the test.
130+        alreadygot, bs = self.s.remote_allocate_buckets('teststorage_index', 'x'*32, 'y'*32, set((0,)), 1, mock.Mock())
131+        print bs
132+        bs[0].remote_write(0, 'a')
133+        self.failUnlessReallyEqual(sharefile.buffer, share_file_data)
134+
135+
136+    @mock.patch('os.path.exists')
137+    @mock.patch('os.path.getsize')
138+    @mock.patch('__builtin__.open')
139+    @mock.patch('os.listdir')
140+    def test_read_share(self, mocklistdir, mockopen, mockgetsize, mockexists):
141+        """ This tests whether the code correctly finds and reads
142+        shares written out by old (Tahoe-LAFS <= v1.8.2)
143+        servers. There is a similar test in test_download, but that one
144+        is from the perspective of the client and exercises a deeper
145+        stack of code. This one is for exercising just the
146+        StorageServer object. """
147+
148+        def call_listdir(dirname):
149+            self.failUnlessReallyEqual(dirname,'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a')
150+            return ['0']
151+
152+        mocklistdir.side_effect = call_listdir
153+
154+        def call_open(fname, mode):
155+            self.failUnlessReallyEqual(fname, sharefname)
156+            self.failUnless('r' in mode, mode)
157+            self.failUnless('b' in mode, mode)
158+
159+            return StringIO(share_file_data)
160+        mockopen.side_effect = call_open
161+
162+        datalen = len(share_file_data)
163+        def call_getsize(fname):
164+            self.failUnlessReallyEqual(fname, sharefname)
165+            return datalen
166+        mockgetsize.side_effect = call_getsize
167+
168+        def call_exists(fname):
169+            self.failUnlessReallyEqual(fname, sharefname)
170+            return True
171+        mockexists.side_effect = call_exists
172+
173+        # Now begin the test.
174+        bs = self.s.remote_get_buckets('teststorage_index')
175+
176+        self.failUnlessEqual(len(bs), 1)
177+        b = bs[0]
178+        self.failUnlessReallyEqual(b.remote_read(0, datalen), share_data)
179+        # If you try to read past the end you get as much data as is there.
180+        self.failUnlessReallyEqual(b.remote_read(0, datalen+20), share_data)
181+        # If you start reading past the end of the file you get the empty string.
182+        self.failUnlessReallyEqual(b.remote_read(datalen+1, 3), '')
183}
184[server.py, test_backends.py, interfaces.py, immutable.py (others?): working patch for implementation of backends plugin
185wilcoxjg@gmail.com**20110624202850
186 Ignore-this: ca6f34987ee3b0d25cac17c1fc22d50c
187 sloppy not for production
188] {
189move ./src/allmydata/test/test_server.py ./src/allmydata/test/test_backends.py
190hunk ./src/allmydata/storage/crawler.py 13
191     pass
192 
193 class ShareCrawler(service.MultiService):
194-    """A ShareCrawler subclass is attached to a StorageServer, and
194+    """A subclass of ShareCrawler is attached to a StorageServer, and
196     periodically walks all of its shares, processing each one in some
197     fashion. This crawl is rate-limited, to reduce the IO burden on the host,
198     since large servers can easily have a terabyte of shares, in several
199hunk ./src/allmydata/storage/crawler.py 31
200     We assume that the normal upload/download/get_buckets traffic of a tahoe
201     grid will cause the prefixdir contents to be mostly cached in the kernel,
202     or that the number of buckets in each prefixdir will be small enough to
203-    load quickly. A 1TB allmydata.com server was measured to have 2.56M
204+    load quickly. A 1TB allmydata.com server was measured to have 2.56 * 10^6
205     buckets, spread into the 1024 prefixdirs, with about 2500 buckets per
206     prefix. On this server, each prefixdir took 130ms-200ms to list the first
207     time, and 17ms to list the second time.
208hunk ./src/allmydata/storage/crawler.py 68
209     cpu_slice = 1.0 # use up to 1.0 seconds before yielding
210     minimum_cycle_time = 300 # don't run a cycle faster than this
211 
212-    def __init__(self, server, statefile, allowed_cpu_percentage=None):
213+    def __init__(self, backend, statefile, allowed_cpu_percentage=None):
214         service.MultiService.__init__(self)
215         if allowed_cpu_percentage is not None:
216             self.allowed_cpu_percentage = allowed_cpu_percentage
217hunk ./src/allmydata/storage/crawler.py 72
218-        self.server = server
219-        self.sharedir = server.sharedir
220-        self.statefile = statefile
221+        self.backend = backend
222         self.prefixes = [si_b2a(struct.pack(">H", i << (16-10)))[:2]
223                          for i in range(2**10)]
224         self.prefixes.sort()
225hunk ./src/allmydata/storage/crawler.py 446
226 
227     minimum_cycle_time = 60*60 # we don't need this more than once an hour
228 
229-    def __init__(self, server, statefile, num_sample_prefixes=1):
230-        ShareCrawler.__init__(self, server, statefile)
231+    def __init__(self, statefile, num_sample_prefixes=1):
232+        ShareCrawler.__init__(self, statefile)
233         self.num_sample_prefixes = num_sample_prefixes
234 
235     def add_initial_state(self):
236hunk ./src/allmydata/storage/expirer.py 15
237     removed.
238 
239     I collect statistics on the leases and make these available to a web
240-    status page, including::
241+    status page, including:
242 
243     Space recovered during this cycle-so-far:
244      actual (only if expiration_enabled=True):
245hunk ./src/allmydata/storage/expirer.py 51
246     slow_start = 360 # wait 6 minutes after startup
247     minimum_cycle_time = 12*60*60 # not more than twice per day
248 
249-    def __init__(self, server, statefile, historyfile,
250+    def __init__(self, statefile, historyfile,
251                  expiration_enabled, mode,
252                  override_lease_duration, # used if expiration_mode=="age"
253                  cutoff_date, # used if expiration_mode=="cutoff-date"
254hunk ./src/allmydata/storage/expirer.py 71
255         else:
256             raise ValueError("GC mode '%s' must be 'age' or 'cutoff-date'" % mode)
257         self.sharetypes_to_expire = sharetypes
258-        ShareCrawler.__init__(self, server, statefile)
259+        ShareCrawler.__init__(self, statefile)
260 
261     def add_initial_state(self):
262         # we fill ["cycle-to-date"] here (even though they will be reset in
263hunk ./src/allmydata/storage/immutable.py 44
264     sharetype = "immutable"
265 
266     def __init__(self, filename, max_size=None, create=False):
267-        """ If max_size is not None then I won't allow more than max_size to be written to me. If create=True and max_size must not be None. """
268+        """ If max_size is not None then I won't allow more than
269+        max_size to be written to me. If create=True then max_size
270+        must not be None. """
271         precondition((max_size is not None) or (not create), max_size, create)
272         self.home = filename
273         self._max_size = max_size
274hunk ./src/allmydata/storage/immutable.py 87
275 
276     def read_share_data(self, offset, length):
277         precondition(offset >= 0)
278-        # reads beyond the end of the data are truncated. Reads that start
279-        # beyond the end of the data return an empty string. I wonder why
280-        # Python doesn't do the following computation for me?
281+        # Reads beyond the end of the data are truncated. Reads that start
282+        # beyond the end of the data return an empty string.
283         seekpos = self._data_offset+offset
284         fsize = os.path.getsize(self.home)
285         actuallength = max(0, min(length, fsize-seekpos))
286hunk ./src/allmydata/storage/immutable.py 198
287             space_freed += os.stat(self.home)[stat.ST_SIZE]
288             self.unlink()
289         return space_freed
290+class NullBucketWriter(Referenceable):
291+    implements(RIBucketWriter)
292 
293hunk ./src/allmydata/storage/immutable.py 201
294+    def remote_write(self, offset, data):
295+        return
296 
297 class BucketWriter(Referenceable):
298     implements(RIBucketWriter)
299hunk ./src/allmydata/storage/server.py 7
300 from twisted.application import service
301 
302 from zope.interface import implements
303-from allmydata.interfaces import RIStorageServer, IStatsProducer
304+from allmydata.interfaces import RIStorageServer, IStatsProducer, IShareStore
305 from allmydata.util import fileutil, idlib, log, time_format
306 import allmydata # for __full_version__
307 
308hunk ./src/allmydata/storage/server.py 16
309 from allmydata.storage.lease import LeaseInfo
310 from allmydata.storage.mutable import MutableShareFile, EmptyShare, \
311      create_mutable_sharefile
312-from allmydata.storage.immutable import ShareFile, BucketWriter, BucketReader
313+from allmydata.storage.immutable import ShareFile, NullBucketWriter, BucketWriter, BucketReader
314 from allmydata.storage.crawler import BucketCountingCrawler
315 from allmydata.storage.expirer import LeaseCheckingCrawler
316 
317hunk ./src/allmydata/storage/server.py 20
318+from zope.interface import implements
319+
320+# A Backend is a MultiService so that its server's crawlers (if the server has any) can
321+# be started and stopped.
322+class Backend(service.MultiService):
323+    implements(IStatsProducer)
324+    def __init__(self):
325+        service.MultiService.__init__(self)
326+
327+    def get_bucket_shares(self):
328+        """XXX"""
329+        raise NotImplementedError
330+
331+    def get_share(self):
332+        """XXX"""
333+        raise NotImplementedError
334+
335+    def make_bucket_writer(self):
336+        """XXX"""
337+        raise NotImplementedError
338+
339+class NullBackend(Backend):
340+    def __init__(self):
341+        Backend.__init__(self)
342+
343+    def get_available_space(self):
344+        return None
345+
346+    def get_bucket_shares(self, storage_index):
347+        return set()
348+
349+    def get_share(self, storage_index, sharenum):
350+        return None
351+
352+    def make_bucket_writer(self, storage_index, shnum, max_space_per_bucket, lease_info, canary):
353+        return NullBucketWriter()
354+
355+class FSBackend(Backend):
356+    def __init__(self, storedir, readonly=False, reserved_space=0):
357+        Backend.__init__(self)
358+
359+        self._setup_storage(storedir, readonly, reserved_space)
360+        self._setup_corruption_advisory()
361+        self._setup_bucket_counter()
362+        self._setup_lease_checkerf()
363+
364+    def _setup_storage(self, storedir, readonly, reserved_space):
365+        self.storedir = storedir
366+        self.readonly = readonly
367+        self.reserved_space = int(reserved_space)
368+        if self.reserved_space:
369+            if self.get_available_space() is None:
370+                log.msg("warning: [storage]reserved_space= is set, but this platform does not support an API to get disk statistics (statvfs(2) or GetDiskFreeSpaceEx), so this reservation cannot be honored",
371+                        umid="0wZ27w", level=log.UNUSUAL)
372+
373+        self.sharedir = os.path.join(self.storedir, "shares")
374+        fileutil.make_dirs(self.sharedir)
375+        self.incomingdir = os.path.join(self.sharedir, 'incoming')
376+        self._clean_incomplete()
377+
378+    def _clean_incomplete(self):
379+        fileutil.rm_dir(self.incomingdir)
380+        fileutil.make_dirs(self.incomingdir)
381+
382+    def _setup_corruption_advisory(self):
383+        # we don't actually create the corruption-advisory dir until necessary
384+        self.corruption_advisory_dir = os.path.join(self.storedir,
385+                                                    "corruption-advisories")
386+
387+    def _setup_bucket_counter(self):
388+        statefile = os.path.join(self.storedir, "bucket_counter.state")
389+        self.bucket_counter = BucketCountingCrawler(statefile)
390+        self.bucket_counter.setServiceParent(self)
391+
392+    def _setup_lease_checkerf(self):
393+        statefile = os.path.join(self.storedir, "lease_checker.state")
394+        historyfile = os.path.join(self.storedir, "lease_checker.history")
395+        self.lease_checker = LeaseCheckingCrawler(statefile, historyfile,
396+                                   expiration_enabled, expiration_mode,
397+                                   expiration_override_lease_duration,
398+                                   expiration_cutoff_date,
399+                                   expiration_sharetypes)
400+        self.lease_checker.setServiceParent(self)
401+
402+    def get_available_space(self):
403+        if self.readonly:
404+            return 0
405+        return fileutil.get_available_space(self.storedir, self.reserved_space)
406+
407+    def get_bucket_shares(self, storage_index):
408+        """Return a list of (shnum, pathname) tuples for files that hold
409+        shares for this storage_index. In each tuple, 'shnum' will always be
410+        the integer form of the last component of 'pathname'."""
411+        storagedir = os.path.join(self.sharedir, storage_index_to_dir(storage_index))
412+        try:
413+            for f in os.listdir(storagedir):
414+                if NUM_RE.match(f):
415+                    filename = os.path.join(storagedir, f)
416+                    yield (int(f), filename)
417+        except OSError:
418+            # Commonly caused by there being no buckets at all.
419+            pass
420+
421 # storage/
422 # storage/shares/incoming
423 #   incoming/ holds temp dirs named $START/$STORAGEINDEX/$SHARENUM which will
424hunk ./src/allmydata/storage/server.py 143
425     name = 'storage'
426     LeaseCheckerClass = LeaseCheckingCrawler
427 
428-    def __init__(self, storedir, nodeid, reserved_space=0,
429-                 discard_storage=False, readonly_storage=False,
430+    def __init__(self, nodeid, backend, reserved_space=0,
431+                 readonly_storage=False,
432                  stats_provider=None,
433                  expiration_enabled=False,
434                  expiration_mode="age",
435hunk ./src/allmydata/storage/server.py 155
436         assert isinstance(nodeid, str)
437         assert len(nodeid) == 20
438         self.my_nodeid = nodeid
439-        self.storedir = storedir
440-        sharedir = os.path.join(storedir, "shares")
441-        fileutil.make_dirs(sharedir)
442-        self.sharedir = sharedir
443-        # we don't actually create the corruption-advisory dir until necessary
444-        self.corruption_advisory_dir = os.path.join(storedir,
445-                                                    "corruption-advisories")
446-        self.reserved_space = int(reserved_space)
447-        self.no_storage = discard_storage
448-        self.readonly_storage = readonly_storage
449         self.stats_provider = stats_provider
450         if self.stats_provider:
451             self.stats_provider.register_producer(self)
452hunk ./src/allmydata/storage/server.py 158
453-        self.incomingdir = os.path.join(sharedir, 'incoming')
454-        self._clean_incomplete()
455-        fileutil.make_dirs(self.incomingdir)
456         self._active_writers = weakref.WeakKeyDictionary()
457hunk ./src/allmydata/storage/server.py 159
458+        self.backend = backend
459+        self.backend.setServiceParent(self)
460         log.msg("StorageServer created", facility="tahoe.storage")
461 
462hunk ./src/allmydata/storage/server.py 163
463-        if reserved_space:
464-            if self.get_available_space() is None:
465-                log.msg("warning: [storage]reserved_space= is set, but this platform does not support an API to get disk statistics (statvfs(2) or GetDiskFreeSpaceEx), so this reservation cannot be honored",
466-                        umin="0wZ27w", level=log.UNUSUAL)
467-
468         self.latencies = {"allocate": [], # immutable
469                           "write": [],
470                           "close": [],
471hunk ./src/allmydata/storage/server.py 174
472                           "renew": [],
473                           "cancel": [],
474                           }
475-        self.add_bucket_counter()
476-
477-        statefile = os.path.join(self.storedir, "lease_checker.state")
478-        historyfile = os.path.join(self.storedir, "lease_checker.history")
479-        klass = self.LeaseCheckerClass
480-        self.lease_checker = klass(self, statefile, historyfile,
481-                                   expiration_enabled, expiration_mode,
482-                                   expiration_override_lease_duration,
483-                                   expiration_cutoff_date,
484-                                   expiration_sharetypes)
485-        self.lease_checker.setServiceParent(self)
486 
487     def __repr__(self):
488         return "<StorageServer %s>" % (idlib.shortnodeid_b2a(self.my_nodeid),)
489hunk ./src/allmydata/storage/server.py 178
490 
491-    def add_bucket_counter(self):
492-        statefile = os.path.join(self.storedir, "bucket_counter.state")
493-        self.bucket_counter = BucketCountingCrawler(self, statefile)
494-        self.bucket_counter.setServiceParent(self)
495-
496     def count(self, name, delta=1):
497         if self.stats_provider:
498             self.stats_provider.count("storage_server." + name, delta)
499hunk ./src/allmydata/storage/server.py 233
500             kwargs["facility"] = "tahoe.storage"
501         return log.msg(*args, **kwargs)
502 
503-    def _clean_incomplete(self):
504-        fileutil.rm_dir(self.incomingdir)
505-
506     def get_stats(self):
507         # remember: RIStatsProvider requires that our return dict
508         # contains numeric values.
509hunk ./src/allmydata/storage/server.py 269
510             stats['storage_server.total_bucket_count'] = bucket_count
511         return stats
512 
513-    def get_available_space(self):
514-        """Returns available space for share storage in bytes, or None if no
515-        API to get this information is available."""
516-
517-        if self.readonly_storage:
518-            return 0
519-        return fileutil.get_available_space(self.storedir, self.reserved_space)
520-
521     def allocated_size(self):
522         space = 0
523         for bw in self._active_writers:
524hunk ./src/allmydata/storage/server.py 276
525         return space
526 
527     def remote_get_version(self):
528-        remaining_space = self.get_available_space()
529+        remaining_space = self.backend.get_available_space()
530         if remaining_space is None:
531             # We're on a platform that has no API to get disk stats.
532             remaining_space = 2**64
533hunk ./src/allmydata/storage/server.py 301
534         self.count("allocate")
535         alreadygot = set()
536         bucketwriters = {} # k: shnum, v: BucketWriter
537-        si_dir = storage_index_to_dir(storage_index)
538-        si_s = si_b2a(storage_index)
539 
540hunk ./src/allmydata/storage/server.py 302
541+        si_s = si_b2a(storage_index)
542         log.msg("storage: allocate_buckets %s" % si_s)
543 
544         # in this implementation, the lease information (including secrets)
545hunk ./src/allmydata/storage/server.py 316
546 
547         max_space_per_bucket = allocated_size
548 
549-        remaining_space = self.get_available_space()
550+        remaining_space = self.backend.get_available_space()
551         limited = remaining_space is not None
552         if limited:
553             # this is a bit conservative, since some of this allocated_size()
554hunk ./src/allmydata/storage/server.py 329
555         # they asked about: this will save them a lot of work. Add or update
556         # leases for all of them: if they want us to hold shares for this
557         # file, they'll want us to hold leases for this file.
558-        for (shnum, fn) in self._get_bucket_shares(storage_index):
559+        for (shnum, fn) in self.backend.get_bucket_shares(storage_index):
560             alreadygot.add(shnum)
561             sf = ShareFile(fn)
562             sf.add_or_renew_lease(lease_info)
563hunk ./src/allmydata/storage/server.py 335
564 
565         for shnum in sharenums:
566-            incominghome = os.path.join(self.incomingdir, si_dir, "%d" % shnum)
567-            finalhome = os.path.join(self.sharedir, si_dir, "%d" % shnum)
568-            if os.path.exists(finalhome):
569+            share = self.backend.get_share(storage_index, shnum)
570+
571+            if not share:
572+                if (not limited) or (remaining_space >= max_space_per_bucket):
573+                    # ok! we need to create the new share file.
574+                    bw = self.backend.make_bucket_writer(storage_index, shnum,
575+                                      max_space_per_bucket, lease_info, canary)
576+                    bucketwriters[shnum] = bw
577+                    self._active_writers[bw] = 1
578+                    if limited:
579+                        remaining_space -= max_space_per_bucket
580+                else:
581+                    # bummer! not enough space to accept this bucket
582+                    pass
583+
584+            elif share.is_complete():
585                 # great! we already have it. easy.
586                 pass
587hunk ./src/allmydata/storage/server.py 353
588-            elif os.path.exists(incominghome):
589+            elif not share.is_complete():
590                 # Note that we don't create BucketWriters for shnums that
591                 # have a partial share (in incoming/), so if a second upload
592                 # occurs while the first is still in progress, the second
593hunk ./src/allmydata/storage/server.py 359
594                 # uploader will use different storage servers.
595                 pass
596-            elif (not limited) or (remaining_space >= max_space_per_bucket):
597-                # ok! we need to create the new share file.
598-                bw = BucketWriter(self, incominghome, finalhome,
599-                                  max_space_per_bucket, lease_info, canary)
600-                if self.no_storage:
601-                    bw.throw_out_all_data = True
602-                bucketwriters[shnum] = bw
603-                self._active_writers[bw] = 1
604-                if limited:
605-                    remaining_space -= max_space_per_bucket
606-            else:
607-                # bummer! not enough space to accept this bucket
608-                pass
609-
610-        if bucketwriters:
611-            fileutil.make_dirs(os.path.join(self.sharedir, si_dir))
612 
613         self.add_latency("allocate", time.time() - start)
614         return alreadygot, bucketwriters
615hunk ./src/allmydata/storage/server.py 437
616             self.stats_provider.count('storage_server.bytes_added', consumed_size)
617         del self._active_writers[bw]
618 
619-    def _get_bucket_shares(self, storage_index):
620-        """Return a list of (shnum, pathname) tuples for files that hold
621-        shares for this storage_index. In each tuple, 'shnum' will always be
622-        the integer form of the last component of 'pathname'."""
623-        storagedir = os.path.join(self.sharedir, storage_index_to_dir(storage_index))
624-        try:
625-            for f in os.listdir(storagedir):
626-                if NUM_RE.match(f):
627-                    filename = os.path.join(storagedir, f)
628-                    yield (int(f), filename)
629-        except OSError:
630-            # Commonly caused by there being no buckets at all.
631-            pass
632 
633     def remote_get_buckets(self, storage_index):
634         start = time.time()
635hunk ./src/allmydata/storage/server.py 444
636         si_s = si_b2a(storage_index)
637         log.msg("storage: get_buckets %s" % si_s)
638         bucketreaders = {} # k: sharenum, v: BucketReader
639-        for shnum, filename in self._get_bucket_shares(storage_index):
640+        for shnum, filename in self.backend.get_bucket_shares(storage_index):
641             bucketreaders[shnum] = BucketReader(self, filename,
642                                                 storage_index, shnum)
643         self.add_latency("get", time.time() - start)
644hunk ./src/allmydata/test/test_backends.py 10
645 import mock
646 
647 # This is the code that we're going to be testing.
648-from allmydata.storage.server import StorageServer
649+from allmydata.storage.server import StorageServer, FSBackend, NullBackend
650 
651 # The following share file contents was generated with
652 # storage.immutable.ShareFile from Tahoe-LAFS v1.8.2
653hunk ./src/allmydata/test/test_backends.py 21
654 sharefname = 'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a/0'
655 
656 class TestServerConstruction(unittest.TestCase, ReallyEqualMixin):
657+    @mock.patch('time.time')
658+    @mock.patch('os.mkdir')
659+    @mock.patch('__builtin__.open')
660+    @mock.patch('os.listdir')
661+    @mock.patch('os.path.isdir')
662+    def test_create_server_null_backend(self, mockisdir, mocklistdir, mockopen, mockmkdir, mocktime):
663+        """ This tests whether a server instance can be constructed
664+        with a null backend. The server instance fails the test if it
665+        tries to read or write to the file system. """
666+
667+        # Now begin the test.
668+        s = StorageServer('testnodeidxxxxxxxxxx', backend=NullBackend())
669+
670+        self.failIf(mockisdir.called)
671+        self.failIf(mocklistdir.called)
672+        self.failIf(mockopen.called)
673+        self.failIf(mockmkdir.called)
674+
675+        # You passed!
676+
677+    @mock.patch('time.time')
678+    @mock.patch('os.mkdir')
679     @mock.patch('__builtin__.open')
680hunk ./src/allmydata/test/test_backends.py 44
681-    def test_create_server(self, mockopen):
682-        """ This tests whether a server instance can be constructed. """
683+    @mock.patch('os.listdir')
684+    @mock.patch('os.path.isdir')
685+    def test_create_server_fs_backend(self, mockisdir, mocklistdir, mockopen, mockmkdir, mocktime):
686+        """ This tests whether a server instance can be constructed
687+        with a filesystem backend. To pass the test, it has to use the
688+        filesystem in only the prescribed ways. """
689 
690         def call_open(fname, mode):
691             if fname == 'testdir/bucket_counter.state':
692hunk ./src/allmydata/test/test_backends.py 58
693                 raise IOError(2, "No such file or directory: 'testdir/lease_checker.state'")
694             elif fname == 'testdir/lease_checker.history':
695                 return StringIO()
696+            else:
697+                self.fail("Server with FS backend tried to open '%s' in mode '%s'" % (fname, mode))
698         mockopen.side_effect = call_open
699 
700         # Now begin the test.
701hunk ./src/allmydata/test/test_backends.py 63
702-        s = StorageServer('testdir', 'testnodeidxxxxxxxxxx')
703+        s = StorageServer('testnodeidxxxxxxxxxx', backend=FSBackend('teststoredir'))
704+
705+        self.failIf(mockisdir.called)
706+        self.failIf(mocklistdir.called)
707+        self.failIf(mockopen.called)
708+        self.failIf(mockmkdir.called)
709+        self.failIf(mocktime.called)
710 
711         # You passed!
712 
713hunk ./src/allmydata/test/test_backends.py 73
714-class TestServer(unittest.TestCase, ReallyEqualMixin):
715+class TestServerNullBackend(unittest.TestCase, ReallyEqualMixin):
716+    def setUp(self):
717+        self.s = StorageServer('testnodeidxxxxxxxxxx', backend=NullBackend())
718+
719+    @mock.patch('os.mkdir')
720+    @mock.patch('__builtin__.open')
721+    @mock.patch('os.listdir')
722+    @mock.patch('os.path.isdir')
723+    def test_write_share(self, mockisdir, mocklistdir, mockopen, mockmkdir):
724+        """ Write a new share. """
725+
726+        # Now begin the test.
727+        alreadygot, bs = self.s.remote_allocate_buckets('teststorage_index', 'x'*32, 'y'*32, set((0,)), 1, mock.Mock())
728+        bs[0].remote_write(0, 'a')
729+        self.failIf(mockisdir.called)
730+        self.failIf(mocklistdir.called)
731+        self.failIf(mockopen.called)
732+        self.failIf(mockmkdir.called)
733+
734+    @mock.patch('os.path.exists')
735+    @mock.patch('os.path.getsize')
736+    @mock.patch('__builtin__.open')
737+    @mock.patch('os.listdir')
738+    def test_read_share(self, mocklistdir, mockopen, mockgetsize, mockexists):
739+        """ This tests whether the code correctly finds and reads
740+        shares written out by old (Tahoe-LAFS <= v1.8.2)
741+        servers. There is a similar test in test_download, but that one
742+        is from the perspective of the client and exercises a deeper
743+        stack of code. This one is for exercising just the
744+        StorageServer object. """
745+
746+        # Now begin the test.
747+        bs = self.s.remote_get_buckets('teststorage_index')
748+
749+        self.failUnlessEqual(len(bs), 0)
750+        self.failIf(mocklistdir.called)
751+        self.failIf(mockopen.called)
752+        self.failIf(mockgetsize.called)
753+        self.failIf(mockexists.called)
754+
755+
756+class TestServerFSBackend(unittest.TestCase, ReallyEqualMixin):
757     @mock.patch('__builtin__.open')
758     def setUp(self, mockopen):
759         def call_open(fname, mode):
760hunk ./src/allmydata/test/test_backends.py 126
761                 return StringIO()
762         mockopen.side_effect = call_open
763 
764-        self.s = StorageServer('testdir', 'testnodeidxxxxxxxxxx')
765-
766+        self.s = StorageServer('testnodeidxxxxxxxxxx', backend=FSBackend('teststoredir'))
767 
768     @mock.patch('time.time')
769     @mock.patch('os.mkdir')
770hunk ./src/allmydata/test/test_backends.py 134
771     @mock.patch('os.listdir')
772     @mock.patch('os.path.isdir')
773     def test_write_share(self, mockisdir, mocklistdir, mockopen, mockmkdir, mocktime):
774-        """Handle a report of corruption."""
775+        """ Write a new share. """
776 
777         def call_listdir(dirname):
778             self.failUnlessReallyEqual(dirname, 'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a')
779hunk ./src/allmydata/test/test_backends.py 173
780         mockopen.side_effect = call_open
781         # Now begin the test.
782         alreadygot, bs = self.s.remote_allocate_buckets('teststorage_index', 'x'*32, 'y'*32, set((0,)), 1, mock.Mock())
783-        print bs
784         bs[0].remote_write(0, 'a')
785         self.failUnlessReallyEqual(sharefile.buffer, share_file_data)
786 
787hunk ./src/allmydata/test/test_backends.py 176
788-
789     @mock.patch('os.path.exists')
790     @mock.patch('os.path.getsize')
791     @mock.patch('__builtin__.open')
792hunk ./src/allmydata/test/test_backends.py 218
793 
794         self.failUnlessEqual(len(bs), 1)
795         b = bs[0]
796+        # These should match by definition, the next two cases cover cases without (completely) unambiguous behaviors.
797         self.failUnlessReallyEqual(b.remote_read(0, datalen), share_data)
798         # If you try to read past the end you get the as much data as is there.
799         self.failUnlessReallyEqual(b.remote_read(0, datalen+20), share_data)
800hunk ./src/allmydata/test/test_backends.py 224
801         # If you start reading past the end of the file you get the empty string.
802         self.failUnlessReallyEqual(b.remote_read(datalen+1, 3), '')
803+
804+
805}
806[a temp patch used as a snapshot
807wilcoxjg@gmail.com**20110626052732
808 Ignore-this: 95f05e314eaec870afa04c76d979aa44
809] {
810hunk ./docs/configuration.rst 637
811   [storage]
812   enabled = True
813   readonly = True
814-  sizelimit = 10000000000
815 
816 
817   [helper]
818hunk ./docs/garbage-collection.rst 16
819 
820 When a file or directory in the virtual filesystem is no longer referenced,
821 the space that its shares occupied on each storage server can be freed,
822-making room for other shares. Tahoe currently uses a garbage collection
823+making room for other shares. Tahoe uses a garbage collection
824 ("GC") mechanism to implement this space-reclamation process. Each share has
825 one or more "leases", which are managed by clients who want the
826 file/directory to be retained. The storage server accepts each share for a
827hunk ./docs/garbage-collection.rst 34
828 the `<lease-tradeoffs.svg>`_ diagram to get an idea for the tradeoffs involved.
829 If lease renewal occurs quickly and with 100% reliability, than any renewal
830 time that is shorter than the lease duration will suffice, but a larger ratio
831-of duration-over-renewal-time will be more robust in the face of occasional
832+of lease duration to renewal time will be more robust in the face of occasional
833 delays or failures.
834 
835 The current recommended values for a small Tahoe grid are to renew the leases
836replace ./docs/garbage-collection.rst [A-Za-z_0-9\-\.] Tahoe Tahoe-LAFS
837hunk ./src/allmydata/client.py 260
838             sharetypes.append("mutable")
839         expiration_sharetypes = tuple(sharetypes)
840 
841+        if self.get_config("storage", "backend", "filesystem") == "filesystem":
842+            xyz
843+        xyz
844         ss = StorageServer(storedir, self.nodeid,
845                            reserved_space=reserved,
846                            discard_storage=discard,
847hunk ./src/allmydata/storage/crawler.py 234
848         f = open(tmpfile, "wb")
849         pickle.dump(self.state, f)
850         f.close()
851-        fileutil.move_into_place(tmpfile, self.statefile)
852+        fileutil.move_into_place(tmpfile, self.statefname)
853 
854     def startService(self):
855         # arrange things to look like we were just sleeping, so
856}
857[snapshot of progress on backend implementation (not suitable for trunk)
858wilcoxjg@gmail.com**20110626053244
859 Ignore-this: 50c764af791c2b99ada8289546806a0a
860] {
861adddir ./src/allmydata/storage/backends
862adddir ./src/allmydata/storage/backends/das
863move ./src/allmydata/storage/expirer.py ./src/allmydata/storage/backends/das/expirer.py
864adddir ./src/allmydata/storage/backends/null
865hunk ./src/allmydata/interfaces.py 270
866         store that on disk.
867         """
868 
869+class IStorageBackend(Interface):
870+    """
871+    Objects of this kind live on the server side and are used by the
872+    storage server object.
873+    """
874+    def get_available_space(self, reserved_space):
875+        """ Returns available space for share storage in bytes, or
876+        None if this information is not available or if the available
877+        space is unlimited.
878+
879+        If the backend is configured for read-only mode then this will
880+        return 0.
881+
882+        reserved_space is how many bytes to subtract from the answer, so
883+        you can pass how many bytes you would like to leave unused on this
884+        filesystem as reserved_space. """
885+
886+    def get_bucket_shares(self):
887+        """XXX"""
888+
889+    def get_share(self):
890+        """XXX"""
891+
892+    def make_bucket_writer(self):
893+        """XXX"""
894+
895+class IStorageBackendShare(Interface):
896+    """
897+    This object contains as much as all of the share data.  It is intended
898+    for lazy evaluation such that in many use cases substantially less than
899+    all of the share data will be accessed.
900+    """
901+    def is_complete(self):
902+        """
903+        Returns the share state, or None if the share does not exist.
904+        """
905+
906 class IStorageBucketWriter(Interface):
907     """
908     Objects of this kind live on the client side.
909hunk ./src/allmydata/interfaces.py 2492
910 
911 class EmptyPathnameComponentError(Exception):
912     """The webapi disallows empty pathname components."""
913+
914+class IShareStore(Interface):
915+    pass
916+
917addfile ./src/allmydata/storage/backends/__init__.py
918addfile ./src/allmydata/storage/backends/das/__init__.py
919addfile ./src/allmydata/storage/backends/das/core.py
920hunk ./src/allmydata/storage/backends/das/core.py 1
921+from allmydata.interfaces import IStorageBackend
922+from allmydata.storage.backends.base import Backend
923+from allmydata.storage.common import si_b2a, si_a2b, storage_index_to_dir
924+from allmydata.util.assertutil import precondition
925+
926+import os, re, weakref, struct, time
927+
928+from foolscap.api import Referenceable
929+from twisted.application import service
930+
931+from zope.interface import implements
932+from allmydata.interfaces import RIStorageServer, IStatsProducer, IShareStore
933+from allmydata.util import fileutil, idlib, log, time_format
934+import allmydata # for __full_version__
935+
936+from allmydata.storage.common import si_b2a, si_a2b, storage_index_to_dir
937+_pyflakes_hush = [si_b2a, si_a2b, storage_index_to_dir] # re-exported
938+from allmydata.storage.lease import LeaseInfo
939+from allmydata.storage.mutable import MutableShareFile, EmptyShare, \
940+     create_mutable_sharefile
941+from allmydata.storage.backends.das.immutable import NullBucketWriter, BucketWriter, BucketReader
942+from allmydata.storage.crawler import FSBucketCountingCrawler
943+from allmydata.storage.backends.das.expirer import FSLeaseCheckingCrawler
944+
945+from zope.interface import implements
946+
947+class DASCore(Backend):
948+    implements(IStorageBackend)
949+    def __init__(self, storedir, expiration_policy, readonly=False, reserved_space=0):
950+        Backend.__init__(self)
951+
952+        self._setup_storage(storedir, readonly, reserved_space)
953+        self._setup_corruption_advisory()
954+        self._setup_bucket_counter()
955+        self._setup_lease_checkerf(expiration_policy)
956+
957+    def _setup_storage(self, storedir, readonly, reserved_space):
958+        self.storedir = storedir
959+        self.readonly = readonly
960+        self.reserved_space = int(reserved_space)
961+        if self.reserved_space:
962+            if self.get_available_space() is None:
963+                log.msg("warning: [storage]reserved_space= is set, but this platform does not support an API to get disk statistics (statvfs(2) or GetDiskFreeSpaceEx), so this reservation cannot be honored",
964+                        umid="0wZ27w", level=log.UNUSUAL)
965+
966+        self.sharedir = os.path.join(self.storedir, "shares")
967+        fileutil.make_dirs(self.sharedir)
968+        self.incomingdir = os.path.join(self.sharedir, 'incoming')
969+        self._clean_incomplete()
970+
971+    def _clean_incomplete(self):
972+        fileutil.rm_dir(self.incomingdir)
973+        fileutil.make_dirs(self.incomingdir)
974+
975+    def _setup_corruption_advisory(self):
976+        # we don't actually create the corruption-advisory dir until necessary
977+        self.corruption_advisory_dir = os.path.join(self.storedir,
978+                                                    "corruption-advisories")
979+
980+    def _setup_bucket_counter(self):
981+        statefname = os.path.join(self.storedir, "bucket_counter.state")
982+        self.bucket_counter = FSBucketCountingCrawler(statefname)
983+        self.bucket_counter.setServiceParent(self)
984+
985+    def _setup_lease_checkerf(self, expiration_policy):
986+        statefile = os.path.join(self.storedir, "lease_checker.state")
987+        historyfile = os.path.join(self.storedir, "lease_checker.history")
988+        self.lease_checker = FSLeaseCheckingCrawler(statefile, historyfile, expiration_policy)
989+        self.lease_checker.setServiceParent(self)
990+
991+    def get_available_space(self):
992+        if self.readonly:
993+            return 0
994+        return fileutil.get_available_space(self.storedir, self.reserved_space)
995+
996+    def get_shares(self, storage_index):
997+        """Return a list of the FSBShare objects that correspond to the passed storage_index."""
998+        finalstoragedir = os.path.join(self.sharedir, storage_index_to_dir(storage_index))
999+        try:
1000+            for f in os.listdir(finalstoragedir):
1001+                if NUM_RE.match(f):
1002+                    filename = os.path.join(finalstoragedir, f)
1003+                    yield FSBShare(filename, int(f))
1004+        except OSError:
1005+            # Commonly caused by there being no buckets at all.
1006+            pass
1007+       
1008+    def make_bucket_writer(self, storage_index, shnum, max_space_per_bucket, lease_info, canary):
1009+        immsh = ImmutableShare(self.sharedir, storage_index, shnum, max_size=max_space_per_bucket, create=True)
1010+        bw = BucketWriter(self.ss, immsh, max_space_per_bucket, lease_info, canary)
1011+        return bw
1012+       
1013+
1014+# each share file (in storage/shares/$SI/$SHNUM) contains lease information
1015+# and share data. The share data is accessed by RIBucketWriter.write and
1016+# RIBucketReader.read . The lease information is not accessible through these
1017+# interfaces.
1018+
1019+# The share file has the following layout:
1020+#  0x00: share file version number, four bytes, current version is 1
1021+#  0x04: share data length, four bytes big-endian = A # See Footnote 1 below.
1022+#  0x08: number of leases, four bytes big-endian
1023+#  0x0c: beginning of share data (see immutable.layout.WriteBucketProxy)
1024+#  A+0x0c = B: first lease. Lease format is:
1025+#   B+0x00: owner number, 4 bytes big-endian, 0 is reserved for no-owner
1026+#   B+0x04: renew secret, 32 bytes (SHA256)
1027+#   B+0x24: cancel secret, 32 bytes (SHA256)
1028+#   B+0x44: expiration time, 4 bytes big-endian seconds-since-epoch
1029+#   B+0x48: next lease, or end of record
1030+
1031+# Footnote 1: as of Tahoe v1.3.0 this field is not used by storage servers,
1032+# but it is still filled in by storage servers in case the storage server
1033+# software gets downgraded from >= Tahoe v1.3.0 to < Tahoe v1.3.0, or the
1034+# share file is moved from one storage server to another. The value stored in
1035+# this field is truncated, so if the actual share data length is >= 2**32,
1036+# then the value stored in this field will be the actual share data length
1037+# modulo 2**32.
1038+
1039+class ImmutableShare:
1040+    LEASE_SIZE = struct.calcsize(">L32s32sL")
1041+    sharetype = "immutable"
1042+
1043+    def __init__(self, sharedir, storageindex, shnum, max_size=None, create=False):
1044+        """ If max_size is not None then I won't allow more than
1045+        max_size to be written to me. If create=True then max_size
1046+        must not be None. """
1047+        precondition((max_size is not None) or (not create), max_size, create)
1048+        self.shnum = shnum
1049+        self.fname = os.path.join(sharedir, storage_index_to_dir(storageindex), str(shnum))
1050+        self._max_size = max_size
1051+        if create:
1052+            # touch the file, so later callers will see that we're working on
1053+            # it. Also construct the metadata.
1054+            assert not os.path.exists(self.fname)
1055+            fileutil.make_dirs(os.path.dirname(self.fname))
1056+            f = open(self.fname, 'wb')
1057+            # The second field -- the four-byte share data length -- is no
1058+            # longer used as of Tahoe v1.3.0, but we continue to write it in
1059+            # there in case someone downgrades a storage server from >=
1060+            # Tahoe-1.3.0 to < Tahoe-1.3.0, or moves a share file from one
1061+            # server to another, etc. We do saturation -- a share data length
1062+            # larger than 2**32-1 (what can fit into the field) is marked as
1063+            # the largest length that can fit into the field. That way, even
1064+            # if this does happen, the old < v1.3.0 server will still allow
1065+            # clients to read the first part of the share.
1066+            f.write(struct.pack(">LLL", 1, min(2**32-1, max_size), 0))
1067+            f.close()
1068+            self._lease_offset = max_size + 0x0c
1069+            self._num_leases = 0
1070+        else:
1071+            f = open(self.fname, 'rb')
1072+            filesize = os.path.getsize(self.fname)
1073+            (version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc))
1074+            f.close()
1075+            if version != 1:
1076+                msg = "sharefile %s had version %d but we wanted 1" % \
1077+                      (self.fname, version)
1078+                raise UnknownImmutableContainerVersionError(msg)
1079+            self._num_leases = num_leases
1080+            self._lease_offset = filesize - (num_leases * self.LEASE_SIZE)
1081+        self._data_offset = 0xc
1082+
1083+    def unlink(self):
1084+        os.unlink(self.fname)
1085+
1086+    def read_share_data(self, offset, length):
1087+        precondition(offset >= 0)
1088+        # Reads beyond the end of the data are truncated. Reads that start
1089+        # beyond the end of the data return an empty string.
1090+        seekpos = self._data_offset+offset
1091+        fsize = os.path.getsize(self.fname)
1092+        actuallength = max(0, min(length, fsize-seekpos))
1093+        if actuallength == 0:
1094+            return ""
1095+        f = open(self.fname, 'rb')
1096+        f.seek(seekpos)
1097+        return f.read(actuallength)
1098+
1099+    def write_share_data(self, offset, data):
1100+        length = len(data)
1101+        precondition(offset >= 0, offset)
1102+        if self._max_size is not None and offset+length > self._max_size:
1103+            raise DataTooLargeError(self._max_size, offset, length)
1104+        f = open(self.fname, 'rb+')
1105+        real_offset = self._data_offset+offset
1106+        f.seek(real_offset)
1107+        assert f.tell() == real_offset
1108+        f.write(data)
1109+        f.close()
1110+
1111+    def _write_lease_record(self, f, lease_number, lease_info):
1112+        offset = self._lease_offset + lease_number * self.LEASE_SIZE
1113+        f.seek(offset)
1114+        assert f.tell() == offset
1115+        f.write(lease_info.to_immutable_data())
1116+
1117+    def _read_num_leases(self, f):
1118+        f.seek(0x08)
1119+        (num_leases,) = struct.unpack(">L", f.read(4))
1120+        return num_leases
1121+
1122+    def _write_num_leases(self, f, num_leases):
1123+        f.seek(0x08)
1124+        f.write(struct.pack(">L", num_leases))
1125+
1126+    def _truncate_leases(self, f, num_leases):
1127+        f.truncate(self._lease_offset + num_leases * self.LEASE_SIZE)
1128+
1129+    def get_leases(self):
1130+        """Yields a LeaseInfo instance for all leases."""
1131+        f = open(self.fname, 'rb')
1132+        (version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc))
1133+        f.seek(self._lease_offset)
1134+        for i in range(num_leases):
1135+            data = f.read(self.LEASE_SIZE)
1136+            if data:
1137+                yield LeaseInfo().from_immutable_data(data)
1138+
1139+    def add_lease(self, lease_info):
1140+        f = open(self.fname, 'rb+')
1141+        num_leases = self._read_num_leases(f)
1142+        self._write_lease_record(f, num_leases, lease_info)
1143+        self._write_num_leases(f, num_leases+1)
1144+        f.close()
1145+
1146+    def renew_lease(self, renew_secret, new_expire_time):
1147+        for i,lease in enumerate(self.get_leases()):
1148+            if constant_time_compare(lease.renew_secret, renew_secret):
1149+                # yup. See if we need to update the owner time.
1150+                if new_expire_time > lease.expiration_time:
1151+                    # yes
1152+                    lease.expiration_time = new_expire_time
1153+                    f = open(self.fname, 'rb+')
1154+                    self._write_lease_record(f, i, lease)
1155+                    f.close()
1156+                return
1157+        raise IndexError("unable to renew non-existent lease")
1158+
1159+    def add_or_renew_lease(self, lease_info):
1160+        try:
1161+            self.renew_lease(lease_info.renew_secret,
1162+                             lease_info.expiration_time)
1163+        except IndexError:
1164+            self.add_lease(lease_info)
1165+
1166+
1167+    def cancel_lease(self, cancel_secret):
1168+        """Remove a lease with the given cancel_secret. If the last lease is
1169+        cancelled, the file will be removed. Return the number of bytes that
1170+        were freed (by truncating the list of leases, and possibly by
1171+        deleting the file. Raise IndexError if there was no lease with the
1172+        given cancel_secret.
1173+        """
1174+
1175+        leases = list(self.get_leases())
1176+        num_leases_removed = 0
1177+        for i,lease in enumerate(leases):
1178+            if constant_time_compare(lease.cancel_secret, cancel_secret):
1179+                leases[i] = None
1180+                num_leases_removed += 1
1181+        if not num_leases_removed:
1182+            raise IndexError("unable to find matching lease to cancel")
1183+        if num_leases_removed:
1184+            # pack and write out the remaining leases. We write these out in
1185+            # the same order as they were added, so that if we crash while
1186+            # doing this, we won't lose any non-cancelled leases.
1187+            leases = [l for l in leases if l] # remove the cancelled leases
1188+            f = open(self.fname, 'rb+')
1189+            for i,lease in enumerate(leases):
1190+                self._write_lease_record(f, i, lease)
1191+            self._write_num_leases(f, len(leases))
1192+            self._truncate_leases(f, len(leases))
1193+            f.close()
1194+        space_freed = self.LEASE_SIZE * num_leases_removed
1195+        if not len(leases):
1196+            space_freed += os.stat(self.fname)[stat.ST_SIZE]
1197+            self.unlink()
1198+        return space_freed
1199hunk ./src/allmydata/storage/backends/das/expirer.py 2
1200 import time, os, pickle, struct
1201-from allmydata.storage.crawler import ShareCrawler
1202-from allmydata.storage.shares import get_share_file
1203+from allmydata.storage.crawler import FSShareCrawler
1204 from allmydata.storage.common import UnknownMutableContainerVersionError, \
1205      UnknownImmutableContainerVersionError
1206 from twisted.python import log as twlog
1207hunk ./src/allmydata/storage/backends/das/expirer.py 7
1208 
1209-class LeaseCheckingCrawler(ShareCrawler):
1210+class FSLeaseCheckingCrawler(FSShareCrawler):
1211     """I examine the leases on all shares, determining which are still valid
1212     and which have expired. I can remove the expired leases (if so
1213     configured), and the share will be deleted when the last lease is
1214hunk ./src/allmydata/storage/backends/das/expirer.py 50
1215     slow_start = 360 # wait 6 minutes after startup
1216     minimum_cycle_time = 12*60*60 # not more than twice per day
1217 
1218-    def __init__(self, statefile, historyfile,
1219-                 expiration_enabled, mode,
1220-                 override_lease_duration, # used if expiration_mode=="age"
1221-                 cutoff_date, # used if expiration_mode=="cutoff-date"
1222-                 sharetypes):
1223+    def __init__(self, statefile, historyfile, expiration_policy):
1224         self.historyfile = historyfile
1225hunk ./src/allmydata/storage/backends/das/expirer.py 52
1226-        self.expiration_enabled = expiration_enabled
1227-        self.mode = mode
1228+        self.expiration_enabled = expiration_policy['enabled']
1229+        self.mode = expiration_policy['mode']
1230         self.override_lease_duration = None
1231         self.cutoff_date = None
1232         if self.mode == "age":
1233hunk ./src/allmydata/storage/backends/das/expirer.py 57
1234-            assert isinstance(override_lease_duration, (int, type(None)))
1235-            self.override_lease_duration = override_lease_duration # seconds
1236+            assert isinstance(expiration_policy['override_lease_duration'], (int, type(None)))
1237+            self.override_lease_duration = expiration_policy['override_lease_duration']# seconds
1238         elif self.mode == "cutoff-date":
1239hunk ./src/allmydata/storage/backends/das/expirer.py 60
1240-            assert isinstance(cutoff_date, int) # seconds-since-epoch
1241+            assert isinstance(expiration_policy['cutoff_date'], int) # seconds-since-epoch
1242             assert cutoff_date is not None
1243hunk ./src/allmydata/storage/backends/das/expirer.py 62
1244-            self.cutoff_date = cutoff_date
1245+            self.cutoff_date = expiration_policy['cutoff_date']
1246         else:
1247hunk ./src/allmydata/storage/backends/das/expirer.py 64
1248-            raise ValueError("GC mode '%s' must be 'age' or 'cutoff-date'" % mode)
1249-        self.sharetypes_to_expire = sharetypes
1250-        ShareCrawler.__init__(self, statefile)
1251+            raise ValueError("GC mode '%s' must be 'age' or 'cutoff-date'" % expiration_policy['mode'])
1252+        self.sharetypes_to_expire = expiration_policy['sharetypes']
1253+        FSShareCrawler.__init__(self, statefile)
1254 
1255     def add_initial_state(self):
1256         # we fill ["cycle-to-date"] here (even though they will be reset in
1257hunk ./src/allmydata/storage/backends/das/expirer.py 156
1258 
1259     def process_share(self, sharefilename):
1260         # first, find out what kind of a share it is
1261-        sf = get_share_file(sharefilename)
1262+        f = open(sharefilename, "rb")
1263+        prefix = f.read(32)
1264+        f.close()
1265+        if prefix == MutableShareFile.MAGIC:
1266+            sf = MutableShareFile(sharefilename)
1267+        else:
1268+            # otherwise assume it's immutable
1269+            sf = FSBShare(sharefilename)
1270         sharetype = sf.sharetype
1271         now = time.time()
1272         s = self.stat(sharefilename)
1273addfile ./src/allmydata/storage/backends/null/__init__.py
1274addfile ./src/allmydata/storage/backends/null/core.py
1275hunk ./src/allmydata/storage/backends/null/core.py 1
1276+from allmydata.storage.backends.base import Backend
1277+
1278+class NullCore(Backend):
1279+    def __init__(self):
1280+        Backend.__init__(self)
1281+
1282+    def get_available_space(self):
1283+        return None
1284+
1285+    def get_shares(self, storage_index):
1286+        return set()
1287+
1288+    def get_share(self, storage_index, sharenum):
1289+        return None
1290+
1291+    def make_bucket_writer(self, storage_index, shnum, max_space_per_bucket, lease_info, canary):
1292+        return NullBucketWriter()
1293hunk ./src/allmydata/storage/crawler.py 12
1294 class TimeSliceExceeded(Exception):
1295     pass
1296 
1297-class ShareCrawler(service.MultiService):
1298+class FSShareCrawler(service.MultiService):
1299     """A subcless of ShareCrawler is attached to a StorageServer, and
1300     periodically walks all of its shares, processing each one in some
1301     fashion. This crawl is rate-limited, to reduce the IO burden on the host,
1302hunk ./src/allmydata/storage/crawler.py 68
1303     cpu_slice = 1.0 # use up to 1.0 seconds before yielding
1304     minimum_cycle_time = 300 # don't run a cycle faster than this
1305 
1306-    def __init__(self, backend, statefile, allowed_cpu_percentage=None):
1307+    def __init__(self, statefname, allowed_cpu_percentage=None):
1308         service.MultiService.__init__(self)
1309         if allowed_cpu_percentage is not None:
1310             self.allowed_cpu_percentage = allowed_cpu_percentage
1311hunk ./src/allmydata/storage/crawler.py 72
1312-        self.backend = backend
1313+        self.statefname = statefname
1314         self.prefixes = [si_b2a(struct.pack(">H", i << (16-10)))[:2]
1315                          for i in range(2**10)]
1316         self.prefixes.sort()
1317hunk ./src/allmydata/storage/crawler.py 192
1318         #                            of the last bucket to be processed, or
1319         #                            None if we are sleeping between cycles
1320         try:
1321-            f = open(self.statefile, "rb")
1322+            f = open(self.statefname, "rb")
1323             state = pickle.load(f)
1324             f.close()
1325         except EnvironmentError:
1326hunk ./src/allmydata/storage/crawler.py 230
1327         else:
1328             last_complete_prefix = self.prefixes[lcpi]
1329         self.state["last-complete-prefix"] = last_complete_prefix
1330-        tmpfile = self.statefile + ".tmp"
1331+        tmpfile = self.statefname + ".tmp"
1332         f = open(tmpfile, "wb")
1333         pickle.dump(self.state, f)
1334         f.close()
1335hunk ./src/allmydata/storage/crawler.py 433
1336         pass
1337 
1338 
1339-class BucketCountingCrawler(ShareCrawler):
1340+class FSBucketCountingCrawler(FSShareCrawler):
1341     """I keep track of how many buckets are being managed by this server.
1342     This is equivalent to the number of distributed files and directories for
1343     which I am providing storage. The actual number of files+directories in
1344hunk ./src/allmydata/storage/crawler.py 446
1345 
1346     minimum_cycle_time = 60*60 # we don't need this more than once an hour
1347 
1348-    def __init__(self, statefile, num_sample_prefixes=1):
1349-        ShareCrawler.__init__(self, statefile)
1350+    def __init__(self, statefname, num_sample_prefixes=1):
1351+        FSShareCrawler.__init__(self, statefname)
1352         self.num_sample_prefixes = num_sample_prefixes
1353 
1354     def add_initial_state(self):
1355hunk ./src/allmydata/storage/immutable.py 14
1356 from allmydata.storage.common import UnknownImmutableContainerVersionError, \
1357      DataTooLargeError
1358 
1359-# each share file (in storage/shares/$SI/$SHNUM) contains lease information
1360-# and share data. The share data is accessed by RIBucketWriter.write and
1361-# RIBucketReader.read . The lease information is not accessible through these
1362-# interfaces.
1363-
1364-# The share file has the following layout:
1365-#  0x00: share file version number, four bytes, current version is 1
1366-#  0x04: share data length, four bytes big-endian = A # See Footnote 1 below.
1367-#  0x08: number of leases, four bytes big-endian
1368-#  0x0c: beginning of share data (see immutable.layout.WriteBucketProxy)
1369-#  A+0x0c = B: first lease. Lease format is:
1370-#   B+0x00: owner number, 4 bytes big-endian, 0 is reserved for no-owner
1371-#   B+0x04: renew secret, 32 bytes (SHA256)
1372-#   B+0x24: cancel secret, 32 bytes (SHA256)
1373-#   B+0x44: expiration time, 4 bytes big-endian seconds-since-epoch
1374-#   B+0x48: next lease, or end of record
1375-
1376-# Footnote 1: as of Tahoe v1.3.0 this field is not used by storage servers,
1377-# but it is still filled in by storage servers in case the storage server
1378-# software gets downgraded from >= Tahoe v1.3.0 to < Tahoe v1.3.0, or the
1379-# share file is moved from one storage server to another. The value stored in
1380-# this field is truncated, so if the actual share data length is >= 2**32,
1381-# then the value stored in this field will be the actual share data length
1382-# modulo 2**32.
1383-
1384-class ShareFile:
1385-    LEASE_SIZE = struct.calcsize(">L32s32sL")
1386-    sharetype = "immutable"
1387-
1388-    def __init__(self, filename, max_size=None, create=False):
1389-        """ If max_size is not None then I won't allow more than
1390-        max_size to be written to me. If create=True then max_size
1391-        must not be None. """
1392-        precondition((max_size is not None) or (not create), max_size, create)
1393-        self.home = filename
1394-        self._max_size = max_size
1395-        if create:
1396-            # touch the file, so later callers will see that we're working on
1397-            # it. Also construct the metadata.
1398-            assert not os.path.exists(self.home)
1399-            fileutil.make_dirs(os.path.dirname(self.home))
1400-            f = open(self.home, 'wb')
1401-            # The second field -- the four-byte share data length -- is no
1402-            # longer used as of Tahoe v1.3.0, but we continue to write it in
1403-            # there in case someone downgrades a storage server from >=
1404-            # Tahoe-1.3.0 to < Tahoe-1.3.0, or moves a share file from one
1405-            # server to another, etc. We do saturation -- a share data length
1406-            # larger than 2**32-1 (what can fit into the field) is marked as
1407-            # the largest length that can fit into the field. That way, even
1408-            # if this does happen, the old < v1.3.0 server will still allow
1409-            # clients to read the first part of the share.
1410-            f.write(struct.pack(">LLL", 1, min(2**32-1, max_size), 0))
1411-            f.close()
1412-            self._lease_offset = max_size + 0x0c
1413-            self._num_leases = 0
1414-        else:
1415-            f = open(self.home, 'rb')
1416-            filesize = os.path.getsize(self.home)
1417-            (version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc))
1418-            f.close()
1419-            if version != 1:
1420-                msg = "sharefile %s had version %d but we wanted 1" % \
1421-                      (filename, version)
1422-                raise UnknownImmutableContainerVersionError(msg)
1423-            self._num_leases = num_leases
1424-            self._lease_offset = filesize - (num_leases * self.LEASE_SIZE)
1425-        self._data_offset = 0xc
1426-
1427-    def unlink(self):
1428-        os.unlink(self.home)
1429-
1430-    def read_share_data(self, offset, length):
1431-        precondition(offset >= 0)
1432-        # Reads beyond the end of the data are truncated. Reads that start
1433-        # beyond the end of the data return an empty string.
1434-        seekpos = self._data_offset+offset
1435-        fsize = os.path.getsize(self.home)
1436-        actuallength = max(0, min(length, fsize-seekpos))
1437-        if actuallength == 0:
1438-            return ""
1439-        f = open(self.home, 'rb')
1440-        f.seek(seekpos)
1441-        return f.read(actuallength)
1442-
1443-    def write_share_data(self, offset, data):
1444-        length = len(data)
1445-        precondition(offset >= 0, offset)
1446-        if self._max_size is not None and offset+length > self._max_size:
1447-            raise DataTooLargeError(self._max_size, offset, length)
1448-        f = open(self.home, 'rb+')
1449-        real_offset = self._data_offset+offset
1450-        f.seek(real_offset)
1451-        assert f.tell() == real_offset
1452-        f.write(data)
1453-        f.close()
1454-
1455-    def _write_lease_record(self, f, lease_number, lease_info):
1456-        offset = self._lease_offset + lease_number * self.LEASE_SIZE
1457-        f.seek(offset)
1458-        assert f.tell() == offset
1459-        f.write(lease_info.to_immutable_data())
1460-
1461-    def _read_num_leases(self, f):
1462-        f.seek(0x08)
1463-        (num_leases,) = struct.unpack(">L", f.read(4))
1464-        return num_leases
1465-
1466-    def _write_num_leases(self, f, num_leases):
1467-        f.seek(0x08)
1468-        f.write(struct.pack(">L", num_leases))
1469-
1470-    def _truncate_leases(self, f, num_leases):
1471-        f.truncate(self._lease_offset + num_leases * self.LEASE_SIZE)
1472-
1473-    def get_leases(self):
1474-        """Yields a LeaseInfo instance for all leases."""
1475-        f = open(self.home, 'rb')
1476-        (version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc))
1477-        f.seek(self._lease_offset)
1478-        for i in range(num_leases):
1479-            data = f.read(self.LEASE_SIZE)
1480-            if data:
1481-                yield LeaseInfo().from_immutable_data(data)
1482-
1483-    def add_lease(self, lease_info):
1484-        f = open(self.home, 'rb+')
1485-        num_leases = self._read_num_leases(f)
1486-        self._write_lease_record(f, num_leases, lease_info)
1487-        self._write_num_leases(f, num_leases+1)
1488-        f.close()
1489-
1490-    def renew_lease(self, renew_secret, new_expire_time):
1491-        for i,lease in enumerate(self.get_leases()):
1492-            if constant_time_compare(lease.renew_secret, renew_secret):
1493-                # yup. See if we need to update the owner time.
1494-                if new_expire_time > lease.expiration_time:
1495-                    # yes
1496-                    lease.expiration_time = new_expire_time
1497-                    f = open(self.home, 'rb+')
1498-                    self._write_lease_record(f, i, lease)
1499-                    f.close()
1500-                return
1501-        raise IndexError("unable to renew non-existent lease")
1502-
1503-    def add_or_renew_lease(self, lease_info):
1504-        try:
1505-            self.renew_lease(lease_info.renew_secret,
1506-                             lease_info.expiration_time)
1507-        except IndexError:
1508-            self.add_lease(lease_info)
1509-
1510-
1511-    def cancel_lease(self, cancel_secret):
1512-        """Remove a lease with the given cancel_secret. If the last lease is
1513-        cancelled, the file will be removed. Return the number of bytes that
1514-        were freed (by truncating the list of leases, and possibly by
1515-        deleting the file. Raise IndexError if there was no lease with the
1516-        given cancel_secret.
1517-        """
1518-
1519-        leases = list(self.get_leases())
1520-        num_leases_removed = 0
1521-        for i,lease in enumerate(leases):
1522-            if constant_time_compare(lease.cancel_secret, cancel_secret):
1523-                leases[i] = None
1524-                num_leases_removed += 1
1525-        if not num_leases_removed:
1526-            raise IndexError("unable to find matching lease to cancel")
1527-        if num_leases_removed:
1528-            # pack and write out the remaining leases. We write these out in
1529-            # the same order as they were added, so that if we crash while
1530-            # doing this, we won't lose any non-cancelled leases.
1531-            leases = [l for l in leases if l] # remove the cancelled leases
1532-            f = open(self.home, 'rb+')
1533-            for i,lease in enumerate(leases):
1534-                self._write_lease_record(f, i, lease)
1535-            self._write_num_leases(f, len(leases))
1536-            self._truncate_leases(f, len(leases))
1537-            f.close()
1538-        space_freed = self.LEASE_SIZE * num_leases_removed
1539-        if not len(leases):
1540-            space_freed += os.stat(self.home)[stat.ST_SIZE]
1541-            self.unlink()
1542-        return space_freed
1543-class NullBucketWriter(Referenceable):
1544-    implements(RIBucketWriter)
1545-
1546-    def remote_write(self, offset, data):
1547-        return
1548-
1549 class BucketWriter(Referenceable):
1550     implements(RIBucketWriter)
1551 
1552hunk ./src/allmydata/storage/immutable.py 17
1553-    def __init__(self, ss, incominghome, finalhome, max_size, lease_info, canary):
1554+    def __init__(self, ss, immutableshare, max_size, lease_info, canary):
1555         self.ss = ss
1556hunk ./src/allmydata/storage/immutable.py 19
1557-        self.incominghome = incominghome
1558-        self.finalhome = finalhome
1559         self._max_size = max_size # don't allow the client to write more than this
1560         self._canary = canary
1561         self._disconnect_marker = canary.notifyOnDisconnect(self._disconnected)
1562hunk ./src/allmydata/storage/immutable.py 24
1563         self.closed = False
1564         self.throw_out_all_data = False
1565-        self._sharefile = ShareFile(incominghome, create=True, max_size=max_size)
1566+        self._sharefile = immutableshare
1567         # also, add our lease to the file now, so that other ones can be
1568         # added by simultaneous uploaders
1569         self._sharefile.add_lease(lease_info)
1570hunk ./src/allmydata/storage/server.py 16
1571 from allmydata.storage.lease import LeaseInfo
1572 from allmydata.storage.mutable import MutableShareFile, EmptyShare, \
1573      create_mutable_sharefile
1574-from allmydata.storage.immutable import ShareFile, NullBucketWriter, BucketWriter, BucketReader
1575-from allmydata.storage.crawler import BucketCountingCrawler
1576-from allmydata.storage.expirer import LeaseCheckingCrawler
1577 
1578 from zope.interface import implements
1579 
1580hunk ./src/allmydata/storage/server.py 19
1581-# A Backend is a MultiService so that its server's crawlers (if the server has any) can
1582-# be started and stopped.
1583-class Backend(service.MultiService):
1584-    implements(IStatsProducer)
1585-    def __init__(self):
1586-        service.MultiService.__init__(self)
1587-
1588-    def get_bucket_shares(self):
1589-        """XXX"""
1590-        raise NotImplementedError
1591-
1592-    def get_share(self):
1593-        """XXX"""
1594-        raise NotImplementedError
1595-
1596-    def make_bucket_writer(self):
1597-        """XXX"""
1598-        raise NotImplementedError
1599-
1600-class NullBackend(Backend):
1601-    def __init__(self):
1602-        Backend.__init__(self)
1603-
1604-    def get_available_space(self):
1605-        return None
1606-
1607-    def get_bucket_shares(self, storage_index):
1608-        return set()
1609-
1610-    def get_share(self, storage_index, sharenum):
1611-        return None
1612-
1613-    def make_bucket_writer(self, storage_index, shnum, max_space_per_bucket, lease_info, canary):
1614-        return NullBucketWriter()
1615-
1616-class FSBackend(Backend):
1617-    def __init__(self, storedir, readonly=False, reserved_space=0):
1618-        Backend.__init__(self)
1619-
1620-        self._setup_storage(storedir, readonly, reserved_space)
1621-        self._setup_corruption_advisory()
1622-        self._setup_bucket_counter()
1623-        self._setup_lease_checkerf()
1624-
1625-    def _setup_storage(self, storedir, readonly, reserved_space):
1626-        self.storedir = storedir
1627-        self.readonly = readonly
1628-        self.reserved_space = int(reserved_space)
1629-        if self.reserved_space:
1630-            if self.get_available_space() is None:
1631-                log.msg("warning: [storage]reserved_space= is set, but this platform does not support an API to get disk statistics (statvfs(2) or GetDiskFreeSpaceEx), so this reservation cannot be honored",
1632-                        umid="0wZ27w", level=log.UNUSUAL)
1633-
1634-        self.sharedir = os.path.join(self.storedir, "shares")
1635-        fileutil.make_dirs(self.sharedir)
1636-        self.incomingdir = os.path.join(self.sharedir, 'incoming')
1637-        self._clean_incomplete()
1638-
1639-    def _clean_incomplete(self):
1640-        fileutil.rm_dir(self.incomingdir)
1641-        fileutil.make_dirs(self.incomingdir)
1642-
1643-    def _setup_corruption_advisory(self):
1644-        # we don't actually create the corruption-advisory dir until necessary
1645-        self.corruption_advisory_dir = os.path.join(self.storedir,
1646-                                                    "corruption-advisories")
1647-
1648-    def _setup_bucket_counter(self):
1649-        statefile = os.path.join(self.storedir, "bucket_counter.state")
1650-        self.bucket_counter = BucketCountingCrawler(statefile)
1651-        self.bucket_counter.setServiceParent(self)
1652-
1653-    def _setup_lease_checkerf(self):
1654-        statefile = os.path.join(self.storedir, "lease_checker.state")
1655-        historyfile = os.path.join(self.storedir, "lease_checker.history")
1656-        self.lease_checker = LeaseCheckingCrawler(statefile, historyfile,
1657-                                   expiration_enabled, expiration_mode,
1658-                                   expiration_override_lease_duration,
1659-                                   expiration_cutoff_date,
1660-                                   expiration_sharetypes)
1661-        self.lease_checker.setServiceParent(self)
1662-
1663-    def get_available_space(self):
1664-        if self.readonly:
1665-            return 0
1666-        return fileutil.get_available_space(self.storedir, self.reserved_space)
1667-
1668-    def get_bucket_shares(self, storage_index):
1669-        """Return a list of (shnum, pathname) tuples for files that hold
1670-        shares for this storage_index. In each tuple, 'shnum' will always be
1671-        the integer form of the last component of 'pathname'."""
1672-        storagedir = os.path.join(self.sharedir, storage_index_to_dir(storage_index))
1673-        try:
1674-            for f in os.listdir(storagedir):
1675-                if NUM_RE.match(f):
1676-                    filename = os.path.join(storagedir, f)
1677-                    yield (int(f), filename)
1678-        except OSError:
1679-            # Commonly caused by there being no buckets at all.
1680-            pass
1681-
1682 # storage/
1683 # storage/shares/incoming
1684 #   incoming/ holds temp dirs named $START/$STORAGEINDEX/$SHARENUM which will
1685hunk ./src/allmydata/storage/server.py 32
1686 # $SHARENUM matches this regex:
1687 NUM_RE=re.compile("^[0-9]+$")
1688 
1689-
1690-
1691 class StorageServer(service.MultiService, Referenceable):
1692     implements(RIStorageServer, IStatsProducer)
1693     name = 'storage'
1694hunk ./src/allmydata/storage/server.py 35
1695-    LeaseCheckerClass = LeaseCheckingCrawler
1696 
1697     def __init__(self, nodeid, backend, reserved_space=0,
1698                  readonly_storage=False,
1699hunk ./src/allmydata/storage/server.py 38
1700-                 stats_provider=None,
1701-                 expiration_enabled=False,
1702-                 expiration_mode="age",
1703-                 expiration_override_lease_duration=None,
1704-                 expiration_cutoff_date=None,
1705-                 expiration_sharetypes=("mutable", "immutable")):
1706+                 stats_provider=None ):
1707         service.MultiService.__init__(self)
1708         assert isinstance(nodeid, str)
1709         assert len(nodeid) == 20
1710hunk ./src/allmydata/storage/server.py 217
1711         # they asked about: this will save them a lot of work. Add or update
1712         # leases for all of them: if they want us to hold shares for this
1713         # file, they'll want us to hold leases for this file.
1714-        for (shnum, fn) in self.backend.get_bucket_shares(storage_index):
1715-            alreadygot.add(shnum)
1716-            sf = ShareFile(fn)
1717-            sf.add_or_renew_lease(lease_info)
1718-
1719-        for shnum in sharenums:
1720-            share = self.backend.get_share(storage_index, shnum)
1721+        for share in self.backend.get_shares(storage_index):
1722+            alreadygot.add(share.shnum)
1723+            share.add_or_renew_lease(lease_info)
1724 
1725hunk ./src/allmydata/storage/server.py 221
1726-            if not share:
1727-                if (not limited) or (remaining_space >= max_space_per_bucket):
1728-                    # ok! we need to create the new share file.
1729-                    bw = self.backend.make_bucket_writer(storage_index, shnum,
1730-                                      max_space_per_bucket, lease_info, canary)
1731-                    bucketwriters[shnum] = bw
1732-                    self._active_writers[bw] = 1
1733-                    if limited:
1734-                        remaining_space -= max_space_per_bucket
1735-                else:
1736-                    # bummer! not enough space to accept this bucket
1737-                    pass
1738+        for shnum in (sharenums - alreadygot):
1739+            if (not limited) or (remaining_space >= max_space_per_bucket):
1740+                #XXX or should the following line occur in storage server construtor? ok! we need to create the new share file.
1741+                self.backend.set_storage_server(self)
1742+                bw = self.backend.make_bucket_writer(storage_index, shnum,
1743+                                                     max_space_per_bucket, lease_info, canary)
1744+                bucketwriters[shnum] = bw
1745+                self._active_writers[bw] = 1
1746+                if limited:
1747+                    remaining_space -= max_space_per_bucket
1748 
1749hunk ./src/allmydata/storage/server.py 232
1750-            elif share.is_complete():
1751-                # great! we already have it. easy.
1752-                pass
1753-            elif not share.is_complete():
1754-                # Note that we don't create BucketWriters for shnums that
1755-                # have a partial share (in incoming/), so if a second upload
1756-                # occurs while the first is still in progress, the second
1757-                # uploader will use different storage servers.
1758-                pass
1759+        #XXX We SHOULD DOCUMENT LATER.
1760 
1761         self.add_latency("allocate", time.time() - start)
1762         return alreadygot, bucketwriters
1763hunk ./src/allmydata/storage/server.py 238
1764 
1765     def _iter_share_files(self, storage_index):
1766-        for shnum, filename in self._get_bucket_shares(storage_index):
1767+        for shnum, filename in self._get_shares(storage_index):
1768             f = open(filename, 'rb')
1769             header = f.read(32)
1770             f.close()
1771hunk ./src/allmydata/storage/server.py 318
1772         si_s = si_b2a(storage_index)
1773         log.msg("storage: get_buckets %s" % si_s)
1774         bucketreaders = {} # k: sharenum, v: BucketReader
1775-        for shnum, filename in self.backend.get_bucket_shares(storage_index):
1776+        for shnum, filename in self.backend.get_shares(storage_index):
1777             bucketreaders[shnum] = BucketReader(self, filename,
1778                                                 storage_index, shnum)
1779         self.add_latency("get", time.time() - start)
1780hunk ./src/allmydata/storage/server.py 334
1781         # since all shares get the same lease data, we just grab the leases
1782         # from the first share
1783         try:
1784-            shnum, filename = self._get_bucket_shares(storage_index).next()
1785+            shnum, filename = self._get_shares(storage_index).next()
1786             sf = ShareFile(filename)
1787             return sf.get_leases()
1788         except StopIteration:
1789hunk ./src/allmydata/storage/shares.py 1
1790-#! /usr/bin/python
1791-
1792-from allmydata.storage.mutable import MutableShareFile
1793-from allmydata.storage.immutable import ShareFile
1794-
1795-def get_share_file(filename):
1796-    f = open(filename, "rb")
1797-    prefix = f.read(32)
1798-    f.close()
1799-    if prefix == MutableShareFile.MAGIC:
1800-        return MutableShareFile(filename)
1801-    # otherwise assume it's immutable
1802-    return ShareFile(filename)
1803-
1804rmfile ./src/allmydata/storage/shares.py
1805hunk ./src/allmydata/test/common_util.py 20
1806 
1807 def flip_one_bit(s, offset=0, size=None):
1808     """ flip one random bit of the string s, in a byte greater than or equal to offset and less
1809-    than offset+size. """
1810+    than offset+size. Return the new string. """
1811     if size is None:
1812         size=len(s)-offset
1813     i = randrange(offset, offset+size)
1814hunk ./src/allmydata/test/test_backends.py 7
1815 
1816 from allmydata.test.common_util import ReallyEqualMixin
1817 
1818-import mock
1819+import mock, os
1820 
1821 # This is the code that we're going to be testing.
1822hunk ./src/allmydata/test/test_backends.py 10
1823-from allmydata.storage.server import StorageServer, FSBackend, NullBackend
1824+from allmydata.storage.server import StorageServer
1825+
1826+from allmydata.storage.backends.das.core import DASCore
1827+from allmydata.storage.backends.null.core import NullCore
1828+
1829 
1830 # The following share file contents was generated with
1831 # storage.immutable.ShareFile from Tahoe-LAFS v1.8.2
1832hunk ./src/allmydata/test/test_backends.py 22
1833 share_data = 'a\x00\x00\x00\x00xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\x00(\xde\x80'
1834 share_file_data = '\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x01' + share_data
1835 
1836-sharefname = 'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a/0'
1837+tempdir = 'teststoredir'
1838+sharedirname = os.path.join(tempdir, 'shares', 'or', 'orsxg5dtorxxeylhmvpws3temv4a')
1839+sharefname = os.path.join(sharedirname, '0')
1840 
1841 class TestServerConstruction(unittest.TestCase, ReallyEqualMixin):
1842     @mock.patch('time.time')
1843hunk ./src/allmydata/test/test_backends.py 58
1844         filesystem in only the prescribed ways. """
1845 
1846         def call_open(fname, mode):
1847-            if fname == 'testdir/bucket_counter.state':
1848-                raise IOError(2, "No such file or directory: 'testdir/bucket_counter.state'")
1849-            elif fname == 'testdir/lease_checker.state':
1850-                raise IOError(2, "No such file or directory: 'testdir/lease_checker.state'")
1851-            elif fname == 'testdir/lease_checker.history':
1852+            if fname == os.path.join(tempdir,'bucket_counter.state'):
1853+                raise IOError(2, "No such file or directory: '%s'" % os.path.join(tempdir, 'bucket_counter.state'))
1854+            elif fname == os.path.join(tempdir, 'lease_checker.state'):
1855+                raise IOError(2, "No such file or directory: '%s'" % os.path.join(tempdir, 'lease_checker.state'))
1856+            elif fname == os.path.join(tempdir, 'lease_checker.history'):
1857                 return StringIO()
1858             else:
1859                 self.fail("Server with FS backend tried to open '%s' in mode '%s'" % (fname, mode))
1860hunk ./src/allmydata/test/test_backends.py 124
1861     @mock.patch('__builtin__.open')
1862     def setUp(self, mockopen):
1863         def call_open(fname, mode):
1864-            if fname == 'testdir/bucket_counter.state':
1865-                raise IOError(2, "No such file or directory: 'testdir/bucket_counter.state'")
1866-            elif fname == 'testdir/lease_checker.state':
1867-                raise IOError(2, "No such file or directory: 'testdir/lease_checker.state'")
1868-            elif fname == 'testdir/lease_checker.history':
1869+            if fname == os.path.join(tempdir, 'bucket_counter.state'):
1870+                raise IOError(2, "No such file or directory: '%s'" % os.path.join(tempdir, 'bucket_counter.state'))
1871+            elif fname == os.path.join(tempdir, 'lease_checker.state'):
1872+                raise IOError(2, "No such file or directory: '%s'" % os.path.join(tempdir, 'lease_checker.state'))
1873+            elif fname == os.path.join(tempdir, 'lease_checker.history'):
1874                 return StringIO()
1875         mockopen.side_effect = call_open
1876hunk ./src/allmydata/test/test_backends.py 131
1877-
1878-        self.s = StorageServer('testnodeidxxxxxxxxxx', backend=FSBackend('teststoredir'))
1879+        expiration_policy = {'enabled' : False,
1880+                             'mode' : 'age',
1881+                             'override_lease_duration' : None,
1882+                             'cutoff_date' : None,
1883+                             'sharetypes' : None}
1884+        testbackend = DASCore(tempdir, expiration_policy)
1885+        self.s = StorageServer('testnodeidxxxxxxxxxx', backend=DASCore(tempdir, expiration_policy) )
1886 
1887     @mock.patch('time.time')
1888     @mock.patch('os.mkdir')
1889hunk ./src/allmydata/test/test_backends.py 148
1890         """ Write a new share. """
1891 
1892         def call_listdir(dirname):
1893-            self.failUnlessReallyEqual(dirname, 'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a')
1894-            raise OSError(2, "No such file or directory: 'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a'")
1895+            self.failUnlessReallyEqual(dirname, sharedirname)
1896+            raise OSError(2, "No such file or directory: '%s'" % os.path.join(tempdir, 'shares/or/orsxg5dtorxxeylhmvpws3temv4a'))
1897 
1898         mocklistdir.side_effect = call_listdir
1899 
1900hunk ./src/allmydata/test/test_backends.py 178
1901 
1902         sharefile = MockFile()
1903         def call_open(fname, mode):
1904-            self.failUnlessReallyEqual(fname, 'testdir/shares/incoming/or/orsxg5dtorxxeylhmvpws3temv4a/0' )
1905+            self.failUnlessReallyEqual(fname, os.path.join(tempdir, 'shares', 'or', 'orsxg5dtorxxeylhmvpws3temv4a', '0' ))
1906             return sharefile
1907 
1908         mockopen.side_effect = call_open
1909hunk ./src/allmydata/test/test_backends.py 200
1910         StorageServer object. """
1911 
1912         def call_listdir(dirname):
1913-            self.failUnlessReallyEqual(dirname,'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a')
1914+            self.failUnlessReallyEqual(dirname, os.path.join(tempdir, 'shares', 'or', 'orsxg5dtorxxeylhmvpws3temv4a'))
1915             return ['0']
1916 
1917         mocklistdir.side_effect = call_listdir
1918}
1919[checkpoint patch
1920wilcoxjg@gmail.com**20110626165715
1921 Ignore-this: fbfce2e8a1c1bb92715793b8ad6854d5
1922] {
1923hunk ./src/allmydata/storage/backends/das/core.py 21
1924 from allmydata.storage.lease import LeaseInfo
1925 from allmydata.storage.mutable import MutableShareFile, EmptyShare, \
1926      create_mutable_sharefile
1927-from allmydata.storage.backends.das.immutable import NullBucketWriter, BucketWriter, BucketReader
1928+from allmydata.storage.immutable import BucketWriter, BucketReader
1929 from allmydata.storage.crawler import FSBucketCountingCrawler
1930 from allmydata.storage.backends.das.expirer import FSLeaseCheckingCrawler
1931 
1932hunk ./src/allmydata/storage/backends/das/core.py 27
1933 from zope.interface import implements
1934 
1935+# $SHARENUM matches this regex:
1936+NUM_RE=re.compile("^[0-9]+$")
1937+
1938 class DASCore(Backend):
1939     implements(IStorageBackend)
1940     def __init__(self, storedir, expiration_policy, readonly=False, reserved_space=0):
1941hunk ./src/allmydata/storage/backends/das/core.py 80
1942         return fileutil.get_available_space(self.storedir, self.reserved_space)
1943 
1944     def get_shares(self, storage_index):
1945-        """Return a list of the FSBShare objects that correspond to the passed storage_index."""
1946+        """Return a list of the ImmutableShare objects that correspond to the passed storage_index."""
1947         finalstoragedir = os.path.join(self.sharedir, storage_index_to_dir(storage_index))
1948         try:
1949             for f in os.listdir(finalstoragedir):
1950hunk ./src/allmydata/storage/backends/das/core.py 86
1951                 if NUM_RE.match(f):
1952                     filename = os.path.join(finalstoragedir, f)
1953-                    yield FSBShare(filename, int(f))
1954+                    yield ImmutableShare(self.sharedir, storage_index, int(f))
1955         except OSError:
1956             # Commonly caused by there being no buckets at all.
1957             pass
1958hunk ./src/allmydata/storage/backends/das/core.py 95
1959         immsh = ImmutableShare(self.sharedir, storage_index, shnum, max_size=max_space_per_bucket, create=True)
1960         bw = BucketWriter(self.ss, immsh, max_space_per_bucket, lease_info, canary)
1961         return bw
1962+
1963+    def set_storage_server(self, ss):
1964+        self.ss = ss
1965         
1966 
1967 # each share file (in storage/shares/$SI/$SHNUM) contains lease information
1968hunk ./src/allmydata/storage/server.py 29
1969 # Where "$START" denotes the first 10 bits worth of $STORAGEINDEX (that's 2
1970 # base-32 chars).
1971 
1972-# $SHARENUM matches this regex:
1973-NUM_RE=re.compile("^[0-9]+$")
1974 
1975 class StorageServer(service.MultiService, Referenceable):
1976     implements(RIStorageServer, IStatsProducer)
1977}
1978[checkpoint4
1979wilcoxjg@gmail.com**20110628202202
1980 Ignore-this: 9778596c10bb066b58fc211f8c1707b7
1981] {
1982hunk ./src/allmydata/storage/backends/das/core.py 96
1983         bw = BucketWriter(self.ss, immsh, max_space_per_bucket, lease_info, canary)
1984         return bw
1985 
1986+    def make_bucket_reader(self, share):
1987+        return BucketReader(self.ss, share)
1988+
1989     def set_storage_server(self, ss):
1990         self.ss = ss
1991         
1992hunk ./src/allmydata/storage/backends/das/core.py 138
1993         must not be None. """
1994         precondition((max_size is not None) or (not create), max_size, create)
1995         self.shnum = shnum
1996+        self.storage_index = storageindex
1997         self.fname = os.path.join(sharedir, storage_index_to_dir(storageindex), str(shnum))
1998         self._max_size = max_size
1999         if create:
2000hunk ./src/allmydata/storage/backends/das/core.py 173
2001             self._lease_offset = filesize - (num_leases * self.LEASE_SIZE)
2002         self._data_offset = 0xc
2003 
2004+    def get_shnum(self):
2005+        return self.shnum
2006+
2007     def unlink(self):
2008         os.unlink(self.fname)
2009 
2010hunk ./src/allmydata/storage/backends/null/core.py 2
2011 from allmydata.storage.backends.base import Backend
2012+from allmydata.storage.immutable import BucketWriter, BucketReader
2013 
2014 class NullCore(Backend):
2015     def __init__(self):
2016hunk ./src/allmydata/storage/backends/null/core.py 17
2017     def get_share(self, storage_index, sharenum):
2018         return None
2019 
2020-    def make_bucket_writer(self, storage_index, shnum, max_space_per_bucket, lease_info, canary):
2021-        return NullBucketWriter()
2022+    def make_bucket_writer(self, storageindex, shnum, max_space_per_bucket, lease_info, canary):
2023+       
2024+        return BucketWriter(self.ss, immutableshare, max_space_per_bucket, lease_info, canary)
2025+
2026+    def set_storage_server(self, ss):
2027+        self.ss = ss
2028+
2029+class ImmutableShare:
2030+    sharetype = "immutable"
2031+
2032+    def __init__(self, sharedir, storageindex, shnum, max_size=None, create=False):
2033+        """ If max_size is not None then I won't allow more than
2034+        max_size to be written to me. If create=True then max_size
2035+        must not be None. """
2036+        precondition((max_size is not None) or (not create), max_size, create)
2037+        self.shnum = shnum
2038+        self.storage_index = storageindex
2039+        self.fname = os.path.join(sharedir, storage_index_to_dir(storageindex), str(shnum))
2040+        self._max_size = max_size
2041+        if create:
2042+            # touch the file, so later callers will see that we're working on
2043+            # it. Also construct the metadata.
2044+            assert not os.path.exists(self.fname)
2045+            fileutil.make_dirs(os.path.dirname(self.fname))
2046+            f = open(self.fname, 'wb')
2047+            # The second field -- the four-byte share data length -- is no
2048+            # longer used as of Tahoe v1.3.0, but we continue to write it in
2049+            # there in case someone downgrades a storage server from >=
2050+            # Tahoe-1.3.0 to < Tahoe-1.3.0, or moves a share file from one
2051+            # server to another, etc. We do saturation -- a share data length
2052+            # larger than 2**32-1 (what can fit into the field) is marked as
2053+            # the largest length that can fit into the field. That way, even
2054+            # if this does happen, the old < v1.3.0 server will still allow
2055+            # clients to read the first part of the share.
2056+            f.write(struct.pack(">LLL", 1, min(2**32-1, max_size), 0))
2057+            f.close()
2058+            self._lease_offset = max_size + 0x0c
2059+            self._num_leases = 0
2060+        else:
2061+            f = open(self.fname, 'rb')
2062+            filesize = os.path.getsize(self.fname)
2063+            (version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc))
2064+            f.close()
2065+            if version != 1:
2066+                msg = "sharefile %s had version %d but we wanted 1" % \
2067+                      (self.fname, version)
2068+                raise UnknownImmutableContainerVersionError(msg)
2069+            self._num_leases = num_leases
2070+            self._lease_offset = filesize - (num_leases * self.LEASE_SIZE)
2071+        self._data_offset = 0xc
2072+
2073+    def get_shnum(self):
2074+        return self.shnum
2075+
2076+    def unlink(self):
2077+        os.unlink(self.fname)
2078+
2079+    def read_share_data(self, offset, length):
2080+        precondition(offset >= 0)
2081+        # Reads beyond the end of the data are truncated. Reads that start
2082+        # beyond the end of the data return an empty string.
2083+        seekpos = self._data_offset+offset
2084+        fsize = os.path.getsize(self.fname)
2085+        actuallength = max(0, min(length, fsize-seekpos))
2086+        if actuallength == 0:
2087+            return ""
2088+        f = open(self.fname, 'rb')
2089+        f.seek(seekpos)
2090+        return f.read(actuallength)
2091+
2092+    def write_share_data(self, offset, data):
2093+        length = len(data)
2094+        precondition(offset >= 0, offset)
2095+        if self._max_size is not None and offset+length > self._max_size:
2096+            raise DataTooLargeError(self._max_size, offset, length)
2097+        f = open(self.fname, 'rb+')
2098+        real_offset = self._data_offset+offset
2099+        f.seek(real_offset)
2100+        assert f.tell() == real_offset
2101+        f.write(data)
2102+        f.close()
2103+
2104+    def _write_lease_record(self, f, lease_number, lease_info):
2105+        offset = self._lease_offset + lease_number * self.LEASE_SIZE
2106+        f.seek(offset)
2107+        assert f.tell() == offset
2108+        f.write(lease_info.to_immutable_data())
2109+
2110+    def _read_num_leases(self, f):
2111+        f.seek(0x08)
2112+        (num_leases,) = struct.unpack(">L", f.read(4))
2113+        return num_leases
2114+
2115+    def _write_num_leases(self, f, num_leases):
2116+        f.seek(0x08)
2117+        f.write(struct.pack(">L", num_leases))
2118+
2119+    def _truncate_leases(self, f, num_leases):
2120+        f.truncate(self._lease_offset + num_leases * self.LEASE_SIZE)
2121+
2122+    def get_leases(self):
2123+        """Yields a LeaseInfo instance for all leases."""
2124+        f = open(self.fname, 'rb')
2125+        (version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc))
2126+        f.seek(self._lease_offset)
2127+        for i in range(num_leases):
2128+            data = f.read(self.LEASE_SIZE)
2129+            if data:
2130+                yield LeaseInfo().from_immutable_data(data)
2131+
2132+    def add_lease(self, lease_info):
2133+        f = open(self.fname, 'rb+')
2134+        num_leases = self._read_num_leases(f)
2135+        self._write_lease_record(f, num_leases, lease_info)
2136+        self._write_num_leases(f, num_leases+1)
2137+        f.close()
2138+
2139+    def renew_lease(self, renew_secret, new_expire_time):
2140+        for i,lease in enumerate(self.get_leases()):
2141+            if constant_time_compare(lease.renew_secret, renew_secret):
2142+                # yup. See if we need to update the owner time.
2143+                if new_expire_time > lease.expiration_time:
2144+                    # yes
2145+                    lease.expiration_time = new_expire_time
2146+                    f = open(self.fname, 'rb+')
2147+                    self._write_lease_record(f, i, lease)
2148+                    f.close()
2149+                return
2150+        raise IndexError("unable to renew non-existent lease")
2151+
2152+    def add_or_renew_lease(self, lease_info):
2153+        try:
2154+            self.renew_lease(lease_info.renew_secret,
2155+                             lease_info.expiration_time)
2156+        except IndexError:
2157+            self.add_lease(lease_info)
2158+
2159+
2160+    def cancel_lease(self, cancel_secret):
2161+        """Remove a lease with the given cancel_secret. If the last lease is
2162+        cancelled, the file will be removed. Return the number of bytes that
2163+        were freed (by truncating the list of leases, and possibly by
2164+        deleting the file. Raise IndexError if there was no lease with the
2165+        given cancel_secret.
2166+        """
2167+
2168+        leases = list(self.get_leases())
2169+        num_leases_removed = 0
2170+        for i,lease in enumerate(leases):
2171+            if constant_time_compare(lease.cancel_secret, cancel_secret):
2172+                leases[i] = None
2173+                num_leases_removed += 1
2174+        if not num_leases_removed:
2175+            raise IndexError("unable to find matching lease to cancel")
2176+        if num_leases_removed:
2177+            # pack and write out the remaining leases. We write these out in
2178+            # the same order as they were added, so that if we crash while
2179+            # doing this, we won't lose any non-cancelled leases.
2180+            leases = [l for l in leases if l] # remove the cancelled leases
2181+            f = open(self.fname, 'rb+')
2182+            for i,lease in enumerate(leases):
2183+                self._write_lease_record(f, i, lease)
2184+            self._write_num_leases(f, len(leases))
2185+            self._truncate_leases(f, len(leases))
2186+            f.close()
2187+        space_freed = self.LEASE_SIZE * num_leases_removed
2188+        if not len(leases):
2189+            space_freed += os.stat(self.fname)[stat.ST_SIZE]
2190+            self.unlink()
2191+        return space_freed
2192hunk ./src/allmydata/storage/immutable.py 114
2193 class BucketReader(Referenceable):
2194     implements(RIBucketReader)
2195 
2196-    def __init__(self, ss, sharefname, storage_index=None, shnum=None):
2197+    def __init__(self, ss, share):
2198         self.ss = ss
2199hunk ./src/allmydata/storage/immutable.py 116
2200-        self._share_file = ShareFile(sharefname)
2201-        self.storage_index = storage_index
2202-        self.shnum = shnum
2203+        self._share_file = share
2204+        self.storage_index = share.storage_index
2205+        self.shnum = share.shnum
2206 
2207     def __repr__(self):
2208         return "<%s %s %s>" % (self.__class__.__name__,
2209hunk ./src/allmydata/storage/server.py 316
2210         si_s = si_b2a(storage_index)
2211         log.msg("storage: get_buckets %s" % si_s)
2212         bucketreaders = {} # k: sharenum, v: BucketReader
2213-        for shnum, filename in self.backend.get_shares(storage_index):
2214-            bucketreaders[shnum] = BucketReader(self, filename,
2215-                                                storage_index, shnum)
2216+        self.backend.set_storage_server(self)
2217+        for share in self.backend.get_shares(storage_index):
2218+            bucketreaders[share.get_shnum()] = self.backend.make_bucket_reader(share)
2219         self.add_latency("get", time.time() - start)
2220         return bucketreaders
2221 
2222hunk ./src/allmydata/test/test_backends.py 25
2223 tempdir = 'teststoredir'
2224 sharedirname = os.path.join(tempdir, 'shares', 'or', 'orsxg5dtorxxeylhmvpws3temv4a')
2225 sharefname = os.path.join(sharedirname, '0')
2226+expiration_policy = {'enabled' : False,
2227+                     'mode' : 'age',
2228+                     'override_lease_duration' : None,
2229+                     'cutoff_date' : None,
2230+                     'sharetypes' : None}
2231 
2232 class TestServerConstruction(unittest.TestCase, ReallyEqualMixin):
2233     @mock.patch('time.time')
2234hunk ./src/allmydata/test/test_backends.py 43
2235         tries to read or write to the file system. """
2236 
2237         # Now begin the test.
2238-        s = StorageServer('testnodeidxxxxxxxxxx', backend=NullBackend())
2239+        s = StorageServer('testnodeidxxxxxxxxxx', backend=NullCore())
2240 
2241         self.failIf(mockisdir.called)
2242         self.failIf(mocklistdir.called)
2243hunk ./src/allmydata/test/test_backends.py 74
2244         mockopen.side_effect = call_open
2245 
2246         # Now begin the test.
2247-        s = StorageServer('testnodeidxxxxxxxxxx', backend=FSBackend('teststoredir'))
2248+        s = StorageServer('testnodeidxxxxxxxxxx', backend=DASCore('teststoredir', expiration_policy))
2249 
2250         self.failIf(mockisdir.called)
2251         self.failIf(mocklistdir.called)
2252hunk ./src/allmydata/test/test_backends.py 86
2253 
2254 class TestServerNullBackend(unittest.TestCase, ReallyEqualMixin):
2255     def setUp(self):
2256-        self.s = StorageServer('testnodeidxxxxxxxxxx', backend=NullBackend())
2257+        self.s = StorageServer('testnodeidxxxxxxxxxx', backend=NullCore())
2258 
2259     @mock.patch('os.mkdir')
2260     @mock.patch('__builtin__.open')
2261hunk ./src/allmydata/test/test_backends.py 136
2262             elif fname == os.path.join(tempdir, 'lease_checker.history'):
2263                 return StringIO()
2264         mockopen.side_effect = call_open
2265-        expiration_policy = {'enabled' : False,
2266-                             'mode' : 'age',
2267-                             'override_lease_duration' : None,
2268-                             'cutoff_date' : None,
2269-                             'sharetypes' : None}
2270         testbackend = DASCore(tempdir, expiration_policy)
2271         self.s = StorageServer('testnodeidxxxxxxxxxx', backend=DASCore(tempdir, expiration_policy) )
2272 
2273}
2274[checkpoint5
2275wilcoxjg@gmail.com**20110705034626
2276 Ignore-this: 255780bd58299b0aa33c027e9d008262
2277] {
2278addfile ./src/allmydata/storage/backends/base.py
2279hunk ./src/allmydata/storage/backends/base.py 1
2280+from twisted.application import service
2281+
2282+class Backend(service.MultiService):
2283+    def __init__(self):
2284+        service.MultiService.__init__(self)
2285hunk ./src/allmydata/storage/backends/null/core.py 19
2286 
2287     def make_bucket_writer(self, storageindex, shnum, max_space_per_bucket, lease_info, canary):
2288         
2289+        immutableshare = ImmutableShare()
2290         return BucketWriter(self.ss, immutableshare, max_space_per_bucket, lease_info, canary)
2291 
2292     def set_storage_server(self, ss):
2293hunk ./src/allmydata/storage/backends/null/core.py 28
2294 class ImmutableShare:
2295     sharetype = "immutable"
2296 
2297-    def __init__(self, sharedir, storageindex, shnum, max_size=None, create=False):
2298+    def __init__(self):
2299         """ If max_size is not None then I won't allow more than
2300         max_size to be written to me. If create=True then max_size
2301         must not be None. """
2302hunk ./src/allmydata/storage/backends/null/core.py 32
2303-        precondition((max_size is not None) or (not create), max_size, create)
2304-        self.shnum = shnum
2305-        self.storage_index = storageindex
2306-        self.fname = os.path.join(sharedir, storage_index_to_dir(storageindex), str(shnum))
2307-        self._max_size = max_size
2308-        if create:
2309-            # touch the file, so later callers will see that we're working on
2310-            # it. Also construct the metadata.
2311-            assert not os.path.exists(self.fname)
2312-            fileutil.make_dirs(os.path.dirname(self.fname))
2313-            f = open(self.fname, 'wb')
2314-            # The second field -- the four-byte share data length -- is no
2315-            # longer used as of Tahoe v1.3.0, but we continue to write it in
2316-            # there in case someone downgrades a storage server from >=
2317-            # Tahoe-1.3.0 to < Tahoe-1.3.0, or moves a share file from one
2318-            # server to another, etc. We do saturation -- a share data length
2319-            # larger than 2**32-1 (what can fit into the field) is marked as
2320-            # the largest length that can fit into the field. That way, even
2321-            # if this does happen, the old < v1.3.0 server will still allow
2322-            # clients to read the first part of the share.
2323-            f.write(struct.pack(">LLL", 1, min(2**32-1, max_size), 0))
2324-            f.close()
2325-            self._lease_offset = max_size + 0x0c
2326-            self._num_leases = 0
2327-        else:
2328-            f = open(self.fname, 'rb')
2329-            filesize = os.path.getsize(self.fname)
2330-            (version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc))
2331-            f.close()
2332-            if version != 1:
2333-                msg = "sharefile %s had version %d but we wanted 1" % \
2334-                      (self.fname, version)
2335-                raise UnknownImmutableContainerVersionError(msg)
2336-            self._num_leases = num_leases
2337-            self._lease_offset = filesize - (num_leases * self.LEASE_SIZE)
2338-        self._data_offset = 0xc
2339+        pass
2340 
2341     def get_shnum(self):
2342         return self.shnum
2343hunk ./src/allmydata/storage/backends/null/core.py 54
2344         return f.read(actuallength)
2345 
2346     def write_share_data(self, offset, data):
2347-        length = len(data)
2348-        precondition(offset >= 0, offset)
2349-        if self._max_size is not None and offset+length > self._max_size:
2350-            raise DataTooLargeError(self._max_size, offset, length)
2351-        f = open(self.fname, 'rb+')
2352-        real_offset = self._data_offset+offset
2353-        f.seek(real_offset)
2354-        assert f.tell() == real_offset
2355-        f.write(data)
2356-        f.close()
2357+        pass
2358 
2359     def _write_lease_record(self, f, lease_number, lease_info):
2360         offset = self._lease_offset + lease_number * self.LEASE_SIZE
2361hunk ./src/allmydata/storage/backends/null/core.py 84
2362             if data:
2363                 yield LeaseInfo().from_immutable_data(data)
2364 
2365-    def add_lease(self, lease_info):
2366-        f = open(self.fname, 'rb+')
2367-        num_leases = self._read_num_leases(f)
2368-        self._write_lease_record(f, num_leases, lease_info)
2369-        self._write_num_leases(f, num_leases+1)
2370-        f.close()
2371+    def add_lease(self, lease):
2372+        pass
2373 
2374     def renew_lease(self, renew_secret, new_expire_time):
2375         for i,lease in enumerate(self.get_leases()):
2376hunk ./src/allmydata/test/test_backends.py 32
2377                      'sharetypes' : None}
2378 
2379 class TestServerConstruction(unittest.TestCase, ReallyEqualMixin):
2380-    @mock.patch('time.time')
2381-    @mock.patch('os.mkdir')
2382-    @mock.patch('__builtin__.open')
2383-    @mock.patch('os.listdir')
2384-    @mock.patch('os.path.isdir')
2385-    def test_create_server_null_backend(self, mockisdir, mocklistdir, mockopen, mockmkdir, mocktime):
2386-        """ This tests whether a server instance can be constructed
2387-        with a null backend. The server instance fails the test if it
2388-        tries to read or write to the file system. """
2389-
2390-        # Now begin the test.
2391-        s = StorageServer('testnodeidxxxxxxxxxx', backend=NullCore())
2392-
2393-        self.failIf(mockisdir.called)
2394-        self.failIf(mocklistdir.called)
2395-        self.failIf(mockopen.called)
2396-        self.failIf(mockmkdir.called)
2397-
2398-        # You passed!
2399-
2400     @mock.patch('time.time')
2401     @mock.patch('os.mkdir')
2402     @mock.patch('__builtin__.open')
2403hunk ./src/allmydata/test/test_backends.py 53
2404                 self.fail("Server with FS backend tried to open '%s' in mode '%s'" % (fname, mode))
2405         mockopen.side_effect = call_open
2406 
2407-        # Now begin the test.
2408-        s = StorageServer('testnodeidxxxxxxxxxx', backend=DASCore('teststoredir', expiration_policy))
2409-
2410-        self.failIf(mockisdir.called)
2411-        self.failIf(mocklistdir.called)
2412-        self.failIf(mockopen.called)
2413-        self.failIf(mockmkdir.called)
2414-        self.failIf(mocktime.called)
2415-
2416-        # You passed!
2417-
2418-class TestServerNullBackend(unittest.TestCase, ReallyEqualMixin):
2419-    def setUp(self):
2420-        self.s = StorageServer('testnodeidxxxxxxxxxx', backend=NullCore())
2421-
2422-    @mock.patch('os.mkdir')
2423-    @mock.patch('__builtin__.open')
2424-    @mock.patch('os.listdir')
2425-    @mock.patch('os.path.isdir')
2426-    def test_write_share(self, mockisdir, mocklistdir, mockopen, mockmkdir):
2427-        """ Write a new share. """
2428-
2429-        # Now begin the test.
2430-        alreadygot, bs = self.s.remote_allocate_buckets('teststorage_index', 'x'*32, 'y'*32, set((0,)), 1, mock.Mock())
2431-        bs[0].remote_write(0, 'a')
2432-        self.failIf(mockisdir.called)
2433-        self.failIf(mocklistdir.called)
2434-        self.failIf(mockopen.called)
2435-        self.failIf(mockmkdir.called)
2436+        def call_isdir(fname):
2437+            if fname == os.path.join(tempdir,'shares'):
2438+                return True
2439+            elif fname == os.path.join(tempdir,'shares', 'incoming'):
2440+                return True
2441+            else:
2442+                self.fail("Server with FS backend tried to idsir '%s'" % (fname,))
2443+        mockisdir.side_effect = call_isdir
2444 
2445hunk ./src/allmydata/test/test_backends.py 62
2446-    @mock.patch('os.path.exists')
2447-    @mock.patch('os.path.getsize')
2448-    @mock.patch('__builtin__.open')
2449-    @mock.patch('os.listdir')
2450-    def test_read_share(self, mocklistdir, mockopen, mockgetsize, mockexists):
2451-        """ This tests whether the code correctly finds and reads
2452-        shares written out by old (Tahoe-LAFS <= v1.8.2)
2453-        servers. There is a similar test in test_download, but that one
2454-        is from the perspective of the client and exercises a deeper
2455-        stack of code. This one is for exercising just the
2456-        StorageServer object. """
2457+        def call_mkdir(fname, mode):
2458+            """XXX something is calling mkdir teststoredir and teststoredir/shares twice...  this is odd!"""
2459+            self.failUnlessEqual(0777, mode)
2460+            if fname == tempdir:
2461+                return None
2462+            elif fname == os.path.join(tempdir,'shares'):
2463+                return None
2464+            elif fname == os.path.join(tempdir,'shares', 'incoming'):
2465+                return None
2466+            else:
2467+                self.fail("Server with FS backend tried to mkdir '%s'" % (fname,))
2468+        mockmkdir.side_effect = call_mkdir
2469 
2470         # Now begin the test.
2471hunk ./src/allmydata/test/test_backends.py 76
2472-        bs = self.s.remote_get_buckets('teststorage_index')
2473+        s = StorageServer('testnodeidxxxxxxxxxx', backend=DASCore('teststoredir', expiration_policy))
2474 
2475hunk ./src/allmydata/test/test_backends.py 78
2476-        self.failUnlessEqual(len(bs), 0)
2477-        self.failIf(mocklistdir.called)
2478-        self.failIf(mockopen.called)
2479-        self.failIf(mockgetsize.called)
2480-        self.failIf(mockexists.called)
2481+        self.failIf(mocklistdir.called, mocklistdir.call_args_list)
2482 
2483 
2484 class TestServerFSBackend(unittest.TestCase, ReallyEqualMixin):
2485hunk ./src/allmydata/test/test_backends.py 193
2486         self.failUnlessReallyEqual(b.remote_read(datalen+1, 3), '')
2487 
2488 
2489+
2490+class TestBackendConstruction(unittest.TestCase, ReallyEqualMixin):
2491+    @mock.patch('time.time')
2492+    @mock.patch('os.mkdir')
2493+    @mock.patch('__builtin__.open')
2494+    @mock.patch('os.listdir')
2495+    @mock.patch('os.path.isdir')
2496+    def test_create_fs_backend(self, mockisdir, mocklistdir, mockopen, mockmkdir, mocktime):
2497+        """ This tests whether a file system backend instance can be
2498+        constructed. To pass the test, it has to use the
2499+        filesystem in only the prescribed ways. """
2500+
2501+        def call_open(fname, mode):
2502+            if fname == os.path.join(tempdir,'bucket_counter.state'):
2503+                raise IOError(2, "No such file or directory: '%s'" % os.path.join(tempdir, 'bucket_counter.state'))
2504+            elif fname == os.path.join(tempdir, 'lease_checker.state'):
2505+                raise IOError(2, "No such file or directory: '%s'" % os.path.join(tempdir, 'lease_checker.state'))
2506+            elif fname == os.path.join(tempdir, 'lease_checker.history'):
2507+                return StringIO()
2508+            else:
2509+                self.fail("Server with FS backend tried to open '%s' in mode '%s'" % (fname, mode))
2510+        mockopen.side_effect = call_open
2511+
2512+        def call_isdir(fname):
2513+            if fname == os.path.join(tempdir,'shares'):
2514+                return True
2515+            elif fname == os.path.join(tempdir,'shares', 'incoming'):
2516+                return True
2517+            else:
2518+                self.fail("Server with FS backend tried to idsir '%s'" % (fname,))
2519+        mockisdir.side_effect = call_isdir
2520+
2521+        def call_mkdir(fname, mode):
2522+            """XXX something is calling mkdir teststoredir and teststoredir/shares twice...  this is odd!"""
2523+            self.failUnlessEqual(0777, mode)
2524+            if fname == tempdir:
2525+                return None
2526+            elif fname == os.path.join(tempdir,'shares'):
2527+                return None
2528+            elif fname == os.path.join(tempdir,'shares', 'incoming'):
2529+                return None
2530+            else:
2531+                self.fail("Server with FS backend tried to mkdir '%s'" % (fname,))
2532+        mockmkdir.side_effect = call_mkdir
2533+
2534+        # Now begin the test.
2535+        DASCore('teststoredir', expiration_policy)
2536+
2537+        self.failIf(mocklistdir.called, mocklistdir.call_args_list)
2538}
2539
2540Context:
2541
2542[add Protovis.js-based download-status timeline visualization
2543Brian Warner <warner@lothar.com>**20110629222606
2544 Ignore-this: 477ccef5c51b30e246f5b6e04ab4a127
2545 
2546 provide status overlap info on the webapi t=json output, add decode/decrypt
2547 rate tooltips, add zoomin/zoomout buttons
2548]
2549[add more download-status data, fix tests
2550Brian Warner <warner@lothar.com>**20110629222555
2551 Ignore-this: e9e0b7e0163f1e95858aa646b9b17b8c
2552]
2553[prepare for viz: improve DownloadStatus events
2554Brian Warner <warner@lothar.com>**20110629222542
2555 Ignore-this: 16d0bde6b734bb501aa6f1174b2b57be
2556 
2557 consolidate IDownloadStatusHandlingConsumer stuff into DownloadNode
2558]
2559[docs: fix error in crypto specification that was noticed by Taylor R Campbell <campbell+tahoe@mumble.net>
2560zooko@zooko.com**20110629185711
2561 Ignore-this: b921ed60c1c8ba3c390737fbcbe47a67
2562]
2563[setup.py: don't make bin/tahoe.pyscript executable. fixes #1347
2564david-sarah@jacaranda.org**20110130235809
2565 Ignore-this: 3454c8b5d9c2c77ace03de3ef2d9398a
2566]
2567[Makefile: remove targets relating to 'setup.py check_auto_deps' which no longer exists. fixes #1345
2568david-sarah@jacaranda.org**20110626054124
2569 Ignore-this: abb864427a1b91bd10d5132b4589fd90
2570]
2571[Makefile: add 'make check' as an alias for 'make test'. Also remove an unnecessary dependency of 'test' on 'build' and 'src/allmydata/_version.py'. fixes #1344
2572david-sarah@jacaranda.org**20110623205528
2573 Ignore-this: c63e23146c39195de52fb17c7c49b2da
2574]
2575[Rename test_package_initialization.py to (much shorter) test_import.py .
2576Brian Warner <warner@lothar.com>**20110611190234
2577 Ignore-this: 3eb3dbac73600eeff5cfa6b65d65822
2578 
2579 The former name was making my 'ls' listings hard to read, by forcing them
2580 down to just two columns.
2581]
2582[tests: fix tests to accomodate [20110611153758-92b7f-0ba5e4726fb6318dac28fb762a6512a003f4c430]
2583zooko@zooko.com**20110611163741
2584 Ignore-this: 64073a5f39e7937e8e5e1314c1a302d1
2585 Apparently none of the two authors (stercor, terrell), three reviewers (warner, davidsarah, terrell), or one committer (me) actually ran the tests. This is presumably due to #20.
2586 fixes #1412
2587]
2588[wui: right-align the size column in the WUI
2589zooko@zooko.com**20110611153758
2590 Ignore-this: 492bdaf4373c96f59f90581c7daf7cd7
2591 Thanks to Ted "stercor" Rolle Jr. and Terrell Russell.
2592 fixes #1412
2593]
2594[docs: three minor fixes
2595zooko@zooko.com**20110610121656
2596 Ignore-this: fec96579eb95aceb2ad5fc01a814c8a2
2597 CREDITS for arc for stats tweak
2598 fix link to .zip file in quickstart.rst (thanks to ChosenOne for noticing)
2599 English usage tweak
2600]
2601[docs/running.rst: fix stray HTML (not .rst) link noticed by ChosenOne.
2602david-sarah@jacaranda.org**20110609223719
2603 Ignore-this: fc50ac9c94792dcac6f1067df8ac0d4a
2604]
2605[server.py:  get_latencies now reports percentiles _only_ if there are sufficient observations for the interpretation of the percentile to be unambiguous.
2606wilcoxjg@gmail.com**20110527120135
2607 Ignore-this: 2e7029764bffc60e26f471d7c2b6611e
2608 interfaces.py:  modified the return type of RIStatsProvider.get_stats to allow for None as a return value
2609 NEWS.rst, stats.py: documentation of change to get_latencies
2610 stats.rst: now documents percentile modification in get_latencies
2611 test_storage.py:  test_latencies now expects None in output categories that contain too few samples for the associated percentile to be unambiguously reported.
2612 fixes #1392
2613]
2614[docs: revert link in relnotes.txt from NEWS.rst to NEWS, since the former did not exist at revision 5000.
2615david-sarah@jacaranda.org**20110517011214
2616 Ignore-this: 6a5be6e70241e3ec0575641f64343df7
2617]
2618[docs: convert NEWS to NEWS.rst and change all references to it.
2619david-sarah@jacaranda.org**20110517010255
2620 Ignore-this: a820b93ea10577c77e9c8206dbfe770d
2621]
2622[docs: remove out-of-date docs/testgrid/introducer.furl and containing directory. fixes #1404
2623david-sarah@jacaranda.org**20110512140559
2624 Ignore-this: 784548fc5367fac5450df1c46890876d
2625]
2626[scripts/common.py: don't assume that the default alias is always 'tahoe' (it is, but the API of get_alias doesn't say so). refs #1342
2627david-sarah@jacaranda.org**20110130164923
2628 Ignore-this: a271e77ce81d84bb4c43645b891d92eb
2629]
2630[setup: don't catch all Exception from check_requirement(), but only PackagingError and ImportError
2631zooko@zooko.com**20110128142006
2632 Ignore-this: 57d4bc9298b711e4bc9dc832c75295de
2633 I noticed this because I had accidentally inserted a bug which caused AssertionError to be raised from check_requirement().
2634]
2635[M-x whitespace-cleanup
2636zooko@zooko.com**20110510193653
2637 Ignore-this: dea02f831298c0f65ad096960e7df5c7
2638]
2639[docs: fix typo in running.rst, thanks to arch_o_median
2640zooko@zooko.com**20110510193633
2641 Ignore-this: ca06de166a46abbc61140513918e79e8
2642]
2643[relnotes.txt: don't claim to work on Cygwin (which has been untested for some time). refs #1342
2644david-sarah@jacaranda.org**20110204204902
2645 Ignore-this: 85ef118a48453d93fa4cddc32d65b25b
2646]
2647[relnotes.txt: forseeable -> foreseeable. refs #1342
2648david-sarah@jacaranda.org**20110204204116
2649 Ignore-this: 746debc4d82f4031ebf75ab4031b3a9
2650]
2651[replace remaining .html docs with .rst docs
2652zooko@zooko.com**20110510191650
2653 Ignore-this: d557d960a986d4ac8216d1677d236399
2654 Remove install.html (long since deprecated).
2655 Also replace some obsolete references to install.html with references to quickstart.rst.
2656 Fix some broken internal references within docs/historical/historical_known_issues.txt.
2657 Thanks to Ravi Pinjala and Patrick McDonald.
2658 refs #1227
2659]
2660[docs: FTP-and-SFTP.rst: fix a minor error and update the information about which version of Twisted fixes #1297
2661zooko@zooko.com**20110428055232
2662 Ignore-this: b63cfb4ebdbe32fb3b5f885255db4d39
2663]
2664[munin tahoe_files plugin: fix incorrect file count
2665francois@ctrlaltdel.ch**20110428055312
2666 Ignore-this: 334ba49a0bbd93b4a7b06a25697aba34
2667 fixes #1391
2668]
2669[corrected "k must never be smaller than N" to "k must never be greater than N"
2670secorp@allmydata.org**20110425010308
2671 Ignore-this: 233129505d6c70860087f22541805eac
2672]
2673[Fix a test failure in test_package_initialization on Python 2.4.x due to exceptions being stringified differently than in later versions of Python. refs #1389
2674david-sarah@jacaranda.org**20110411190738
2675 Ignore-this: 7847d26bc117c328c679f08a7baee519
2676]
2677[tests: add test for including the ImportError message and traceback entry in the summary of errors from importing dependencies. refs #1389
2678david-sarah@jacaranda.org**20110410155844
2679 Ignore-this: fbecdbeb0d06a0f875fe8d4030aabafa
2680]
2681[allmydata/__init__.py: preserve the message and last traceback entry (file, line number, function, and source line) of ImportErrors in the package versions string. fixes #1389
2682david-sarah@jacaranda.org**20110410155705
2683 Ignore-this: 2f87b8b327906cf8bfca9440a0904900
2684]
2685[remove unused variable detected by pyflakes
2686zooko@zooko.com**20110407172231
2687 Ignore-this: 7344652d5e0720af822070d91f03daf9
2688]
2689[allmydata/__init__.py: Nicer reporting of unparseable version numbers in dependencies. fixes #1388
2690david-sarah@jacaranda.org**20110401202750
2691 Ignore-this: 9c6bd599259d2405e1caadbb3e0d8c7f
2692]
2693[update FTP-and-SFTP.rst: the necessary patch is included in Twisted-10.1
2694Brian Warner <warner@lothar.com>**20110325232511
2695 Ignore-this: d5307faa6900f143193bfbe14e0f01a
2696]
2697[control.py: remove all uses of s.get_serverid()
2698warner@lothar.com**20110227011203
2699 Ignore-this: f80a787953bd7fa3d40e828bde00e855
2700]
2701[web: remove some uses of s.get_serverid(), not all
2702warner@lothar.com**20110227011159
2703 Ignore-this: a9347d9cf6436537a47edc6efde9f8be
2704]
2705[immutable/downloader/fetcher.py: remove all get_serverid() calls
2706warner@lothar.com**20110227011156
2707 Ignore-this: fb5ef018ade1749348b546ec24f7f09a
2708]
2709[immutable/downloader/fetcher.py: fix diversity bug in server-response handling
2710warner@lothar.com**20110227011153
2711 Ignore-this: bcd62232c9159371ae8a16ff63d22c1b
2712 
2713 When blocks terminate (either COMPLETE or CORRUPT/DEAD/BADSEGNUM), the
2714 _shares_from_server dict was being popped incorrectly (using shnum as the
2715 index instead of serverid). I'm still thinking through the consequences of
2716 this bug. It was probably benign and really hard to detect. I think it would
2717 cause us to incorrectly believe that we're pulling too many shares from a
2718 server, and thus prefer a different server rather than asking for a second
2719 share from the first server. The diversity code is intended to spread out the
2720 number of shares simultaneously being requested from each server, but with
2721 this bug, it might be spreading out the total number of shares requested at
2722 all, not just simultaneously. (note that SegmentFetcher is scoped to a single
2723 segment, so the effect doesn't last very long).
2724]
2725[immutable/downloader/share.py: reduce get_serverid(), one left, update ext deps
2726warner@lothar.com**20110227011150
2727 Ignore-this: d8d56dd8e7b280792b40105e13664554
2728 
2729 test_download.py: create+check MyShare instances better, make sure they share
2730 Server objects, now that finder.py cares
2731]
2732[immutable/downloader/finder.py: reduce use of get_serverid(), one left
2733warner@lothar.com**20110227011146
2734 Ignore-this: 5785be173b491ae8a78faf5142892020
2735]
2736[immutable/offloaded.py: reduce use of get_serverid() a bit more
2737warner@lothar.com**20110227011142
2738 Ignore-this: b48acc1b2ae1b311da7f3ba4ffba38f
2739]
2740[immutable/upload.py: reduce use of get_serverid()
2741warner@lothar.com**20110227011138
2742 Ignore-this: ffdd7ff32bca890782119a6e9f1495f6
2743]
2744[immutable/checker.py: remove some uses of s.get_serverid(), not all
2745warner@lothar.com**20110227011134
2746 Ignore-this: e480a37efa9e94e8016d826c492f626e
2747]
2748[add remaining get_* methods to storage_client.Server, NoNetworkServer, and
2749warner@lothar.com**20110227011132
2750 Ignore-this: 6078279ddf42b179996a4b53bee8c421
2751 MockIServer stubs
2752]
2753[upload.py: rearrange _make_trackers a bit, no behavior changes
2754warner@lothar.com**20110227011128
2755 Ignore-this: 296d4819e2af452b107177aef6ebb40f
2756]
2757[happinessutil.py: finally rename merge_peers to merge_servers
2758warner@lothar.com**20110227011124
2759 Ignore-this: c8cd381fea1dd888899cb71e4f86de6e
2760]
2761[test_upload.py: factor out FakeServerTracker
2762warner@lothar.com**20110227011120
2763 Ignore-this: 6c182cba90e908221099472cc159325b
2764]
2765[test_upload.py: server-vs-tracker cleanup
2766warner@lothar.com**20110227011115
2767 Ignore-this: 2915133be1a3ba456e8603885437e03
2768]
2769[happinessutil.py: server-vs-tracker cleanup
2770warner@lothar.com**20110227011111
2771 Ignore-this: b856c84033562d7d718cae7cb01085a9
2772]
2773[upload.py: more tracker-vs-server cleanup
2774warner@lothar.com**20110227011107
2775 Ignore-this: bb75ed2afef55e47c085b35def2de315
2776]
2777[upload.py: fix var names to avoid confusion between 'trackers' and 'servers'
2778warner@lothar.com**20110227011103
2779 Ignore-this: 5d5e3415b7d2732d92f42413c25d205d
2780]
2781[refactor: s/peer/server/ in immutable/upload, happinessutil.py, test_upload
2782warner@lothar.com**20110227011100
2783 Ignore-this: 7ea858755cbe5896ac212a925840fe68
2784 
2785 No behavioral changes, just updating variable/method names and log messages.
2786 The effects outside these three files should be minimal: some exception
2787 messages changed (to say "server" instead of "peer"), and some internal class
2788 names were changed. A few things still use "peer" to minimize external
2789 changes, like UploadResults.timings["peer_selection"] and
2790 happinessutil.merge_peers, which can be changed later.
2791]
2792[storage_client.py: clean up test_add_server/test_add_descriptor, remove .test_servers
2793warner@lothar.com**20110227011056
2794 Ignore-this: efad933e78179d3d5fdcd6d1ef2b19cc
2795]
2796[test_client.py, upload.py:: remove KiB/MiB/etc constants, and other dead code
2797warner@lothar.com**20110227011051
2798 Ignore-this: dc83c5794c2afc4f81e592f689c0dc2d
2799]
2800[test: increase timeout on a network test because Francois's ARM machine hit that timeout
2801zooko@zooko.com**20110317165909
2802 Ignore-this: 380c345cdcbd196268ca5b65664ac85b
2803 I'm skeptical that the test was proceeding correctly but ran out of time. It seems more likely that it had gotten hung. But if we raise the timeout to an even more extravagant number then we can be even more certain that the test was never going to finish.
2804]
2805[docs/configuration.rst: add a "Frontend Configuration" section
2806Brian Warner <warner@lothar.com>**20110222014323
2807 Ignore-this: 657018aa501fe4f0efef9851628444ca
2808 
2809 this points to docs/frontends/*.rst, which were previously underlinked
2810]
2811[web/filenode.py: avoid calling req.finish() on closed HTTP connections. Closes #1366
2812"Brian Warner <warner@lothar.com>"**20110221061544
2813 Ignore-this: 799d4de19933f2309b3c0c19a63bb888
2814]
2815[Add unit tests for cross_check_pkg_resources_versus_import, and a regression test for ref #1355. This requires a little refactoring to make it testable.
2816david-sarah@jacaranda.org**20110221015817
2817 Ignore-this: 51d181698f8c20d3aca58b057e9c475a
2818]
2819[allmydata/__init__.py: .name was used in place of the correct .__name__ when printing an exception. Also, robustify string formatting by using %r instead of %s in some places. fixes #1355.
2820david-sarah@jacaranda.org**20110221020125
2821 Ignore-this: b0744ed58f161bf188e037bad077fc48
2822]
2823[Refactor StorageFarmBroker handling of servers
2824Brian Warner <warner@lothar.com>**20110221015804
2825 Ignore-this: 842144ed92f5717699b8f580eab32a51
2826 
2827 Pass around IServer instance instead of (peerid, rref) tuple. Replace
2828 "descriptor" with "server". Other replacements:
2829 
2830  get_all_servers -> get_connected_servers/get_known_servers
2831  get_servers_for_index -> get_servers_for_psi (now returns IServers)
2832 
2833 This change still needs to be pushed further down: lots of code is now
2834 getting the IServer and then distributing (peerid, rref) internally.
2835 Instead, it ought to distribute the IServer internally and delay
2836 extracting a serverid or rref until the last moment.
2837 
2838 no_network.py was updated to retain parallelism.
2839]
2840[TAG allmydata-tahoe-1.8.2
2841warner@lothar.com**20110131020101]
2842Patch bundle hash:
2843140e91e4fd8eb0f8f5cca7c5128219e97617fbf4