Ticket #778: tests.3.txt

File tests.3.txt, 41.4 KB (added by kevan, at 2009-10-30T09:47:06Z)
Sun Oct 11 20:40:05 PDT 2009  Kevan Carstensen <kevan@isnotajoke.com>
  * Alter various unit tests to work with the new happy behavior

Sat Oct 17 18:30:13 PDT 2009  Kevan Carstensen <kevan@isnotajoke.com>
  * Alter NoNetworkGrid to allow the creation of readonly servers for testing purposes.

Fri Oct 30 02:19:08 PDT 2009  "Kevan Carstensen" <kevan@isnotajoke.com>
  * Refactor some behavior into a mixin, and add tests for the behavior described in #778

New patches:

[Alter various unit tests to work with the new happy behavior
Kevan Carstensen <kevan@isnotajoke.com>**20091012034005
 Ignore-this: 8b6823526e72390cf69e0f1cf1f97de4
] {
hunk ./src/allmydata/test/common.py 918
             # We need multiple segments to test crypttext hash trees that are
             # non-trivial (i.e. they have more than just one hash in them).
             cl0.DEFAULT_ENCODING_PARAMETERS['max_segment_size'] = 12
+            # Tests that need to test servers of happiness using this should
+            # set their own value for happy -- the default (7) breaks stuff.
+            cl0.DEFAULT_ENCODING_PARAMETERS['happy'] = 1
             d2 = cl0.upload(immutable.upload.Data(TEST_DATA, convergence=""))
             def _after_upload(u):
                 filecap = u.uri
hunk ./src/allmydata/test/test_system.py 111
         d = self.set_up_nodes()
         def _check_connections(res):
             for c in self.clients:
+                c.DEFAULT_ENCODING_PARAMETERS['happy'] = 5
                 all_peerids = c.get_storage_broker().get_all_serverids()
                 self.failUnlessEqual(len(all_peerids), self.numclients)
                 sb = c.storage_broker
hunk ./src/allmydata/test/test_system.py 250
                                                       add_to_sparent=True))
         def _added(extra_node):
             self.extra_node = extra_node
+            self.extra_node.DEFAULT_ENCODING_PARAMETERS['happy'] = 5
         d.addCallback(_added)
 
         HELPER_DATA = "Data that needs help to upload" * 1000
hunk ./src/allmydata/test/test_system.py 749
         self.basedir = "system/SystemTest/test_vdrive"
         self.data = LARGE_DATA
         d = self.set_up_nodes(use_stats_gatherer=True)
+        def _new_happy_semantics(ign):
+            for c in self.clients:
+                c.DEFAULT_ENCODING_PARAMETERS['happy'] = 1
+        d.addCallback(_new_happy_semantics)
         d.addCallback(self._test_introweb)
         d.addCallback(self.log, "starting publish")
         d.addCallback(self._do_publish1)
hunk ./src/allmydata/test/test_system.py 1167
         d.addCallback(self.failUnlessEqual, "new.txt contents")
         # and again with something large enough to use multiple segments,
         # and hopefully trigger pauseProducing too
+        def _new_happy_semantics(ign):
+            for c in self.clients:
+                # these get reset somewhere? Whatever.
+                c.DEFAULT_ENCODING_PARAMETERS['happy'] = 1
+        d.addCallback(_new_happy_semantics)
         d.addCallback(lambda res: self.PUT(public + "/subdir3/big.txt",
                                            "big" * 500000)) # 1.5MB
         d.addCallback(lambda res: self.GET(public + "/subdir3/big.txt"))
hunk ./src/allmydata/test/test_upload.py 169
 
 class FakeClient:
     DEFAULT_ENCODING_PARAMETERS = {"k":25,
-                                   "happy": 75,
+                                   "happy": 25,
                                    "n": 100,
                                    "max_segment_size": 1*MiB,
                                    }
hunk ./src/allmydata/test/test_upload.py 315
         data = self.get_data(SIZE_LARGE)
         segsize = int(SIZE_LARGE / 2.5)
         # we want 3 segments, since that's not a power of two
-        self.set_encoding_parameters(25, 75, 100, segsize)
+        self.set_encoding_parameters(25, 25, 100, segsize)
         d = upload_data(self.u, data)
         d.addCallback(extract_uri)
         d.addCallback(self._check_large, SIZE_LARGE)
hunk ./src/allmydata/test/test_upload.py 394
     def test_first_error(self):
         mode = dict([(0,"good")] + [(i,"first-fail") for i in range(1,10)])
         self.make_node(mode)
+        self.set_encoding_parameters(k=25, happy=1, n=50)
         d = upload_data(self.u, DATA)
         d.addCallback(extract_uri)
         d.addCallback(self._check_large, SIZE_LARGE)
hunk ./src/allmydata/test/test_upload.py 512
 
         self.make_client()
         data = self.get_data(SIZE_LARGE)
-        self.set_encoding_parameters(50, 75, 100)
+        # if there are 50 peers, then happy needs to be <= 50
+        self.set_encoding_parameters(50, 50, 100)
         d = upload_data(self.u, data)
         d.addCallback(extract_uri)
         d.addCallback(self._check_large, SIZE_LARGE)
hunk ./src/allmydata/test/test_upload.py 559
 
         self.make_client()
         data = self.get_data(SIZE_LARGE)
-        self.set_encoding_parameters(100, 150, 200)
+        # if there are 50 peers, then happy should be no more than 50 if
+        # we want this to work.
+        self.set_encoding_parameters(100, 50, 200)
         d = upload_data(self.u, data)
         d.addCallback(extract_uri)
         d.addCallback(self._check_large, SIZE_LARGE)
hunk ./src/allmydata/test/test_upload.py 579
 
         self.make_client(3)
         data = self.get_data(SIZE_LARGE)
-        self.set_encoding_parameters(3, 5, 10)
+        self.set_encoding_parameters(3, 3, 10)
         d = upload_data(self.u, data)
         d.addCallback(extract_uri)
         d.addCallback(self._check_large, SIZE_LARGE)
hunk ./src/allmydata/test/test_web.py 3419
         self.basedir = "web/Grid/exceptions"
         self.set_up_grid(num_clients=1, num_servers=2)
         c0 = self.g.clients[0]
+        c0.DEFAULT_ENCODING_PARAMETERS['happy'] = 2
         self.fileurls = {}
         DATA = "data" * 100
         d = c0.create_dirnode()
}
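
A note on the pattern used by every hunk above: each test pins the 'happy'
encoding parameter before any upload runs, because the new default of 7 is
unsatisfiable on small test grids. A minimal sketch of the idiom, assuming
the GridTestMixin-style set_up_grid() and self.g.clients used by these
tests (the helper name is illustrative, not part of the patch):

    def _pin_happy(self, happy=1):
        # 'happy' must not exceed the number of servers the test grid
        # actually has, or uploads fail with NotEnoughSharesError.
        for c in self.g.clients:
            c.DEFAULT_ENCODING_PARAMETERS['happy'] = happy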
[Alter NoNetworkGrid to allow the creation of readonly servers for testing purposes.
Kevan Carstensen <kevan@isnotajoke.com>**20091018013013
 Ignore-this: e12cd7c4ddeb65305c5a7e08df57c754
] {
hunk ./src/allmydata/test/no_network.py 203
             c.setServiceParent(self)
             self.clients.append(c)
 
-    def make_server(self, i):
+    def make_server(self, i, readonly=False):
         serverid = hashutil.tagged_hash("serverid", str(i))[:20]
         serverdir = os.path.join(self.basedir, "servers",
                                  idlib.shortnodeid_b2a(serverid))
hunk ./src/allmydata/test/no_network.py 208
         fileutil.make_dirs(serverdir)
-        ss = StorageServer(serverdir, serverid, stats_provider=SimpleStats())
+        ss = StorageServer(serverdir, serverid, stats_provider=SimpleStats(),
+                           readonly_storage=readonly)
         return ss
 
     def add_server(self, i, ss):
}
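
With the readonly= argument added above, a test can stand up a storage
server that reports its existing shares but refuses new ones. A hedged
usage sketch, using the NoNetworkGrid API as it appears elsewhere in this
bundle (the helper name is illustrative):

    def _add_readonly_server(self, i):
        # make_server(i, readonly=True) builds a StorageServer with
        # readonly_storage=True; add_server() wires it into the grid.
        ss = self.g.make_server(i, readonly=True)
        self.g.add_server(i, ss)
        return ss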
[Refactor some behavior into a mixin, and add tests for the behavior described in #778
"Kevan Carstensen" <kevan@isnotajoke.com>**20091030091908
 Ignore-this: a6f9797057ca135579b249af3b2b66ac
] {
hunk ./src/allmydata/test/test_upload.py 2
 
-import os
+import os, shutil
 from cStringIO import StringIO
 from twisted.trial import unittest
 from twisted.python.failure import Failure
hunk ./src/allmydata/test/test_upload.py 12
 
 import allmydata # for __full_version__
 from allmydata import uri, monitor, client
-from allmydata.immutable import upload
+from allmydata.immutable import upload, encode
 from allmydata.interfaces import IFileURI, FileTooLargeError, NoSharesError, \
      NotEnoughSharesError
 from allmydata.util.assertutil import precondition
hunk ./src/allmydata/test/test_upload.py 20
 from no_network import GridTestMixin
 from common_util import ShouldFailMixin
 from allmydata.storage_client import StorageFarmBroker
+from allmydata.storage.server import storage_index_to_dir
 
 MiB = 1024*1024
 
hunk ./src/allmydata/test/test_upload.py 91
 class ServerError(Exception):
     pass
 
+class SetDEPMixin:
+    def set_encoding_parameters(self, k, happy, n, max_segsize=1*MiB):
+        p = {"k": k,
+             "happy": happy,
+             "n": n,
+             "max_segment_size": max_segsize,
+             }
+        self.node.DEFAULT_ENCODING_PARAMETERS = p
+
 class FakeStorageServer:
     def __init__(self, mode):
         self.mode = mode
hunk ./src/allmydata/test/test_upload.py 247
     u = upload.FileHandle(fh, convergence=None)
     return uploader.upload(u)
 
-class GoodServer(unittest.TestCase, ShouldFailMixin):
+class GoodServer(unittest.TestCase, ShouldFailMixin, SetDEPMixin):
     def setUp(self):
         self.node = FakeClient(mode="good")
         self.u = upload.Uploader()
hunk ./src/allmydata/test/test_upload.py 254
         self.u.running = True
         self.u.parent = self.node
 
-    def set_encoding_parameters(self, k, happy, n, max_segsize=1*MiB):
-        p = {"k": k,
-             "happy": happy,
-             "n": n,
-             "max_segment_size": max_segsize,
-             }
-        self.node.DEFAULT_ENCODING_PARAMETERS = p
-
     def _check_small(self, newuri, size):
         u = IFileURI(newuri)
         self.failUnless(isinstance(u, uri.LiteralFileURI))
hunk ./src/allmydata/test/test_upload.py 377
         d.addCallback(self._check_large, SIZE_LARGE)
         return d
 
-class ServerErrors(unittest.TestCase, ShouldFailMixin):
+class ServerErrors(unittest.TestCase, ShouldFailMixin, SetDEPMixin):
     def make_node(self, mode, num_servers=10):
         self.node = FakeClient(mode, num_servers)
         self.u = upload.Uploader()
hunk ./src/allmydata/test/test_upload.py 681
         d.addCallback(_done)
         return d
 
-class EncodingParameters(GridTestMixin, unittest.TestCase):
+class EncodingParameters(GridTestMixin, unittest.TestCase, SetDEPMixin,
+    ShouldFailMixin):
+    def _do_upload_with_broken_servers(self, servers_to_break):
+        """
+        I act like a normal upload, but before I send the results of
+        Tahoe2PeerSelector to the Encoder, I break the first servers_to_break
+        PeerTrackers in the used_peers part of the return result.
+        """
+        assert self.g, "I tried to find a grid at self.g, but failed"
+        broker = self.g.clients[0].storage_broker
+        sh     = self.g.clients[0]._secret_holder
+        data = upload.Data("data" * 10000, convergence="")
+        data.encoding_param_k = 3
+        data.encoding_param_happy = 4
+        data.encoding_param_n = 10
+        uploadable = upload.EncryptAnUploadable(data)
+        encoder = encode.Encoder()
+        encoder.set_encrypted_uploadable(uploadable)
+        status = upload.UploadStatus()
+        selector = upload.Tahoe2PeerSelector("dglev", "test", status)
+        storage_index = encoder.get_param("storage_index")
+        share_size = encoder.get_param("share_size")
+        block_size = encoder.get_param("block_size")
+        num_segments = encoder.get_param("num_segments")
+        d = selector.get_shareholders(broker, sh, storage_index,
+                                      share_size, block_size, num_segments,
+                                      10, 4)
+        def _have_shareholders((used_peers, already_peers)):
+            assert servers_to_break <= len(used_peers)
+            for index in xrange(servers_to_break):
+                server = list(used_peers)[index]
+                for share in server.buckets.keys():
+                    server.buckets[share].abort()
+            buckets = {}
+            for peer in used_peers:
+                buckets.update(peer.buckets)
+            encoder.set_shareholders(buckets)
+            d = encoder.start()
+            return d
+        d.addCallback(_have_shareholders)
+        return d
+
+    def _add_server_with_share(self, server_number, share_number=None,
+                               readonly=False):
+        assert self.g, "I tried to find a grid at self.g, but failed"
+        assert self.shares, "I tried to find shares at self.shares, but failed"
+        ss = self.g.make_server(server_number, readonly)
+        self.g.add_server(server_number, ss)
+        if share_number is not None:
+            # Copy share i from the directory associated with the first
+            # storage server to the directory associated with this one.
+            old_share_location = self.shares[share_number][2]
+            new_share_location = os.path.join(ss.storedir, "shares")
+            si = uri.from_string(self.uri).get_storage_index()
+            new_share_location = os.path.join(new_share_location,
+                                              storage_index_to_dir(si))
+            if not os.path.exists(new_share_location):
+                os.makedirs(new_share_location)
+            new_share_location = os.path.join(new_share_location,
+                                              str(share_number))
+            shutil.copy(old_share_location, new_share_location)
+            shares = self.find_shares(self.uri)
+            # Make sure that the storage server has the share.
+            self.failUnless((share_number, ss.my_nodeid, new_share_location)
+                            in shares)
+
+    def _setup_and_upload(self):
+        """
+        I set up a NoNetworkGrid with a single server and client,
+        upload a file to it, store its uri in self.uri, and store its
+        sharedata in self.shares.
+        """
+        self.set_up_grid(num_clients=1, num_servers=1)
+        client = self.g.clients[0]
+        client.DEFAULT_ENCODING_PARAMETERS['happy'] = 1
+        data = upload.Data("data" * 10000, convergence="")
+        self.data = data
+        d = client.upload(data)
+        def _store_uri(ur):
+            self.uri = ur.uri
+        d.addCallback(_store_uri)
+        d.addCallback(lambda ign:
+            self.find_shares(self.uri))
+        def _store_shares(shares):
+            self.shares = shares
+        d.addCallback(_store_shares)
+        return d
+
     def test_configure_parameters(self):
         self.basedir = self.mktemp()
         hooks = {0: self._set_up_nodes_extra_config}
hunk ./src/allmydata/test/test_upload.py 788
         d.addCallback(_check)
         return d
 
+    def _setUp(self, ns):
+        # Used by test_happy_semantics and test_preexisting_share_behavior
+        # to set up the grid.
+        self.node = FakeClient(mode="good", num_servers=ns)
+        self.u = upload.Uploader()
+        self.u.running = True
+        self.u.parent = self.node
+
+    def test_happy_semantics(self):
+        self._setUp(2)
+        DATA = upload.Data("kittens" * 10000, convergence="")
+        # These parameters are unsatisfiable with the client that we've made
+        # -- we'll use them to test that the semantics work correctly.
+        self.set_encoding_parameters(k=3, happy=5, n=10)
+        d = self.shouldFail(NotEnoughSharesError, "test_happy_semantics",
+                            "shares could only be placed on 2 servers "
+                            "(5 were requested)",
+                            self.u.upload, DATA)
+        # Let's reset the client to have 10 servers
+        d.addCallback(lambda ign:
+            self._setUp(10))
+        # These parameters are satisfiable with the client we've made.
+        d.addCallback(lambda ign:
+            self.set_encoding_parameters(k=3, happy=5, n=10))
+        # this should work
+        d.addCallback(lambda ign:
+            self.u.upload(DATA))
+        # Let's reset the client to have 7 servers
+        # (this is less than n, but more than h)
+        d.addCallback(lambda ign:
+            self._setUp(7))
+        # These encoding parameters should still be satisfiable with our
+        # client setup
+        d.addCallback(lambda ign:
+            self.set_encoding_parameters(k=3, happy=5, n=10))
+        # This, then, should work.
+        d.addCallback(lambda ign:
+            self.u.upload(DATA))
+        return d
+
+    def test_problem_layouts(self):
+        self.basedir = self.mktemp()
+        # This scenario is at
+        # http://allmydata.org/trac/tahoe/ticket/778#comment:52
+        #
+        # The scenario in comment:52 proposes that we have a layout
+        # like:
+        # server 1: share 1
+        # server 2: share 1
+        # server 3: share 1
+        # server 4: shares 2 - 10
+        # To get access to the shares, we will first upload to one
+        # server, which will then have shares 1 - 10. We'll then
+        # add three new servers, configure them to not accept any new
+        # shares, then write share 1 directly into the serverdir of each.
+        # Then each of servers 1 - 3 will report that they have share 1,
+        # and will not accept any new share, while server 4 will report that
+        # it has shares 2 - 10 and will accept new shares.
+        # We'll then set 'happy' = 4, and see that an upload fails
+        # (as it should)
+        d = self._setup_and_upload()
+        d.addCallback(lambda ign:
+            self._add_server_with_share(1, 0, True))
+        d.addCallback(lambda ign:
+            self._add_server_with_share(2, 0, True))
+        d.addCallback(lambda ign:
+            self._add_server_with_share(3, 0, True))
+        # Remove the first share from server 0.
+        def _remove_share_0():
+            share_location = self.shares[0][2]
+            os.remove(share_location)
+        d.addCallback(lambda ign:
+            _remove_share_0())
+        # Set happy = 4 in the client.
+        def _prepare():
+            client = self.g.clients[0]
+            client.DEFAULT_ENCODING_PARAMETERS['happy'] = 4
+            return client
+        d.addCallback(lambda ign:
+            _prepare())
+        # Uploading data should fail
+        d.addCallback(lambda client:
+            self.shouldFail(NotEnoughSharesError, "test_problem_layouts",
412+                            "shares could only be placed on 1 servers "
413+                            "(4 were requested)",
414+                            client.upload, upload.Data("data" * 10000,
415+                                                       convergence="")))
416+
417+
418+        # This scenario is at
419+        # http://allmydata.org/trac/tahoe/ticket/778#comment:53
420+        #
421+        # Set up the grid to have one server
422+        def _change_basedir(ign):
423+            self.basedir = self.mktemp()
424+        d.addCallback(_change_basedir)
425+        d.addCallback(lambda ign:
426+            self._setup_and_upload())
427+        # We want to have a layout like this:
428+        # server 1: share 1
429+        # server 2: share 2
430+        # server 3: share 3
431+        # server 4: shares 1 - 10
432+        # (this is an expansion of Zooko's example because it is easier
433+        #  to code, but it will fail in the same way)
434+        # To start, we'll create a server with shares 1-10 of the data
435+        # we're about to upload.
436+        # Next, we'll add three new servers to our NoNetworkGrid. We'll add
437+        # one share from our initial upload to each of these.
438+        # The counterintuitive ordering of the share numbers is to deal with
439+        # the permuting of these servers -- distributing the shares this
440+        # way ensures that the Tahoe2PeerSelector sees them in the order
441+        # described above.
442+        d.addCallback(lambda ign:
443+            self._add_server_with_share(server_number=1, share_number=2))
444+        d.addCallback(lambda ign:
445+            self._add_server_with_share(server_number=2, share_number=0))
446+        d.addCallback(lambda ign:
447+            self._add_server_with_share(server_number=3, share_number=1))
448+        # So, we now have the following layout:
+        # server 0: shares 0 - 9
+        # server 1: share 2
+        # server 2: share 0
+        # server 3: share 1
+        # We want to change the 'happy' parameter in the client to 4.
+        # We then want to feed the upload process a list of peers that
+        # server 0 is at the front of, so we trigger Zooko's scenario.
+        # Ideally, a reupload of our original data should work.
+        def _reset_encoding_parameters(ign):
+            client = self.g.clients[0]
+            client.DEFAULT_ENCODING_PARAMETERS['happy'] = 4
+            return client
+        d.addCallback(_reset_encoding_parameters)
+        # We need this to get around the fact that the old Data
+        # instance already has a happy parameter set.
+        d.addCallback(lambda client:
+            client.upload(upload.Data("data" * 10000, convergence="")))
+        return d
+
+
+    def test_dropped_servers_in_encoder(self):
+        def _set_basedir(ign=None):
+            self.basedir = self.mktemp()
+        _set_basedir()
+        d = self._setup_and_upload()
+        # Add 5 servers: the first gets one share from the original
+        # upload and is read-only; the rest start empty.
+        def _do_server_setup(ign):
+            self._add_server_with_share(1, 1, True)
+            self._add_server_with_share(2)
+            self._add_server_with_share(3)
+            self._add_server_with_share(4)
+            self._add_server_with_share(5)
+        d.addCallback(_do_server_setup)
+        # remove the original server
+        # (necessary to ensure that the Tahoe2PeerSelector will distribute
+        #  all the shares)
+        def _remove_server(ign):
+            server = self.g.servers_by_number[0]
+            self.g.remove_server(server.my_nodeid)
+        d.addCallback(_remove_server)
+        # This should succeed.
+        d.addCallback(lambda ign:
+            self._do_upload_with_broken_servers(1))
+        # Now, do the same thing over again, but drop 2 servers instead
+        # of 1. This should fail.
+        d.addCallback(_set_basedir)
+        d.addCallback(lambda ign:
+            self._setup_and_upload())
+        d.addCallback(_do_server_setup)
+        d.addCallback(_remove_server)
+        d.addCallback(lambda ign:
+            self.shouldFail(NotEnoughSharesError,
502+                            "test_dropped_server_in_encoder", "",
+                            self._do_upload_with_broken_servers, 2))
+        return d
+
+
+    def test_servers_with_unique_shares(self):
+        # servers_with_unique_shares expects a dict of
+        # shnum => peerid as a preexisting shares argument.
+        test1 = {
+                 1 : "server1",
+                 2 : "server2",
+                 3 : "server3",
+                 4 : "server4"
+                }
+        unique_servers = upload.servers_with_unique_shares(test1)
+        self.failUnlessEqual(4, len(unique_servers))
+        for server in ["server1", "server2", "server3", "server4"]:
+            self.failUnlessIn(server, unique_servers)
+        test1[4] = "server1"
+        # Now there should only be 3 unique servers.
+        unique_servers = upload.servers_with_unique_shares(test1)
+        self.failUnlessEqual(3, len(unique_servers))
+        for server in ["server1", "server2", "server3"]:
+            self.failUnlessIn(server, unique_servers)
+        # servers_with_unique_shares expects a set of PeerTracker
+        # instances as a used_peers argument, but only uses the peerid
+        # instance variable to assess uniqueness. So we feed it some fake
+        # PeerTrackers whose only important characteristic is that they
+        # have peerid set to something.
+        class FakePeerTracker:
+            pass
+        trackers = []
+        for server in ["server5", "server6", "server7", "server8"]:
+            t = FakePeerTracker()
+            t.peerid = server
+            trackers.append(t)
+        # Recall that there are 3 unique servers in test1. Since none of
+        # those overlap with the ones in trackers, we should get 7 back
+        unique_servers = upload.servers_with_unique_shares(test1, set(trackers))
+        self.failUnlessEqual(7, len(unique_servers))
+        expected_servers = ["server" + str(i) for i in xrange(1, 9)]
+        expected_servers.remove("server4")
+        for server in expected_servers:
+            self.failUnlessIn(server, unique_servers)
+        # Now add an overlapping server to trackers.
+        t = FakePeerTracker()
+        t.peerid = "server1"
+        trackers.append(t)
+        unique_servers = upload.servers_with_unique_shares(test1, set(trackers))
+        self.failUnlessEqual(7, len(unique_servers))
+        for server in expected_servers:
+            self.failUnlessIn(server, unique_servers)
+
+
     def _set_up_nodes_extra_config(self, clientdir):
         cfgfn = os.path.join(clientdir, "tahoe.cfg")
         oldcfg = open(cfgfn, "r").read()
}
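
test_servers_with_unique_shares above pins down the contract of
upload.servers_with_unique_shares() without showing its body. A minimal
model consistent with those assertions -- a sketch only, since the real
implementation lives in allmydata/immutable/upload.py and may differ:

    def servers_with_unique_shares(existing_shares, used_peers=None):
        # existing_shares maps shnum -> peerid; duplicate peerids
        # collapse, since a server holding two shares still counts once.
        servers = set(existing_shares.values())
        if used_peers:
            # used_peers holds PeerTracker-like objects; only their
            # .peerid attribute matters for uniqueness.
            servers.update(tracker.peerid for tracker in used_peers)
        return servers

On the test's data this returns 4 servers, then 3 after share 4 is
reassigned to "server1", and 7 once the four non-overlapping
FakePeerTrackers are included.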

Context:

[wui: s/TahoeLAFS/Tahoe-LAFS/
zooko@zooko.com**20091029035050
 Ignore-this: 901e64cd862e492ed3132bd298583c26
]
[tests: bump up the timeout on test_repairer to see if 120 seconds was too short for François's ARM box to do the test even when it was doing it right.
zooko@zooko.com**20091027224800
 Ignore-this: 95e93dc2e018b9948253c2045d506f56
]
[dirnode.pack_children(): add deep_immutable= argument
Brian Warner <warner@lothar.com>**20091026162809
 Ignore-this: d5a2371e47662c4bc6eff273e8181b00
 
 This will be used by DIR2:CHK to enforce the deep-immutability requirement.
]
[webapi: use t=mkdir-with-children instead of a children= arg to t=mkdir .
Brian Warner <warner@lothar.com>**20091026011321
 Ignore-this: 769cab30b6ab50db95000b6c5a524916
 
 This is safer: in the earlier API, an old webapi server would silently ignore
 the initial children, and clients trying to set them would have to fetch the
 newly-created directory to discover the incompatibility. In the new API,
 clients using t=mkdir-with-children against an old webapi server will get a
 clear error.
]
[nodemaker.create_new_mutable_directory: pack_children() in initial_contents=
Brian Warner <warner@lothar.com>**20091020005118
 Ignore-this: bd43c4eefe06fd32b7492bcb0a55d07e
 instead of creating an empty file and then adding the children later.
 
 This should speed up mkdir(initial_children) considerably, removing two
 roundtrips and an entire read-modify-write cycle, probably bringing it down
 to a single roundtrip. A quick test (against the volunteergrid) suggests a
 30% speedup.
 
 test_dirnode: add new tests to enforce the restrictions that interfaces.py
 claims for create_new_mutable_directory(): no UnknownNodes, metadata dicts
]
[test_dirnode.py: add tests of initial_children= args to client.create_dirnode
Brian Warner <warner@lothar.com>**20091017194159
 Ignore-this: 2e2da28323a4d5d815466387914abc1b
 and nodemaker.create_new_mutable_directory
]
[update many dirnode interfaces to accept dict-of-nodes instead of dict-of-caps
Brian Warner <warner@lothar.com>**20091017192829
 Ignore-this: b35472285143862a856bf4b361d692f0
 
 interfaces.py: define INodeMaker, document argument values, change
                create_new_mutable_directory() to take dict-of-nodes. Change
                dirnode.set_nodes() and dirnode.create_subdirectory() too.
 nodemaker.py: use INodeMaker, update create_new_mutable_directory()
 client.py: have create_dirnode() delegate initial_children= to nodemaker
 dirnode.py (Adder): take dict-of-nodes instead of list-of-nodes, which
                     updates set_nodes() and create_subdirectory()
 web/common.py (convert_initial_children_json): create dict-of-nodes
 web/directory.py: same
 web/unlinked.py: same
 test_dirnode.py: update tests to match
]
[dirnode.py: move pack_children() out to a function, for eventual use by others
Brian Warner <warner@lothar.com>**20091017180707
 Ignore-this: 6a823fb61f2c180fd38d6742d3196a7a
]
[move dirnode.CachingDict to dictutil.AuxValueDict, generalize method names,
Brian Warner <warner@lothar.com>**20091017180005
 Ignore-this: b086933cf429df0fcea16a308d2640dd
 improve tests. Let dirnode _pack_children accept either dict or AuxValueDict.
]
[test/common.py: update FakeMutableFileNode to new contents= callable scheme
Brian Warner <warner@lothar.com>**20091013052154
 Ignore-this: 62f00a76454a2190d1c8641c5993632f
]
[The initial_children= argument to nodemaker.create_new_mutable_directory is
Brian Warner <warner@lothar.com>**20091013031922
 Ignore-this: 72e45317c21f9eb9ec3bd79bd4311f48
 now enabled.
]
[client.create_mutable_file(contents=) now accepts a callable, which is
Brian Warner <warner@lothar.com>**20091013031232
 Ignore-this: 3c89d2f50c1e652b83f20bd3f4f27c4b
 invoked with the new MutableFileNode and is supposed to return the initial
 contents. This can be used by e.g. a new dirnode which needs the filenode's
 writekey to encrypt its initial children.
 
 create_mutable_file() still accepts a bytestring too, or None for an empty
 file.
]
[webapi: t=mkdir now accepts initial children, using the same JSON that t=json
Brian Warner <warner@lothar.com>**20091013023444
 Ignore-this: 574a46ed46af4251abf8c9580fd31ef7
 emits.
 
 client.create_dirnode(initial_children=) now works.
]
[replace dirnode.create_empty_directory() with create_subdirectory(), which
Brian Warner <warner@lothar.com>**20091013021520
 Ignore-this: 6b57cb51bcfcc6058d0df569fdc8a9cf
 takes an initial_children= argument
]
[dirnode.set_children: change return value: fire with self instead of None
Brian Warner <warner@lothar.com>**20091013015026
 Ignore-this: f1d14e67e084e4b2a4e25fa849b0e753
]
[dirnode.set_nodes: change return value: fire with self instead of None
Brian Warner <warner@lothar.com>**20091013014546
 Ignore-this: b75b3829fb53f7399693f1c1a39aacae
]
[dirnode.set_children: take a dict, not a list
Brian Warner <warner@lothar.com>**20091013002440
 Ignore-this: 540ce72ce2727ee053afaae1ff124e21
]
[dirnode.set_uri/set_children: change signature to take writecap+readcap
Brian Warner <warner@lothar.com>**20091012235126
 Ignore-this: 5df617b2d379a51c79148a857e6026b1
 instead of a single cap. The webapi t=set_children call benefits too.
]
[replace Client.create_empty_dirnode() with create_dirnode(), in anticipation
Brian Warner <warner@lothar.com>**20091012224506
 Ignore-this: cbdaa4266ecb3c6496ffceab4f95709d
 of adding initial_children= argument.
 
 Includes stubbed-out initial_children= support.
]
[test_web.py: use a less-fake client, making test harness smaller
Brian Warner <warner@lothar.com>**20091012222808
 Ignore-this: 29e95147f8c94282885c65b411d100bb
]
[webapi.txt: document t=set_children, other small edits
Brian Warner <warner@lothar.com>**20091009200446
 Ignore-this: 4d7e76b04a7b8eaa0a981879f778ea5d
]
[Verifier: check the full cryptext-hash tree on each share. Removed .todos
Brian Warner <warner@lothar.com>**20091005221849
 Ignore-this: 6fb039c5584812017d91725e687323a5
 from the last few test_repairer tests that were waiting on this.
]
[Verifier: check the full block-hash-tree on each share
Brian Warner <warner@lothar.com>**20091005214844
 Ignore-this: 3f7ccf6d253f32340f1bf1da27803eee
 
 Removed the .todo from two test_repairer tests that check this. The only
 remaining .todos are on the three crypttext-hash-tree tests.
]
[Verifier: check the full share-hash chain on each share
Brian Warner <warner@lothar.com>**20091005213443
 Ignore-this: 3d30111904158bec06a4eac22fd39d17
 
 Removed the .todo from two test_repairer tests that check this.
]
[test_repairer: rename Verifier test cases to be more precise and less verbose
Brian Warner <warner@lothar.com>**20091005201115
 Ignore-this: 64be7094e33338c7c2aea9387e138771
]
[immutable/checker.py: rearrange code a little bit, make it easier to follow
Brian Warner <warner@lothar.com>**20091005200252
 Ignore-this: 91cc303fab66faf717433a709f785fb5
]
[test/common.py: wrap docstrings to 80cols so I can read them more easily
Brian Warner <warner@lothar.com>**20091005200143
 Ignore-this: b180a3a0235cbe309c87bd5e873cbbb3
]
[immutable/download.py: wrap to 80cols, no functional changes
Brian Warner <warner@lothar.com>**20091005192542
 Ignore-this: 6b05fe3dc6d78832323e708b9e6a1fe
]
[CHK-hashes.svg: cross out plaintext hashes, since we don't include
Brian Warner <warner@lothar.com>**20091005010803
 Ignore-this: bea2e953b65ec7359363aa20de8cb603
 them (until we finish #453)
]
[docs: a few licensing clarifications requested by Ubuntu
zooko@zooko.com**20090927033226
 Ignore-this: 749fc8c9aeb6dc643669854a3e81baa7
]
[setup: remove binary WinFUSE modules
zooko@zooko.com**20090924211436
 Ignore-this: 8aefc571d2ae22b9405fc650f2c2062
 I would prefer to have just source code, or indications of what 3rd-party packages are required, under revision control, and have the build process generate or acquire the binaries as needed.  Also, having these in our release tarballs is interfering with getting Tahoe-LAFS uploaded into Ubuntu Karmic.  (Technically, they would accept binary modules as long as they came with the accompanying source so that they could satisfy their obligations under GPL2+ and TGPPL1+, but it is easier for now to remove the binaries from the source tree.)
 In this case, the binaries are from the tahoe-w32-client project: http://allmydata.org/trac/tahoe-w32-client , from which you can also get the source.
]
[setup: remove binary _fusemodule.so 's
zooko@zooko.com**20090924211130
 Ignore-this: 74487bbe27d280762ac5dd5f51e24186
 I would prefer to have just source code, or indications of what 3rd-party packages are required, under revision control, and have the build process generate or acquire the binaries as needed.  Also, having these in our release tarballs is interfering with getting Tahoe-LAFS uploaded into Ubuntu Karmic.  (Technically, they would accept binary modules as long as they came with the accompanying source so that they could satisfy their obligations under GPL2+ and TGPPL1+, but it is easier for now to remove the binaries from the source tree.)
 In this case, these modules come from the MacFUSE project: http://code.google.com/p/macfuse/
]
[doc: add a copy of LGPL2 for documentation purposes for ubuntu
zooko@zooko.com**20090924054218
 Ignore-this: 6a073b48678a7c84dc4fbcef9292ab5b
]
[setup: remove a convenience copy of figleaf, to ease inclusion into Ubuntu Karmic Koala
zooko@zooko.com**20090924053215
 Ignore-this: a0b0c990d6e2ee65c53a24391365ac8d
 We need to carefully document the licence of figleaf in order to get Tahoe-LAFS into Ubuntu Karmic Koala.  However, figleaf isn't really a part of Tahoe-LAFS per se -- this is just a "convenience copy" of a development tool.  The quickest way to make Tahoe-LAFS acceptable for Karmic then, is to remove figleaf from the Tahoe-LAFS tarball itself.  People who want to run figleaf on Tahoe-LAFS (as everyone should want) can install figleaf themselves.  I haven't tested this -- there may be incompatibilities between upstream figleaf and the copy that we had here...
]
[setup: shebang for misc/build-deb.py to fail quickly
zooko@zooko.com**20090819135626
 Ignore-this: 5a1b893234d2d0bb7b7346e84b0a6b4d
 Without this patch, when I ran "chmod +x ./misc/build-deb.py && ./misc/build-deb.py" then it hung indefinitely.  (I wonder what it was doing.)
]
[docs: Shawn Willden grants permission for his contributions under GPL2+|TGPPL1+
zooko@zooko.com**20090921164651
 Ignore-this: ef1912010d07ff2ffd9678e7abfd0d57
]
[docs: Csaba Henk granted permission to license fuse.py under the same terms as Tahoe-LAFS itself
zooko@zooko.com**20090921154659
 Ignore-this: c61ba48dcb7206a89a57ca18a0450c53
]
[setup: mark setup.py as having utf-8 encoding in it
zooko@zooko.com**20090920180343
 Ignore-this: 9d3850733700a44ba7291e9c5e36bb91
]
[doc: licensing cleanups
zooko@zooko.com**20090920171631
 Ignore-this: 7654f2854bf3c13e6f4d4597633a6630
 Use nice utf-8 © instead of "(c)". Remove licensing statements on utility modules that have been assigned to allmydata.com by their original authors. (Nattraverso was not assigned to allmydata.com -- it was LGPL'ed -- but I checked and src/allmydata/util/iputil.py was completely rewritten and doesn't contain any line of code from nattraverso.)  Add notes to misc/debian/copyright about licensing on files that aren't just allmydata.com-licensed.
]
[build-deb.py: run darcsver early, otherwise we get the wrong version later on
Brian Warner <warner@lothar.com>**20090918033620
 Ignore-this: 6635c5b85e84f8aed0d8390490c5392a
]
[new approach for debian packaging, sharing pieces across distributions. Still experimental, still only works for sid.
warner@lothar.com**20090818190527
 Ignore-this: a75eb63db9106b3269badbfcdd7f5ce1
]
[new experimental deb-packaging rules. Only works for sid so far.
Brian Warner <warner@lothar.com>**20090818014052
 Ignore-this: 3a26ad188668098f8f3cc10a7c0c2f27
]
[setup.py: read _version.py and pass to setup(version=), so more commands work
Brian Warner <warner@lothar.com>**20090818010057
 Ignore-this: b290eb50216938e19f72db211f82147e
 like "setup.py --version" and "setup.py --fullname"
]
[test/check_speed.py: fix shbang line
Brian Warner <warner@lothar.com>**20090818005948
 Ignore-this: 7f3a37caf349c4c4de704d0feb561f8d
]
[setup: remove bundled version of darcsver-1.2.1
zooko@zooko.com**20090816233432
 Ignore-this: 5357f26d2803db2d39159125dddb963a
 That version of darcsver emits a scary error message when the darcs executable or the _darcs subdirectory is not found.
 This error is hidden (unless the --loud option is passed) in darcsver >= 1.3.1.
 Fixes #788.
]
[de-Service-ify Helper, pass in storage_broker and secret_holder directly.
Brian Warner <warner@lothar.com>**20090815201737
 Ignore-this: 86b8ac0f90f77a1036cd604dd1304d8b
 This makes it more obvious that the Helper currently generates leases with
 the Helper's own secrets, rather than getting values from the client, which
 is arguably a bug that will likely be resolved with the Accounting project.
]
[immutable.Downloader: pass StorageBroker to constructor, stop being a Service
Brian Warner <warner@lothar.com>**20090815192543
 Ignore-this: af5ab12dbf75377640a670c689838479
 child of the client, access with client.downloader instead of
 client.getServiceNamed("downloader"). The single "Downloader" instance is
 scheduled for demolition anyways, to be replaced by individual
 filenode.download calls.
]
[tests: double the timeout on test_runner.RunNode.test_introducer since feisty hit a timeout
zooko@zooko.com**20090815160512
 Ignore-this: ca7358bce4bdabe8eea75dedc39c0e67
 I'm not sure if this is an actual timing issue (feisty is running on an overloaded VM if I recall correctly), or it there is a deeper bug.
]
[stop making History be a Service, it wasn't necessary
Brian Warner <warner@lothar.com>**20090815114415
 Ignore-this: b60449231557f1934a751c7effa93cfe
]
[Overhaul IFilesystemNode handling, to simplify tests and use POLA internally.
Brian Warner <warner@lothar.com>**20090815112846
 Ignore-this: 1db1b9c149a60a310228aba04c5c8e5f
 
 * stop using IURI as an adapter
 * pass cap strings around instead of URI instances
 * move filenode/dirnode creation duties from Client to new NodeMaker class
 * move other Client duties to KeyGenerator, SecretHolder, History classes
 * stop passing Client reference to dirnode/filenode constructors
   - pass less-powerful references instead, like StorageBroker or Uploader
 * always create DirectoryNodes by wrapping a filenode (mutable for now)
 * remove some specialized mock classes from unit tests
 
 Detailed list of changes (done one at a time, then merged together)
 
 always pass a string to create_node_from_uri(), not an IURI instance
 always pass a string to IFilesystemNode constructors, not an IURI instance
 stop using IURI() as an adapter, switch on cap prefix in create_node_from_uri()
 client.py: move SecretHolder code out to a separate class
 test_web.py: hush pyflakes
 client.py: move NodeMaker functionality out into a separate object
 LiteralFileNode: stop storing a Client reference
 immutable Checker: remove Client reference, it only needs a SecretHolder
 immutable Upload: remove Client reference, leave SecretHolder and StorageBroker
 immutable Repairer: replace Client reference with StorageBroker and SecretHolder
 immutable FileNode: remove Client reference
 mutable.Publish: stop passing Client
 mutable.ServermapUpdater: get StorageBroker in constructor, not by peeking into Client reference
 MutableChecker: reference StorageBroker and History directly, not through Client
 mutable.FileNode: removed unused indirection to checker classes
 mutable.FileNode: remove Client reference
 client.py: move RSA key generation into a separate class, so it can be passed to the nodemaker
 move create_mutable_file() into NodeMaker
 test_dirnode.py: stop using FakeClient mockups, use NoNetworkGrid instead. This simplifies the code, but takes longer to run (17s instead of 6s). This should come down later when other cleanups make it possible to use simpler (non-RSA) fake mutable files for dirnode tests.
 test_mutable.py: clean up basedir names
 client.py: move create_empty_dirnode() into NodeMaker
 dirnode.py: get rid of DirectoryNode.create
 remove DirectoryNode.init_from_uri, refactor NodeMaker for customization, simplify test_web's mock Client to match
 stop passing Client to DirectoryNode, make DirectoryNode.create_with_mutablefile the normal DirectoryNode constructor, start removing client from NodeMaker
 remove Client from NodeMaker
 move helper status into History, pass History to web.Status instead of Client
 test_mutable.py: fix minor typo
]
[docs: edits for docs/running.html from Sam Mason
zooko@zooko.com**20090809201416
 Ignore-this: 2207e80449943ebd4ed50cea57c43143
]
[docs: install.html: instruct Debian users to use this document and not to go find the DownloadDebianPackages page, ignore the warning at the top of it, and try it
zooko@zooko.com**20090804123840
 Ignore-this: 49da654f19d377ffc5a1eff0c820e026
 http://allmydata.org/pipermail/tahoe-dev/2009-August/002507.html
]
[docs: relnotes.txt: reflow to 63 chars wide because google groups and some web forms seem to wrap to that
zooko@zooko.com**20090802135016
 Ignore-this: 53b1493a0491bc30fb2935fad283caeb
]
[docs: about.html: fix English usage noticed by Amber
zooko@zooko.com**20090802050533
 Ignore-this: 89965c4650f9bd100a615c401181a956
]
[docs: fix mis-spelled word in about.html
zooko@zooko.com**20090802050320
 Ignore-this: fdfd0397bc7cef9edfde425dddeb67e5
]
[TAG allmydata-tahoe-1.5.0
zooko@zooko.com**20090802031303
 Ignore-this: 94e5558e7225c39a86aae666ea00f166
]
Patch bundle hash:
a43c11776d53b65b527751a76b1b4ea1bdbf5444