Sun Oct 11 20:40:05 PDT 2009  Kevan Carstensen <kevan@isnotajoke.com>
  * Alter various unit tests to work with the new happy behavior

Sat Oct 17 18:30:13 PDT 2009  Kevan Carstensen <kevan@isnotajoke.com>
  * Alter NoNetworkGrid to allow the creation of readonly servers for testing purposes.

Fri Oct 30 02:19:08 PDT 2009  "Kevan Carstensen" <kevan@isnotajoke.com>
  * Refactor some behavior into a mixin, and add tests for the behavior described in #778

New patches:

[Alter various unit tests to work with the new happy behavior
Kevan Carstensen <kevan@isnotajoke.com>**20091012034005
 Ignore-this: 8b6823526e72390cf69e0f1cf1f97de4
] {
hunk ./src/allmydata/test/common.py 918
             # We need multiple segments to test crypttext hash trees that are
             # non-trivial (i.e. they have more than just one hash in them).
             cl0.DEFAULT_ENCODING_PARAMETERS['max_segment_size'] = 12
+            # Tests that need to test servers of happiness using this should
+            # set their own value for happy -- the default (7) breaks stuff.
+            cl0.DEFAULT_ENCODING_PARAMETERS['happy'] = 1
             d2 = cl0.upload(immutable.upload.Data(TEST_DATA, convergence=""))
             def _after_upload(u):
                 filecap = u.uri
hunk ./src/allmydata/test/test_system.py 111
         d = self.set_up_nodes()
         def _check_connections(res):
             for c in self.clients:
+                c.DEFAULT_ENCODING_PARAMETERS['happy'] = 5
                 all_peerids = c.get_storage_broker().get_all_serverids()
                 self.failUnlessEqual(len(all_peerids), self.numclients)
                 sb = c.storage_broker
hunk ./src/allmydata/test/test_system.py 250
                                                       add_to_sparent=True))
         def _added(extra_node):
             self.extra_node = extra_node
+            self.extra_node.DEFAULT_ENCODING_PARAMETERS['happy'] = 5
         d.addCallback(_added)
 
         HELPER_DATA = "Data that needs help to upload" * 1000
hunk ./src/allmydata/test/test_system.py 749
         self.basedir = "system/SystemTest/test_vdrive"
         self.data = LARGE_DATA
         d = self.set_up_nodes(use_stats_gatherer=True)
+        def _new_happy_semantics(ign):
+            for c in self.clients:
+                c.DEFAULT_ENCODING_PARAMETERS['happy'] = 1
+        d.addCallback(_new_happy_semantics)
         d.addCallback(self._test_introweb)
         d.addCallback(self.log, "starting publish")
         d.addCallback(self._do_publish1)
hunk ./src/allmydata/test/test_system.py 1167
         d.addCallback(self.failUnlessEqual, "new.txt contents")
         # and again with something large enough to use multiple segments,
         # and hopefully trigger pauseProducing too
+        def _new_happy_semantics(ign):
+            for c in self.clients:
+                # these parameters appear to get reset earlier in the test run,
+                # so re-apply the new happy value here to be safe
+                c.DEFAULT_ENCODING_PARAMETERS['happy'] = 1
+        d.addCallback(_new_happy_semantics)
         d.addCallback(lambda res: self.PUT(public + "/subdir3/big.txt",
                                            "big" * 500000)) # 1.5MB
         d.addCallback(lambda res: self.GET(public + "/subdir3/big.txt"))
hunk ./src/allmydata/test/test_upload.py 169
 
 class FakeClient:
     DEFAULT_ENCODING_PARAMETERS = {"k":25,
-                                   "happy": 75,
+                                   "happy": 25,
                                    "n": 100,
                                    "max_segment_size": 1*MiB,
                                    }
hunk ./src/allmydata/test/test_upload.py 315
         data = self.get_data(SIZE_LARGE)
         segsize = int(SIZE_LARGE / 2.5)
         # we want 3 segments, since that's not a power of two
-        self.set_encoding_parameters(25, 75, 100, segsize)
+        self.set_encoding_parameters(25, 25, 100, segsize)
         d = upload_data(self.u, data)
         d.addCallback(extract_uri)
         d.addCallback(self._check_large, SIZE_LARGE)
hunk ./src/allmydata/test/test_upload.py 394
     def test_first_error(self):
         mode = dict([(0,"good")] + [(i,"first-fail") for i in range(1,10)])
         self.make_node(mode)
+        self.set_encoding_parameters(k=25, happy=1, n=50)
         d = upload_data(self.u, DATA)
         d.addCallback(extract_uri)
         d.addCallback(self._check_large, SIZE_LARGE)
hunk ./src/allmydata/test/test_upload.py 512
 
         self.make_client()
         data = self.get_data(SIZE_LARGE)
-        self.set_encoding_parameters(50, 75, 100)
+        # if there are 50 peers, then happy needs to be <= 50
+        self.set_encoding_parameters(50, 50, 100)
         d = upload_data(self.u, data)
         d.addCallback(extract_uri)
         d.addCallback(self._check_large, SIZE_LARGE)
hunk ./src/allmydata/test/test_upload.py 559
 
         self.make_client()
         data = self.get_data(SIZE_LARGE)
-        self.set_encoding_parameters(100, 150, 200)
+        # if there are 50 peers, then happy should be no more than 50 if
+        # we want this to work.
+        self.set_encoding_parameters(100, 50, 200)
         d = upload_data(self.u, data)
         d.addCallback(extract_uri)
         d.addCallback(self._check_large, SIZE_LARGE)
hunk ./src/allmydata/test/test_upload.py 579
 
         self.make_client(3)
         data = self.get_data(SIZE_LARGE)
-        self.set_encoding_parameters(3, 5, 10)
+        self.set_encoding_parameters(3, 3, 10)
         d = upload_data(self.u, data)
         d.addCallback(extract_uri)
         d.addCallback(self._check_large, SIZE_LARGE)
hunk ./src/allmydata/test/test_web.py 3419
         self.basedir = "web/Grid/exceptions"
         self.set_up_grid(num_clients=1, num_servers=2)
         c0 = self.g.clients[0]
+        c0.DEFAULT_ENCODING_PARAMETERS['happy'] = 2
         self.fileurls = {}
         DATA = "data" * 100
         d = c0.create_dirnode()
}
[Alter NoNetworkGrid to allow the creation of readonly servers for testing purposes.
Kevan Carstensen <kevan@isnotajoke.com>**20091018013013
 Ignore-this: e12cd7c4ddeb65305c5a7e08df57c754
] {
hunk ./src/allmydata/test/no_network.py 203
             c.setServiceParent(self)
             self.clients.append(c)
 
-    def make_server(self, i):
+    def make_server(self, i, readonly=False):
         serverid = hashutil.tagged_hash("serverid", str(i))[:20]
         serverdir = os.path.join(self.basedir, "servers",
                                  idlib.shortnodeid_b2a(serverid))
hunk ./src/allmydata/test/no_network.py 208
         fileutil.make_dirs(serverdir)
-        ss = StorageServer(serverdir, serverid, stats_provider=SimpleStats())
+        ss = StorageServer(serverdir, serverid, stats_provider=SimpleStats(),
+                           readonly_storage=readonly)
         return ss
 
     def add_server(self, i, ss):
}
[Refactor some behavior into a mixin, and add tests for the behavior described in #778
"Kevan Carstensen" <kevan@isnotajoke.com>**20091030091908
 Ignore-this: a6f9797057ca135579b249af3b2b66ac
] {
hunk ./src/allmydata/test/test_upload.py 2
 
-import os
+import os, shutil
 from cStringIO import StringIO
 from twisted.trial import unittest
 from twisted.python.failure import Failure
hunk ./src/allmydata/test/test_upload.py 12
 
 import allmydata # for __full_version__
 from allmydata import uri, monitor, client
-from allmydata.immutable import upload
+from allmydata.immutable import upload, encode
 from allmydata.interfaces import IFileURI, FileTooLargeError, NoSharesError, \
      NotEnoughSharesError
 from allmydata.util.assertutil import precondition
hunk ./src/allmydata/test/test_upload.py 20
 from no_network import GridTestMixin
 from common_util import ShouldFailMixin
 from allmydata.storage_client import StorageFarmBroker
+from allmydata.storage.server import storage_index_to_dir
 
 MiB = 1024*1024
 
hunk ./src/allmydata/test/test_upload.py 91
 class ServerError(Exception):
     pass
 
+class SetDEPMixin:
+    def set_encoding_parameters(self, k, happy, n, max_segsize=1*MiB):
+        p = {"k": k,
+             "happy": happy,
+             "n": n,
+             "max_segment_size": max_segsize,
+             }
+        self.node.DEFAULT_ENCODING_PARAMETERS = p
+
 class FakeStorageServer:
     def __init__(self, mode):
         self.mode = mode
hunk ./src/allmydata/test/test_upload.py 247
     u = upload.FileHandle(fh, convergence=None)
     return uploader.upload(u)
 
-class GoodServer(unittest.TestCase, ShouldFailMixin):
+class GoodServer(unittest.TestCase, ShouldFailMixin, SetDEPMixin):
     def setUp(self):
         self.node = FakeClient(mode="good")
         self.u = upload.Uploader()
hunk ./src/allmydata/test/test_upload.py 254
         self.u.running = True
         self.u.parent = self.node
 
-    def set_encoding_parameters(self, k, happy, n, max_segsize=1*MiB):
-        p = {"k": k,
-             "happy": happy,
-             "n": n,
-             "max_segment_size": max_segsize,
-             }
-        self.node.DEFAULT_ENCODING_PARAMETERS = p
-
     def _check_small(self, newuri, size):
         u = IFileURI(newuri)
         self.failUnless(isinstance(u, uri.LiteralFileURI))
hunk ./src/allmydata/test/test_upload.py 377
         d.addCallback(self._check_large, SIZE_LARGE)
         return d
 
-class ServerErrors(unittest.TestCase, ShouldFailMixin):
+class ServerErrors(unittest.TestCase, ShouldFailMixin, SetDEPMixin):
     def make_node(self, mode, num_servers=10):
         self.node = FakeClient(mode, num_servers)
         self.u = upload.Uploader()
hunk ./src/allmydata/test/test_upload.py 681
         d.addCallback(_done)
         return d
 
-class EncodingParameters(GridTestMixin, unittest.TestCase):
+class EncodingParameters(GridTestMixin, unittest.TestCase, SetDEPMixin,
+    ShouldFailMixin):
+    def _do_upload_with_broken_servers(self, servers_to_break):
+        """
+        I act like a normal upload, but before I send the results of
+        Tahoe2PeerSelector to the Encoder, I break the first servers_to_break
+        PeerTrackers in the used_peers part of the return result.
+        """
+        assert self.g, "I tried to find a grid at self.g, but failed"
+        broker = self.g.clients[0].storage_broker
+        sh     = self.g.clients[0]._secret_holder
+        data = upload.Data("data" * 10000, convergence="")
+        data.encoding_param_k = 3
+        data.encoding_param_happy = 4
+        data.encoding_param_n = 10
+        uploadable = upload.EncryptAnUploadable(data)
+        encoder = encode.Encoder()
+        encoder.set_encrypted_uploadable(uploadable)
+        status = upload.UploadStatus()
+        selector = upload.Tahoe2PeerSelector("dglev", "test", status)
+        storage_index = encoder.get_param("storage_index")
+        share_size = encoder.get_param("share_size")
+        block_size = encoder.get_param("block_size")
+        num_segments = encoder.get_param("num_segments")
+        d = selector.get_shareholders(broker, sh, storage_index,
+                                      share_size, block_size, num_segments,
+                                      10, 4)
+        def _have_shareholders((used_peers, already_peers)):
+            assert servers_to_break <= len(used_peers)
+            for index in xrange(servers_to_break):
+                server = list(used_peers)[index]
+                for share in server.buckets.keys():
+                    server.buckets[share].abort()
+            buckets = {}
+            for peer in used_peers:
+                buckets.update(peer.buckets)
+            encoder.set_shareholders(buckets)
+            d = encoder.start()
+            return d
+        d.addCallback(_have_shareholders)
+        return d
+
+    def _add_server_with_share(self, server_number, share_number=None,
+                               readonly=False):
+        assert self.g, "I tried to find a grid at self.g, but failed"
+        assert self.shares, "I tried to find shares at self.shares, but failed"
+        ss = self.g.make_server(server_number, readonly)
+        self.g.add_server(server_number, ss)
+        if share_number:
+            # Copy share i from the directory associated with the first 
+            # storage server to the directory associated with this one.
+            old_share_location = self.shares[share_number][2]
+            new_share_location = os.path.join(ss.storedir, "shares")
+            si = uri.from_string(self.uri).get_storage_index()
+            new_share_location = os.path.join(new_share_location,
+                                              storage_index_to_dir(si))
+            if not os.path.exists(new_share_location):
+                os.makedirs(new_share_location)
+            new_share_location = os.path.join(new_share_location,
+                                              str(share_number))
+            shutil.copy(old_share_location, new_share_location)
+            shares = self.find_shares(self.uri)
+            # Make sure that the storage server has the share.
+            self.failUnless((share_number, ss.my_nodeid, new_share_location)
+                            in shares)
+
+    def _setup_and_upload(self):
+        """
+        I set up a NoNetworkGrid with a single server and client,
+        upload a file to it, store its uri in self.uri, and store its
+        sharedata in self.shares.
+        """
+        self.set_up_grid(num_clients=1, num_servers=1)
+        client = self.g.clients[0]
+        client.DEFAULT_ENCODING_PARAMETERS['happy'] = 1
+        data = upload.Data("data" * 10000, convergence="")
+        self.data = data
+        d = client.upload(data)
+        def _store_uri(ur):
+            self.uri = ur.uri
+        d.addCallback(_store_uri)
+        d.addCallback(lambda ign:
+            self.find_shares(self.uri))
+        def _store_shares(shares):
+            self.shares = shares
+        d.addCallback(_store_shares)
+        return d
+
     def test_configure_parameters(self):
         self.basedir = self.mktemp()
         hooks = {0: self._set_up_nodes_extra_config}
hunk ./src/allmydata/test/test_upload.py 788
         d.addCallback(_check)
         return d
 
+    def _setUp(self, ns):
+        # Used by test_happy_semantics and test_preexisting_share_behavior
+        # to set up the grid.
+        self.node = FakeClient(mode="good", num_servers=ns)
+        self.u = upload.Uploader()
+        self.u.running = True
+        self.u.parent = self.node
+
+    def test_happy_semantics(self):
+        self._setUp(2)
+        DATA = upload.Data("kittens" * 10000, convergence="")
+        # These parameters are unsatisfiable with the client that we've made
+        # -- we'll use them to test that the semantics work correctly.
+        self.set_encoding_parameters(k=3, happy=5, n=10)
+        d = self.shouldFail(NotEnoughSharesError, "test_happy_semantics",
+                            "shares could only be placed on 2 servers "
+                            "(5 were requested)",
+                            self.u.upload, DATA)
+        # Let's reset the client to have 10 servers
+        d.addCallback(lambda ign:
+            self._setUp(10))
+        # These parameters are satisfiable with the client we've made.
+        d.addCallback(lambda ign:
+            self.set_encoding_parameters(k=3, happy=5, n=10))
+        # this should work
+        d.addCallback(lambda ign:
+            self.u.upload(DATA))
+        # Let's reset the client to have 7 servers
+        # (this is less than n, but more than h)
+        d.addCallback(lambda ign:
+            self._setUp(7))
+        # These encoding parameters should still be satisfiable with our 
+        # client setup
+        d.addCallback(lambda ign:
+            self.set_encoding_parameters(k=3, happy=5, n=10))
+        # This, then, should work.
+        d.addCallback(lambda ign:
+            self.u.upload(DATA))
+        return d
+
+    def test_problem_layouts(self):
+        self.basedir = self.mktemp()
+        # This scenario is at 
+        # http://allmydata.org/trac/tahoe/ticket/778#comment:52
+        #
+        # The scenario in comment:52 proposes that we have a layout
+        # like:
+        # server 1: share 1
+        # server 2: share 1
+        # server 3: share 1
+        # server 4: shares 2 - 10
+        # To get access to the shares, we will first upload to one 
+        # server, which will then have shares 1 - 10. We'll then 
+        # add three new servers, configure them to not accept any new
+        # shares, then write share 1 directly into the serverdir of each.
+        # Then each of servers 1 - 3 will report that they have share 1, 
+        # and will not accept any new share, while server 4 will report that
+        # it has shares 2 - 10 and will accept new shares.
+        # We'll then set 'happy' = 4, and see that an upload fails
+        # (as it should)
+        d = self._setup_and_upload()
+        d.addCallback(lambda ign:
+            self._add_server_with_share(1, 0, True))
+        d.addCallback(lambda ign:
+            self._add_server_with_share(2, 0, True))
+        d.addCallback(lambda ign:
+            self._add_server_with_share(3, 0, True))
+        # Remove the first share from server 0.
+        def _remove_share_0():
+            share_location = self.shares[0][2]
+            os.remove(share_location)
+        d.addCallback(lambda ign:
+            _remove_share_0())
+        # Set happy = 4 in the client.
+        def _prepare():
+            client = self.g.clients[0]
+            client.DEFAULT_ENCODING_PARAMETERS['happy'] = 4
+            return client
+        d.addCallback(lambda ign:
+            _prepare())
+        # Uploading data should fail
+        d.addCallback(lambda client:
+            self.shouldFail(NotEnoughSharesError, "test_happy_semantics",
+                            "shares could only be placed on 1 servers "
+                            "(4 were requested)",
+                            client.upload, upload.Data("data" * 10000,
+                                                       convergence="")))
+
+
+        # This scenario is at
+        # http://allmydata.org/trac/tahoe/ticket/778#comment:53
+        #
+        # Set up the grid to have one server
+        def _change_basedir(ign):
+            self.basedir = self.mktemp()
+        d.addCallback(_change_basedir)
+        d.addCallback(lambda ign:
+            self._setup_and_upload())
+        # We want to have a layout like this:
+        # server 1: share 1
+        # server 2: share 2
+        # server 3: share 3
+        # server 4: shares 1 - 10
+        # (this is an expansion of Zooko's example because it is easier
+        #  to code, but it will fail in the same way)
+        # To start, we'll create a server with shares 1-10 of the data 
+        # we're about to upload.
+        # Next, we'll add three new servers to our NoNetworkGrid. We'll add
+        # one share from our initial upload to each of these.
+        # The counterintuitive ordering of the share numbers is to deal with 
+        # the permuting of these servers -- distributing the shares this 
+        # way ensures that the Tahoe2PeerSelector sees them in the order 
+        # described above.
+        d.addCallback(lambda ign:
+            self._add_server_with_share(server_number=1, share_number=2))
+        d.addCallback(lambda ign:
+            self._add_server_with_share(server_number=2, share_number=0))
+        d.addCallback(lambda ign:
+            self._add_server_with_share(server_number=3, share_number=1))
+        # So, we now have the following layout:
+        # server 0: shares 1 - 10
+        # server 1: share 0
+        # server 2: share 1
+        # server 3: share 2
+        # We want to change the 'happy' parameter in the client to 4. 
+        # We then want to feed the upload process a list of peers that
+        # server 0 is at the front of, so we trigger Zooko's scenario.
+        # Ideally, a reupload of our original data should work.
+        def _reset_encoding_parameters(ign):
+            client = self.g.clients[0]
+            client.DEFAULT_ENCODING_PARAMETERS['happy'] = 4
+            return client
+        d.addCallback(_reset_encoding_parameters)
+        # We need this to get around the fact that the old Data 
+        # instance already has a happy parameter set.
+        d.addCallback(lambda client:
+            client.upload(upload.Data("data" * 10000, convergence="")))
+        return d
+
+
+    def test_dropped_servers_in_encoder(self):
+        def _set_basedir(ign=None):
+            self.basedir = self.mktemp()
+        _set_basedir()
+        d = self._setup_and_upload();
+        # Add 5 servers, with one share each from the original
+        # Add a readonly server
+        def _do_server_setup(ign):
+            self._add_server_with_share(1, 1, True)
+            self._add_server_with_share(2)
+            self._add_server_with_share(3)
+            self._add_server_with_share(4)
+            self._add_server_with_share(5)
+        d.addCallback(_do_server_setup)
+        # remove the original server
+        # (necessary to ensure that the Tahoe2PeerSelector will distribute
+        #  all the shares)
+        def _remove_server(ign):
+            server = self.g.servers_by_number[0]
+            self.g.remove_server(server.my_nodeid)
+        d.addCallback(_remove_server)
+        # This should succeed.
+        d.addCallback(lambda ign:
+            self._do_upload_with_broken_servers(1))
+        # Now, do the same thing over again, but drop 2 servers instead
+        # of 1. This should fail.
+        d.addCallback(_set_basedir)
+        d.addCallback(lambda ign:
+            self._setup_and_upload())
+        d.addCallback(_do_server_setup)
+        d.addCallback(_remove_server)
+        d.addCallback(lambda ign:
+            self.shouldFail(NotEnoughSharesError,
+                            "test_dropped_server_in_encoder", "",
+                            self._do_upload_with_broken_servers, 2))
+        return d
+
+
+    def test_servers_with_unique_shares(self):
+        # servers_with_unique_shares expects a dict of 
+        # shnum => peerid as a preexisting shares argument.
+        test1 = {
+                 1 : "server1",
+                 2 : "server2",
+                 3 : "server3",
+                 4 : "server4"
+                }
+        unique_servers = upload.servers_with_unique_shares(test1)
+        self.failUnlessEqual(4, len(unique_servers))
+        for server in ["server1", "server2", "server3", "server4"]:
+            self.failUnlessIn(server, unique_servers)
+        test1[4] = "server1"
+        # Now there should only be 3 unique servers.
+        unique_servers = upload.servers_with_unique_shares(test1)
+        self.failUnlessEqual(3, len(unique_servers))
+        for server in ["server1", "server2", "server3"]:
+            self.failUnlessIn(server, unique_servers)
+        # servers_with_unique_shares expects a set of PeerTracker
+        # instances as a used_peers argument, but only uses the peerid
+        # instance variable to assess uniqueness. So we feed it some fake
+        # PeerTrackers whose only important characteristic is that they 
+        # have peerid set to something.
+        class FakePeerTracker:
+            pass
+        trackers = []
+        for server in ["server5", "server6", "server7", "server8"]:
+            t = FakePeerTracker()
+            t.peerid = server
+            trackers.append(t)
+        # Recall that there are 3 unique servers in test1. Since none of
+        # those overlap with the ones in trackers, we should get 7 back
+        unique_servers = upload.servers_with_unique_shares(test1, set(trackers))
+        self.failUnlessEqual(7, len(unique_servers))
+        expected_servers = ["server" + str(i) for i in xrange(1, 9)]
+        expected_servers.remove("server4")
+        for server in expected_servers:
+            self.failUnlessIn(server, unique_servers)
+        # Now add an overlapping server to trackers.
+        t = FakePeerTracker()
+        t.peerid = "server1"
+        trackers.append(t)
+        unique_servers = upload.servers_with_unique_shares(test1, set(trackers))
+        self.failUnlessEqual(7, len(unique_servers))
+        for server in expected_servers:
+            self.failUnlessIn(server, unique_servers)
+
+
     def _set_up_nodes_extra_config(self, clientdir):
         cfgfn = os.path.join(clientdir, "tahoe.cfg")
         oldcfg = open(cfgfn, "r").read()
}

Context:

[wui: s/TahoeLAFS/Tahoe-LAFS/
zooko@zooko.com**20091029035050
 Ignore-this: 901e64cd862e492ed3132bd298583c26
] 
[tests: bump up the timeout on test_repairer to see if 120 seconds was too short for François's ARM box to do the test even when it was doing it right.
zooko@zooko.com**20091027224800
 Ignore-this: 95e93dc2e018b9948253c2045d506f56
] 
[dirnode.pack_children(): add deep_immutable= argument
Brian Warner <warner@lothar.com>**20091026162809
 Ignore-this: d5a2371e47662c4bc6eff273e8181b00
 
 This will be used by DIR2:CHK to enforce the deep-immutability requirement.
] 
[webapi: use t=mkdir-with-children instead of a children= arg to t=mkdir .
Brian Warner <warner@lothar.com>**20091026011321
 Ignore-this: 769cab30b6ab50db95000b6c5a524916
 
 This is safer: in the earlier API, an old webapi server would silently ignore
 the initial children, and clients trying to set them would have to fetch the
 newly-created directory to discover the incompatibility. In the new API,
 clients using t=mkdir-with-children against an old webapi server will get a
 clear error.
] 
[nodemaker.create_new_mutable_directory: pack_children() in initial_contents=
Brian Warner <warner@lothar.com>**20091020005118
 Ignore-this: bd43c4eefe06fd32b7492bcb0a55d07e
 instead of creating an empty file and then adding the children later.
 
 This should speed up mkdir(initial_children) considerably, removing two
 roundtrips and an entire read-modify-write cycle, probably bringing it down
 to a single roundtrip. A quick test (against the volunteergrid) suggests a
 30% speedup.
 
 test_dirnode: add new tests to enforce the restrictions that interfaces.py
 claims for create_new_mutable_directory(): no UnknownNodes, metadata dicts
] 
[test_dirnode.py: add tests of initial_children= args to client.create_dirnode
Brian Warner <warner@lothar.com>**20091017194159
 Ignore-this: 2e2da28323a4d5d815466387914abc1b
 and nodemaker.create_new_mutable_directory
] 
[update many dirnode interfaces to accept dict-of-nodes instead of dict-of-caps
Brian Warner <warner@lothar.com>**20091017192829
 Ignore-this: b35472285143862a856bf4b361d692f0
 
 interfaces.py: define INodeMaker, document argument values, change
                create_new_mutable_directory() to take dict-of-nodes. Change
                dirnode.set_nodes() and dirnode.create_subdirectory() too.
 nodemaker.py: use INodeMaker, update create_new_mutable_directory()
 client.py: have create_dirnode() delegate initial_children= to nodemaker
 dirnode.py (Adder): take dict-of-nodes instead of list-of-nodes, which
                     updates set_nodes() and create_subdirectory()
 web/common.py (convert_initial_children_json): create dict-of-nodes
 web/directory.py: same
 web/unlinked.py: same
 test_dirnode.py: update tests to match
] 
[dirnode.py: move pack_children() out to a function, for eventual use by others
Brian Warner <warner@lothar.com>**20091017180707
 Ignore-this: 6a823fb61f2c180fd38d6742d3196a7a
] 
[move dirnode.CachingDict to dictutil.AuxValueDict, generalize method names,
Brian Warner <warner@lothar.com>**20091017180005
 Ignore-this: b086933cf429df0fcea16a308d2640dd
 improve tests. Let dirnode _pack_children accept either dict or AuxValueDict.
] 
[test/common.py: update FakeMutableFileNode to new contents= callable scheme
Brian Warner <warner@lothar.com>**20091013052154
 Ignore-this: 62f00a76454a2190d1c8641c5993632f
] 
[The initial_children= argument to nodemaker.create_new_mutable_directory is
Brian Warner <warner@lothar.com>**20091013031922
 Ignore-this: 72e45317c21f9eb9ec3bd79bd4311f48
 now enabled.
] 
[client.create_mutable_file(contents=) now accepts a callable, which is
Brian Warner <warner@lothar.com>**20091013031232
 Ignore-this: 3c89d2f50c1e652b83f20bd3f4f27c4b
 invoked with the new MutableFileNode and is supposed to return the initial
 contents. This can be used by e.g. a new dirnode which needs the filenode's
 writekey to encrypt its initial children.
 
 create_mutable_file() still accepts a bytestring too, or None for an empty
 file.
] 
[webapi: t=mkdir now accepts initial children, using the same JSON that t=json
Brian Warner <warner@lothar.com>**20091013023444
 Ignore-this: 574a46ed46af4251abf8c9580fd31ef7
 emits.
 
 client.create_dirnode(initial_children=) now works.
] 
[replace dirnode.create_empty_directory() with create_subdirectory(), which
Brian Warner <warner@lothar.com>**20091013021520
 Ignore-this: 6b57cb51bcfcc6058d0df569fdc8a9cf
 takes an initial_children= argument
] 
[dirnode.set_children: change return value: fire with self instead of None
Brian Warner <warner@lothar.com>**20091013015026
 Ignore-this: f1d14e67e084e4b2a4e25fa849b0e753
] 
[dirnode.set_nodes: change return value: fire with self instead of None
Brian Warner <warner@lothar.com>**20091013014546
 Ignore-this: b75b3829fb53f7399693f1c1a39aacae
] 
[dirnode.set_children: take a dict, not a list
Brian Warner <warner@lothar.com>**20091013002440
 Ignore-this: 540ce72ce2727ee053afaae1ff124e21
] 
[dirnode.set_uri/set_children: change signature to take writecap+readcap
Brian Warner <warner@lothar.com>**20091012235126
 Ignore-this: 5df617b2d379a51c79148a857e6026b1
 instead of a single cap. The webapi t=set_children call benefits too.
] 
[replace Client.create_empty_dirnode() with create_dirnode(), in anticipation
Brian Warner <warner@lothar.com>**20091012224506
 Ignore-this: cbdaa4266ecb3c6496ffceab4f95709d
 of adding initial_children= argument.
 
 Includes stubbed-out initial_children= support.
] 
[test_web.py: use a less-fake client, making test harness smaller
Brian Warner <warner@lothar.com>**20091012222808
 Ignore-this: 29e95147f8c94282885c65b411d100bb
] 
[webapi.txt: document t=set_children, other small edits
Brian Warner <warner@lothar.com>**20091009200446
 Ignore-this: 4d7e76b04a7b8eaa0a981879f778ea5d
] 
[Verifier: check the full cryptext-hash tree on each share. Removed .todos
Brian Warner <warner@lothar.com>**20091005221849
 Ignore-this: 6fb039c5584812017d91725e687323a5
 from the last few test_repairer tests that were waiting on this.
] 
[Verifier: check the full block-hash-tree on each share
Brian Warner <warner@lothar.com>**20091005214844
 Ignore-this: 3f7ccf6d253f32340f1bf1da27803eee
 
 Removed the .todo from two test_repairer tests that check this. The only
 remaining .todos are on the three crypttext-hash-tree tests.
] 
[Verifier: check the full share-hash chain on each share
Brian Warner <warner@lothar.com>**20091005213443
 Ignore-this: 3d30111904158bec06a4eac22fd39d17
 
 Removed the .todo from two test_repairer tests that check this.
] 
[test_repairer: rename Verifier test cases to be more precise and less verbose
Brian Warner <warner@lothar.com>**20091005201115
 Ignore-this: 64be7094e33338c7c2aea9387e138771
] 
[immutable/checker.py: rearrange code a little bit, make it easier to follow
Brian Warner <warner@lothar.com>**20091005200252
 Ignore-this: 91cc303fab66faf717433a709f785fb5
] 
[test/common.py: wrap docstrings to 80cols so I can read them more easily
Brian Warner <warner@lothar.com>**20091005200143
 Ignore-this: b180a3a0235cbe309c87bd5e873cbbb3
] 
[immutable/download.py: wrap to 80cols, no functional changes
Brian Warner <warner@lothar.com>**20091005192542
 Ignore-this: 6b05fe3dc6d78832323e708b9e6a1fe
] 
[CHK-hashes.svg: cross out plaintext hashes, since we don't include
Brian Warner <warner@lothar.com>**20091005010803
 Ignore-this: bea2e953b65ec7359363aa20de8cb603
 them (until we finish #453)
] 
[docs: a few licensing clarifications requested by Ubuntu
zooko@zooko.com**20090927033226
 Ignore-this: 749fc8c9aeb6dc643669854a3e81baa7
] 
[setup: remove binary WinFUSE modules
zooko@zooko.com**20090924211436
 Ignore-this: 8aefc571d2ae22b9405fc650f2c2062
 I would prefer to have just source code, or indications of what 3rd-party packages are required, under revision control, and have the build process generate o
 r acquire the binaries as needed.  Also, having these in our release tarballs is interfering with getting Tahoe-LAFS uploaded into Ubuntu Karmic.  (Technicall
 y, they would accept binary modules as long as they came with the accompanying source so that they could satisfy their obligations under GPL2+ and TGPPL1+, bu
 t it is easier for now to remove the binaries from the source tree.)
 In this case, the binaries are from the tahoe-w32-client project: http://allmydata.org/trac/tahoe-w32-client , from which you can also get the source.
] 
[setup: remove binary _fusemodule.so 's
zooko@zooko.com**20090924211130
 Ignore-this: 74487bbe27d280762ac5dd5f51e24186
 I would prefer to have just source code, or indications of what 3rd-party packages are required, under revision control, and have the build process generate or acquire the binaries as needed.  Also, having these in our release tarballs is interfering with getting Tahoe-LAFS uploaded into Ubuntu Karmic.  (Technically, they would accept binary modules as long as they came with the accompanying source so that they could satisfy their obligations under GPL2+ and TGPPL1+, but it is easier for now to remove the binaries from the source tree.)
 In this case, these modules come from the MacFUSE project: http://code.google.com/p/macfuse/
] 
[doc: add a copy of LGPL2 for documentation purposes for ubuntu
zooko@zooko.com**20090924054218
 Ignore-this: 6a073b48678a7c84dc4fbcef9292ab5b
] 
[setup: remove a convenience copy of figleaf, to ease inclusion into Ubuntu Karmic Koala
zooko@zooko.com**20090924053215
 Ignore-this: a0b0c990d6e2ee65c53a24391365ac8d
 We need to carefully document the licence of figleaf in order to get Tahoe-LAFS into Ubuntu Karmic Koala.  However, figleaf isn't really a part of Tahoe-LAFS per se -- this is just a "convenience copy" of a development tool.  The quickest way to make Tahoe-LAFS acceptable for Karmic then, is to remove figleaf from the Tahoe-LAFS tarball itself.  People who want to run figleaf on Tahoe-LAFS (as everyone should want) can install figleaf themselves.  I haven't tested this -- there may be incompatibilities between upstream figleaf and the copy that we had here...
] 
[setup: shebang for misc/build-deb.py to fail quickly
zooko@zooko.com**20090819135626
 Ignore-this: 5a1b893234d2d0bb7b7346e84b0a6b4d
 Without this patch, when I ran "chmod +x ./misc/build-deb.py && ./misc/build-deb.py" then it hung indefinitely.  (I wonder what it was doing.)
] 
[docs: Shawn Willden grants permission for his contributions under GPL2+|TGPPL1+
zooko@zooko.com**20090921164651
 Ignore-this: ef1912010d07ff2ffd9678e7abfd0d57
] 
[docs: Csaba Henk granted permission to license fuse.py under the same terms as Tahoe-LAFS itself
zooko@zooko.com**20090921154659
 Ignore-this: c61ba48dcb7206a89a57ca18a0450c53
] 
[setup: mark setup.py as having utf-8 encoding in it
zooko@zooko.com**20090920180343
 Ignore-this: 9d3850733700a44ba7291e9c5e36bb91
] 
[doc: licensing cleanups
zooko@zooko.com**20090920171631
 Ignore-this: 7654f2854bf3c13e6f4d4597633a6630
 Use nice utf-8 © instead of "(c)". Remove licensing statements on utility modules that have been assigned to allmydata.com by their original authors. (Nattraverso was not assigned to allmydata.com -- it was LGPL'ed -- but I checked and src/allmydata/util/iputil.py was completely rewritten and doesn't contain any line of code from nattraverso.)  Add notes to misc/debian/copyright about licensing on files that aren't just allmydata.com-licensed.
] 
[build-deb.py: run darcsver early, otherwise we get the wrong version later on
Brian Warner <warner@lothar.com>**20090918033620
 Ignore-this: 6635c5b85e84f8aed0d8390490c5392a
] 
[new approach for debian packaging, sharing pieces across distributions. Still experimental, still only works for sid.
warner@lothar.com**20090818190527
 Ignore-this: a75eb63db9106b3269badbfcdd7f5ce1
] 
[new experimental deb-packaging rules. Only works for sid so far.
Brian Warner <warner@lothar.com>**20090818014052
 Ignore-this: 3a26ad188668098f8f3cc10a7c0c2f27
] 
[setup.py: read _version.py and pass to setup(version=), so more commands work
Brian Warner <warner@lothar.com>**20090818010057
 Ignore-this: b290eb50216938e19f72db211f82147e
 like "setup.py --version" and "setup.py --fullname"
] 
[test/check_speed.py: fix shbang line
Brian Warner <warner@lothar.com>**20090818005948
 Ignore-this: 7f3a37caf349c4c4de704d0feb561f8d
] 
[setup: remove bundled version of darcsver-1.2.1
zooko@zooko.com**20090816233432
 Ignore-this: 5357f26d2803db2d39159125dddb963a
 That version of darcsver emits a scary error message when the darcs executable or the _darcs subdirectory is not found.
 This error is hidden (unless the --loud option is passed) in darcsver >= 1.3.1.
 Fixes #788.
] 
[de-Service-ify Helper, pass in storage_broker and secret_holder directly.
Brian Warner <warner@lothar.com>**20090815201737
 Ignore-this: 86b8ac0f90f77a1036cd604dd1304d8b
 This makes it more obvious that the Helper currently generates leases with
 the Helper's own secrets, rather than getting values from the client, which
 is arguably a bug that will likely be resolved with the Accounting project.
] 
[immutable.Downloader: pass StorageBroker to constructor, stop being a Service
Brian Warner <warner@lothar.com>**20090815192543
 Ignore-this: af5ab12dbf75377640a670c689838479
 child of the client, access with client.downloader instead of
 client.getServiceNamed("downloader"). The single "Downloader" instance is
 scheduled for demolition anyways, to be replaced by individual
 filenode.download calls.
] 
[tests: double the timeout on test_runner.RunNode.test_introducer since feisty hit a timeout
zooko@zooko.com**20090815160512
 Ignore-this: ca7358bce4bdabe8eea75dedc39c0e67
 I'm not sure if this is an actual timing issue (feisty is running on an overloaded VM if I recall correctly), or it there is a deeper bug.
] 
[stop making History be a Service, it wasn't necessary
Brian Warner <warner@lothar.com>**20090815114415
 Ignore-this: b60449231557f1934a751c7effa93cfe
] 
[Overhaul IFilesystemNode handling, to simplify tests and use POLA internally.
Brian Warner <warner@lothar.com>**20090815112846
 Ignore-this: 1db1b9c149a60a310228aba04c5c8e5f
 
 * stop using IURI as an adapter
 * pass cap strings around instead of URI instances
 * move filenode/dirnode creation duties from Client to new NodeMaker class
 * move other Client duties to KeyGenerator, SecretHolder, History classes
 * stop passing Client reference to dirnode/filenode constructors
   - pass less-powerful references instead, like StorageBroker or Uploader
 * always create DirectoryNodes by wrapping a filenode (mutable for now)
 * remove some specialized mock classes from unit tests
 
 Detailed list of changes (done one at a time, then merged together)
 
 always pass a string to create_node_from_uri(), not an IURI instance
 always pass a string to IFilesystemNode constructors, not an IURI instance
 stop using IURI() as an adapter, switch on cap prefix in create_node_from_uri()
 client.py: move SecretHolder code out to a separate class
 test_web.py: hush pyflakes
 client.py: move NodeMaker functionality out into a separate object
 LiteralFileNode: stop storing a Client reference
 immutable Checker: remove Client reference, it only needs a SecretHolder
 immutable Upload: remove Client reference, leave SecretHolder and StorageBroker
 immutable Repairer: replace Client reference with StorageBroker and SecretHolder
 immutable FileNode: remove Client reference
 mutable.Publish: stop passing Client
 mutable.ServermapUpdater: get StorageBroker in constructor, not by peeking into Client reference
 MutableChecker: reference StorageBroker and History directly, not through Client
 mutable.FileNode: removed unused indirection to checker classes
 mutable.FileNode: remove Client reference
 client.py: move RSA key generation into a separate class, so it can be passed to the nodemaker
 move create_mutable_file() into NodeMaker
 test_dirnode.py: stop using FakeClient mockups, use NoNetworkGrid instead. This simplifies the code, but takes longer to run (17s instead of 6s). This should come down later when other cleanups make it possible to use simpler (non-RSA) fake mutable files for dirnode tests.
 test_mutable.py: clean up basedir names
 client.py: move create_empty_dirnode() into NodeMaker
 dirnode.py: get rid of DirectoryNode.create
 remove DirectoryNode.init_from_uri, refactor NodeMaker for customization, simplify test_web's mock Client to match
 stop passing Client to DirectoryNode, make DirectoryNode.create_with_mutablefile the normal DirectoryNode constructor, start removing client from NodeMaker
 remove Client from NodeMaker
 move helper status into History, pass History to web.Status instead of Client
 test_mutable.py: fix minor typo
] 
[docs: edits for docs/running.html from Sam Mason
zooko@zooko.com**20090809201416
 Ignore-this: 2207e80449943ebd4ed50cea57c43143
] 
[docs: install.html: instruct Debian users to use this document and not to go find the DownloadDebianPackages page, ignore the warning at the top of it, and try it
zooko@zooko.com**20090804123840
 Ignore-this: 49da654f19d377ffc5a1eff0c820e026
 http://allmydata.org/pipermail/tahoe-dev/2009-August/002507.html
] 
[docs: relnotes.txt: reflow to 63 chars wide because google groups and some web forms seem to wrap to that
zooko@zooko.com**20090802135016
 Ignore-this: 53b1493a0491bc30fb2935fad283caeb
] 
[docs: about.html: fix English usage noticed by Amber
zooko@zooko.com**20090802050533
 Ignore-this: 89965c4650f9bd100a615c401181a956
] 
[docs: fix mis-spelled word in about.html
zooko@zooko.com**20090802050320
 Ignore-this: fdfd0397bc7cef9edfde425dddeb67e5
] 
[TAG allmydata-tahoe-1.5.0
zooko@zooko.com**20090802031303
 Ignore-this: 94e5558e7225c39a86aae666ea00f166
] 
Patch bundle hash:
a43c11776d53b65b527751a76b1b4ea1bdbf5444
