Ticket #1212: 1212.darcs.patch

File 1212.darcs.patch, 3.2 KB (added by zooko, at 2010-09-29T05:18:25Z)
1 patch for repository http://tahoe-lafs.org/source/tahoe-lafs/trunk-hashedformat:

Mon Sep 27 13:01:02 PDT 2010  Kevan Carstensen <kevan@isnotajoke.com>
  * immutable/repairer.py: don't use the default happiness setting when repairing

New patches:

[immutable/repairer.py: don't use the default happiness setting when repairing
Kevan Carstensen <kevan@isnotajoke.com>**20100927200102
 Ignore-this: bd704d9744b970849da8d46a16b8089a
] {
hunk ./src/allmydata/immutable/repairer.py 60
             vcap = self._filenode.get_verify_cap()
             k = vcap.needed_shares
             N = vcap.total_shares
-            happy = upload.BaseUploadable.default_encoding_param_happy
+            # Per ticket #1212
+            # (http://tahoe-lafs.org/trac/tahoe-lafs/ticket/1212)
+            happy = 0
             self._encodingparams = (k, happy, N, segsize)
             ul = upload.CHKUploader(self._storage_broker, self._secret_holder)
             return ul.start(self) # I am the IEncryptedUploadable
hunk ./src/allmydata/test/test_repairer.py 519
                       self.failUnlessEqual(newdata, common.TEST_DATA))
         return d
 
+    def test_repairer_servers_of_happiness(self):
+        # The repairer is supposed to generate and place as many of the
+        # missing shares as possible without caring about how they are
+        # distributed.
+        self.basedir = "repairer/Repairer/repairer_servers_of_happiness"
+        self.set_up_grid(num_clients=2, num_servers=10)
+        d = self.upload_and_stash()
+        # Now delete some servers. We want to leave 3 servers, which
+        # will allow us to restore the file to a healthy state without
+        # distributing the shares widely enough to satisfy the default
+        # happiness setting.
+        def _delete_some_servers(ignored):
+            for i in xrange(7):
+                self.g.remove_server(self.g.servers_by_number[i].my_nodeid)
+
+            assert len(self.g.servers_by_number) == 3
+
+        d.addCallback(_delete_some_servers)
+        # Now try to repair the file.
+        d.addCallback(lambda ignored:
+            self.c0_filenode.check_and_repair(Monitor(), verify=False))
+        def _check_results(crr):
+            self.failUnlessIsInstance(crr,
+                                      check_results.CheckAndRepairResults)
+            pre = crr.get_pre_repair_results()
+            post = crr.get_post_repair_results()
+            for p in (pre, post):
+                self.failUnlessIsInstance(p, check_results.CheckResults)
+
+            self.failIf(pre.is_healthy())
+            self.failUnless(post.is_healthy())
+
+        d.addCallback(_check_results)
+        return d
+
     # why is test_repair_from_corruption_of_1 disabled? Read on:
     #
     # As recently documented in NEWS for the 1.3.0 release, the current
}

Context:

[NEWS: note dependency updates to pycryptopp and pycrypto.
david-sarah@jacaranda.org**20100924191207
 Ignore-this: eeaf5c9c9104f24c450c2ec4482ac1ee
]
[TAG allmydata-tahoe-1.8.0
zooko@zooko.com**20100924021631
 Ignore-this: 494ca0a885c5e20c883845fc53e7ab5d
]
Patch bundle hash:
b839e041e04283d24703bc98b7e07b3278cc38ad