Ticket #928: test_hung_server.py

File test_hung_server.py, 8.3 KB (added by davidsarah at 2010-01-28T06:56:16Z)

Incomplete tests for download when some servers are hung

from twisted.trial import unittest
from twisted.internet import defer, reactor
from allmydata.util.consumer import download_to_data
from allmydata.immutable import upload
from allmydata.test.no_network import GridTestMixin

immutable_plaintext = "data" * 10000
mutable_plaintext = "muta" * 10000

class HungServerDownloadTest(GridTestMixin, unittest.TestCase):
    timeout = 30
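    # (timeout is trial's per-test time limit, in seconds; without the #928 fix
    #  the download below is expected to stall and trip this limit.)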

    def test_k_good_servers(self):
        # k = 3 servers with valid shares, and the rest hung

        self.basedir = "download/test_k_good_servers"
        self.set_up_grid(num_clients=1, num_servers=10)

        self.c0 = self.g.clients[0]
        #self.c0.DEFAULT_ENCODING_PARAMETERS['happy'] = 1
        sb = self.c0.nodemaker.storage_broker
        peerids = [serverid for (serverid, ss) in sb.get_all_servers()]

        the_cows_come_home = defer.Deferred()

        data = upload.Data(immutable_plaintext, convergence="")
        d = self.c0.upload(data)
        def _store_uri(u):
            self.uri = u.uri
            return self.find_shares(self.uri)
        d.addCallback(_store_uri)
        def _store_shares(shares):
            self.shares = shares  # currently unused
        d.addCallback(_store_shares)

        # Hanging only server 9 also fails without the #928 fix,
        # but *breaking* servers 3..9 (rather than hanging them) passes.
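        # (hang_server and break_server are assumed to be NoNetworkGrid helpers:
        #  hang_server holds a server's responses until the given Deferred fires,
        #  while break_server makes the server fail its requests outright.)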
        def _hang_servers(ignored):
            for i in range(3, 10):
                self.g.hang_server(peerids[i], until=the_cows_come_home)
                #self.g.break_server(peerids[i])
        d.addCallback(_hang_servers)

        d.addCallback(lambda ign: self.download_immutable())

        #d.addCallback(lambda ign: the_cows_come_home.callback(None))

        # test that our 'hang_server' abstraction works: unhang servers after 10 seconds
        #reactor.callLater(10, the_cows_come_home.callback, None)
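        # (Note: the callLater line would unhang the servers unconditionally after
        #  10 seconds, so a download stalled by this bug would eventually complete,
        #  whereas the chained callback above only fires once download_immutable
        #  has already finished.)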
        return d

    def download_immutable(self):
        n = self.c0.create_node_from_uri(self.uri)
        d = download_to_data(n)
        def _got_data(data):
            self.failUnlessEqual(data, immutable_plaintext)
        d.addCallback(_got_data)
        return d

    def download_mutable(self):  # currently unused
        n = self.c0.create_node_from_uri(self.uri)
        d = n.download_best_version()
        def _got_data(data):
            self.failUnlessEqual(data, mutable_plaintext)
        d.addCallback(_got_data)
        return d
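    # (download_mutable is unused because nothing in this file creates a mutable
    #  file from mutable_plaintext yet; a mutable variant of test_k_good_servers
    #  would need to do that first.)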


    """ currently unused
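    (Note: if this code were revived, it would also need imports for os, shutil,
    uri, storage_index_to_dir, and NotEnoughSharesError, plus _setup_and_upload
    and shouldFail helpers, none of which are defined or imported in this file.)
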
    def _add_server_with_share(self, server_number, share_number=None,
                               readonly=False):
        assert self.g, "I tried to find a grid at self.g, but failed"
        assert self.shares, "I tried to find shares at self.shares, but failed"
        ss = self.g.make_server(server_number, readonly)
        self.g.add_server(server_number, ss)
        if share_number is not None:  # share 0 is valid, so test against None, not truthiness
            # Copy share i from the directory associated with the first
            # storage server to the directory associated with this one.
            old_share_location = self.shares[share_number][2]
            new_share_location = os.path.join(ss.storedir, "shares")
            si = uri.from_string(self.uri).get_storage_index()
            new_share_location = os.path.join(new_share_location,
                                              storage_index_to_dir(si))
            if not os.path.exists(new_share_location):
                os.makedirs(new_share_location)
            new_share_location = os.path.join(new_share_location,
                                              str(share_number))
            shutil.copy(old_share_location, new_share_location)
            shares = self.find_shares(self.uri)
            # Make sure that the storage server has the share.
            self.failUnless((share_number, ss.my_nodeid, new_share_location)
                            in shares)

    def test_problem_layouts(self):
        self.basedir = self.mktemp()
        # This scenario is at
        # http://allmydata.org/trac/tahoe/ticket/778#comment:52
        #
        # The scenario in comment:52 proposes that we have a layout
        # like:
        # server 1: share 1
        # server 2: share 1
        # server 3: share 1
        # server 4: shares 2 - 10
        # To get access to the shares, we will first upload to one
        # server, which will then have shares 1 - 10. We'll then
        # add three new servers, configure them to not accept any new
        # shares, then write share 1 directly into the serverdir of each.
        # Then each of servers 1 - 3 will report that they have share 1,
        # and will not accept any new share, while server 4 will report that
        # it has shares 2 - 10 and will accept new shares.
        # We'll then set 'happy' = 4, and see that an upload fails
        # (as it should)
        d = self._setup_and_upload()
        d.addCallback(lambda ign:
            self._add_server_with_share(1, 0, True))
        d.addCallback(lambda ign:
            self._add_server_with_share(2, 0, True))
        d.addCallback(lambda ign:
            self._add_server_with_share(3, 0, True))
        # Remove the first share from server 0.
        def _remove_share_0():
            share_location = self.shares[0][2]
            os.remove(share_location)
        d.addCallback(lambda ign:
            _remove_share_0())
        # Set happy = 4 in the client.
        def _prepare():
            client = self.g.clients[0]
            client.DEFAULT_ENCODING_PARAMETERS['happy'] = 4
            return client
        d.addCallback(lambda ign:
            _prepare())
        # Uploading data should fail
        d.addCallback(lambda client:
            self.shouldFail(NotEnoughSharesError, "test_happy_semantics",
                            "shares could only be placed on 1 servers "
                            "(4 were requested)",
                            client.upload, upload.Data("data" * 10000,
                                                       convergence="")))

        # This scenario is at
        # http://allmydata.org/trac/tahoe/ticket/778#comment:53
        #
        # Set up the grid to have one server
        def _change_basedir(ign):
            self.basedir = self.mktemp()
        d.addCallback(_change_basedir)
        d.addCallback(lambda ign:
            self._setup_and_upload())
        # We want to have a layout like this:
        # server 1: share 1
        # server 2: share 2
        # server 3: share 3
        # server 4: shares 1 - 10
        # (this is an expansion of Zooko's example because it is easier
        #  to code, but it will fail in the same way)
        # To start, we'll create a server with shares 1-10 of the data
        # we're about to upload.
        # Next, we'll add three new servers to our NoNetworkGrid. We'll add
        # one share from our initial upload to each of these.
        # The counterintuitive ordering of the share numbers is to deal with
        # the permuting of these servers -- distributing the shares this
        # way ensures that the Tahoe2PeerSelector sees them in the order
        # described above.
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=1, share_number=2))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=2, share_number=0))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=3, share_number=1))
        # So, we now have the following layout:
        # server 0: shares 1 - 10
        # server 1: share 0
        # server 2: share 1
        # server 3: share 2
        # We want to change the 'happy' parameter in the client to 4.
        # We then want to feed the upload process a list of peers that
        # server 0 is at the front of, so we trigger Zooko's scenario.
        # Ideally, a reupload of our original data should work.
        def _reset_encoding_parameters(ign):
            client = self.g.clients[0]
            client.DEFAULT_ENCODING_PARAMETERS['happy'] = 4
            return client
        d.addCallback(_reset_encoding_parameters)
        # We need this to get around the fact that the old Data
        # instance already has a happy parameter set.
        d.addCallback(lambda client:
            client.upload(upload.Data("data" * 10000, convergence="")))
        return d
        """